//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"

//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool ForCodeSize;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr),
        ForCodeSize(false) {}

  const char *getPassName() const override {
    return "AArch64 Instruction Selection";
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    ForCodeSize = MF.getFunction()->optForSize();
    Subtarget = &MF.getSubtarget<AArch64Subtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  void Select(SDNode *Node) override;

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  bool tryMLAV64LaneV128(SDNode *N);
  bool tryMULLV64LaneV128(unsigned IntNo, SDNode *N);
  bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
  bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
  bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, false, Reg, Shift);
  }
  bool SelectLogicalShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
    return SelectShiftedRegister(N, true, Reg, Shift);
  }
  bool SelectAddrModeIndexed7S8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed7S(N, 16, Base, OffImm);
  }
  bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 1, Base, OffImm);
  }
  bool SelectAddrModeIndexed16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 2, Base, OffImm);
  }
  bool SelectAddrModeIndexed32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 4, Base, OffImm);
  }
  bool SelectAddrModeIndexed64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 8, Base, OffImm);
  }
  bool SelectAddrModeIndexed128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeIndexed(N, 16, Base, OffImm);
  }
  bool SelectAddrModeUnscaled8(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 1, Base, OffImm);
  }
  bool SelectAddrModeUnscaled16(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 2, Base, OffImm);
  }
  bool SelectAddrModeUnscaled32(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 4, Base, OffImm);
  }
  bool SelectAddrModeUnscaled64(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 8, Base, OffImm);
  }
  bool SelectAddrModeUnscaled128(SDValue N, SDValue &Base, SDValue &OffImm) {
    return SelectAddrModeUnscaled(N, 16, Base, OffImm);
  }

  template<int Width>
  bool SelectAddrModeWRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeWRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

  template<int Width>
  bool SelectAddrModeXRO(SDValue N, SDValue &Base, SDValue &Offset,
                         SDValue &SignExtend, SDValue &DoShift) {
    return SelectAddrModeXRO(N, Width / 8, Base, Offset, SignExtend, DoShift);
  }

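  // Note (illustrative): the Width template argument in the two wrappers
  // above is a bit width coming from the pattern definitions; dividing by 8
  // converts it to the byte size that the shared WRO/XRO helpers expect.
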
  /// Form sequences of consecutive 64/128-bit registers for use in NEON
  /// instructions making use of a vector-list (e.g. ldN, tbl). Vecs must have
  /// between 1 and 4 elements. If it contains a single element, that element
  /// is returned unchanged; otherwise a REG_SEQUENCE value is returned.
  SDValue createDTuple(ArrayRef<SDValue> Vecs);
  SDValue createQTuple(ArrayRef<SDValue> Vecs);

  /// Generic helper for the createDTuple/createQTuple
  /// functions. Those should almost always be called instead.
  SDValue createTuple(ArrayRef<SDValue> Vecs, const unsigned RegClassIDs[],
                      const unsigned SubRegs[]);

  void SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc, bool isExt);

  bool tryIndexedLoad(SDNode *N);

  void SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                  unsigned SubRegIdx);
  void SelectPostLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                      unsigned SubRegIdx);
  void SelectLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostLoadLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  void SelectStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
  void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);

  bool tryBitfieldExtractOp(SDNode *N);
  bool tryBitfieldExtractOpFromSExt(SDNode *N);
  bool tryBitfieldInsertOp(SDNode *N);
  bool tryBitfieldInsertInZeroOp(SDNode *N);

  bool tryReadRegister(SDNode *N);
  bool tryWriteRegister(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

private:
  bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,
                             SDValue &Shift);
  bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base,
                               SDValue &OffImm);
  bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,
                             SDValue &OffImm);
  bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base,
                              SDValue &OffImm);
  bool SelectAddrModeWRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool SelectAddrModeXRO(SDValue N, unsigned Size, SDValue &Base,
                         SDValue &Offset, SDValue &SignExtend,
                         SDValue &DoShift);
  bool isWorthFolding(SDValue V) const;
  bool SelectExtendedSHL(SDValue N, unsigned Size, bool WantExtend,
                         SDValue &Offset, SDValue &SignExtend);

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos, unsigned Width);

  void SelectCMP_SWAP(SDNode *N);

};
} // end anonymous namespace

/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so Imm will receive the 32-bit value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
  if (const ConstantSDNode *C = dyn_cast<const ConstantSDNode>(N)) {
    Imm = C->getZExtValue();
    return true;
  }
  return false;
}

// isIntImmediate - This method tests to see if the value is a constant
// operand. If so Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {
  return isIntImmediate(N.getNode(), Imm);
}

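// For example (illustrative), for N = (add x, (i64 42)),
// isIntImmediate(N->getOperand(1), Imm) returns true and sets Imm to 42.
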
// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so Imm will receive the 32 bit value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {
  return N->getOpcode() == Opc &&
         isIntImmediate(N->getOperand(1).getNode(), Imm);
}

bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch(ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_Q:
    // Require the address to be in a register. That is safe for all AArch64
    // variants and it is hard to do anything much smarter without knowing
    // how the operand is used.
    OutOps.push_back(Op);
    return false;
  }
  return true;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();
  unsigned ShiftAmt;

  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return false;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  SDLoc dl(N);
  Val = CurDAG->getTargetConstant(Immed, dl, MVT::i32);
  Shift = CurDAG->getTargetConstant(ShVal, dl, MVT::i32);
  return true;
}

/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {
  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcodes it's interested in; however,
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  if (!isa<ConstantSDNode>(N.getNode()))
    return false;

  // The immediate operand must be a 24-bit zero-extended immediate.
  uint64_t Immed = cast<ConstantSDNode>(N.getNode())->getZExtValue();

  // This negation is almost always valid, but "cmp wN, #0" and "cmn wN, #0"
  // have the opposite effect on the C flag, so this pattern mustn't match under
  // those circumstances.
  if (Immed == 0)
    return false;

  if (N.getValueType() == MVT::i32)
    Immed = ~((uint32_t)Immed) + 1;
  else
    Immed = ~Immed + 1ULL;
  if (Immed & 0xFFFFFFFFFF000000ULL)
    return false;

  Immed &= 0xFFFFFFULL;
  return SelectArithImmed(CurDAG->getConstant(Immed, SDLoc(N), MVT::i32), Val,
                          Shift);
}

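// Illustrative examples for the two selectors above:
//   - SelectArithImmed on 0x123000 picks ShiftAmt = 12 and returns
//     Val = 0x123 with Shift = "LSL #12".
//   - SelectNegArithImmed on an i32 constant -1 negates it to 1 and selects
//     Val = 1 with Shift = "LSL #0", so a SUBS of -1 becomes an ADDS of 1.
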
/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {
  switch (N.getOpcode()) {
  default:
    return AArch64_AM::InvalidShiftExtend;
  case ISD::SHL:
    return AArch64_AM::LSL;
  case ISD::SRL:
    return AArch64_AM::LSR;
  case ISD::SRA:
    return AArch64_AM::ASR;
  case ISD::ROTR:
    return AArch64_AM::ROR;
  }
}

/// \brief Determine whether it is worth folding V into an extended register.
bool AArch64DAGToDAGISel::isWorthFolding(SDValue V) const {
  // It hurts if the value is used at least twice, unless we are optimizing
  // for code size.
  return ForCodeSize || V.hasOneUse();
}

/// SelectShiftedRegister - Select a "shifted register" operand. If the value
/// is not shifted, set the Shift operand to default of "LSL 0". The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not. The AllowROR parameter specifies whether ROR is
/// supported.
bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                                SDValue &Reg, SDValue &Shift) {
  AArch64_AM::ShiftExtendType ShType = getShiftTypeForNode(N);
  if (ShType == AArch64_AM::InvalidShiftExtend)
    return false;
  if (!AllowROR && ShType == AArch64_AM::ROR)
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    unsigned BitSize = N.getValueType().getSizeInBits();
    unsigned Val = RHS->getZExtValue() & (BitSize - 1);
    unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);

    Reg = N.getOperand(0);
    Shift = CurDAG->getTargetConstant(ShVal, SDLoc(N), MVT::i32);
    return isWorthFolding(N);
  }

  return false;
}

/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {
  if (N.getOpcode() == ISD::SIGN_EXTEND ||
      N.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT SrcVT;
    if (N.getOpcode() == ISD::SIGN_EXTEND_INREG)
      SrcVT = cast<VTSDNode>(N.getOperand(1))->getVT();
    else
      SrcVT = N.getOperand(0).getValueType();

    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::SXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::SXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::SXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::ZERO_EXTEND ||
             N.getOpcode() == ISD::ANY_EXTEND) {
    EVT SrcVT = N.getOperand(0).getValueType();
    if (!IsLoadStore && SrcVT == MVT::i8)
      return AArch64_AM::UXTB;
    else if (!IsLoadStore && SrcVT == MVT::i16)
      return AArch64_AM::UXTH;
    else if (SrcVT == MVT::i32)
      return AArch64_AM::UXTW;
    assert(SrcVT != MVT::i64 && "extend from 64-bits?");

    return AArch64_AM::InvalidShiftExtend;
  } else if (N.getOpcode() == ISD::AND) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return AArch64_AM::InvalidShiftExtend;
    uint64_t AndMask = CSD->getZExtValue();

    switch (AndMask) {
    default:
      return AArch64_AM::InvalidShiftExtend;
    case 0xFF:
      return !IsLoadStore ? AArch64_AM::UXTB : AArch64_AM::InvalidShiftExtend;
    case 0xFFFF:
      return !IsLoadStore ? AArch64_AM::UXTH : AArch64_AM::InvalidShiftExtend;
    case 0xFFFFFFFF:
      return AArch64_AM::UXTW;
    }
  }

  return AArch64_AM::InvalidShiftExtend;
}

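// E.g. (and x, 0xffff) is classified as UXTH for arithmetic operands, but as
// InvalidShiftExtend when IsLoadStore is set: the register-offset load/store
// forms only provide 32-bit (UXTW/SXTW) extends.
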
// Helper for SelectMLAV64LaneV128 - Recognize high lane extracts.
static bool checkHighLaneIndex(SDNode *DL, SDValue &LaneOp, int &LaneIdx) {
  if (DL->getOpcode() != AArch64ISD::DUPLANE16 &&
      DL->getOpcode() != AArch64ISD::DUPLANE32)
    return false;

  SDValue SV = DL->getOperand(0);
  if (SV.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;

  SDValue EV = SV.getOperand(1);
  if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
    return false;

  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
  LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
  LaneOp = EV.getOperand(0);

  return true;
}

// Helper for SelectOpcV64LaneV128 - Recognize operations where one operand is a
// high lane extract.
static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
                             SDValue &LaneOp, int &LaneIdx) {

  if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx)) {
    std::swap(Op0, Op1);
    if (!checkHighLaneIndex(Op0.getNode(), LaneOp, LaneIdx))
      return false;
  }
  StdOp = Op1;
  return true;
}

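// Together, the two helpers above recognize DAGs of the shape
//   (DUPLANE (INSERT_SUBVECTOR _, (EXTRACT_SUBVECTOR Vec, EVidx), _), DLidx)
// and recover the original 128-bit vector in LaneOp along with the absolute
// lane index DLidx + EVidx, so the lane can be addressed directly.
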
/// SelectMLAV64LaneV128 - AArch64 supports vector MLAs where one multiplicand
/// is a lane in the upper half of a 128-bit vector. Recognize and select this
/// so that we don't emit unnecessary lane extracts.
bool AArch64DAGToDAGISel::tryMLAV64LaneV128(SDNode *N) {
  SDLoc dl(N);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue MLAOp1;   // Will hold ordinary multiplicand for MLA.
  SDValue MLAOp2;   // Will hold lane-accessed multiplicand for MLA.
  int LaneIdx = -1; // Will hold the lane index.

  if (Op1.getOpcode() != ISD::MUL ||
      !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                        LaneIdx)) {
    std::swap(Op0, Op1);
    if (Op1.getOpcode() != ISD::MUL ||
        !checkV64LaneV128(Op1.getOperand(0), Op1.getOperand(1), MLAOp1, MLAOp2,
                          LaneIdx))
      return false;
  }

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { Op0, MLAOp1, MLAOp2, LaneIdxVal };

  unsigned MLAOpc = ~0U;

  switch (N->getSimpleValueType(0).SimpleTy) {
  default:
    llvm_unreachable("Unrecognized MLA.");
  case MVT::v4i16:
    MLAOpc = AArch64::MLAv4i16_indexed;
    break;
  case MVT::v8i16:
    MLAOpc = AArch64::MLAv8i16_indexed;
    break;
  case MVT::v2i32:
    MLAOpc = AArch64::MLAv2i32_indexed;
    break;
  case MVT::v4i32:
    MLAOpc = AArch64::MLAv4i32_indexed;
    break;
  }

  ReplaceNode(N, CurDAG->getMachineNode(MLAOpc, dl, N->getValueType(0), Ops));
  return true;
}

bool AArch64DAGToDAGISel::tryMULLV64LaneV128(unsigned IntNo, SDNode *N) {
  SDLoc dl(N);
  SDValue SMULLOp0;
  SDValue SMULLOp1;
  int LaneIdx;

  if (!checkV64LaneV128(N->getOperand(1), N->getOperand(2), SMULLOp0, SMULLOp1,
                        LaneIdx))
    return false;

  SDValue LaneIdxVal = CurDAG->getTargetConstant(LaneIdx, dl, MVT::i64);

  SDValue Ops[] = { SMULLOp0, SMULLOp1, LaneIdxVal };

  unsigned SMULLOpc = ~0U;

  if (IntNo == Intrinsic::aarch64_neon_smull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized SMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::SMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::SMULLv2i32_indexed;
      break;
    }
  } else if (IntNo == Intrinsic::aarch64_neon_umull) {
    switch (N->getSimpleValueType(0).SimpleTy) {
    default:
      llvm_unreachable("Unrecognized UMULL.");
    case MVT::v4i32:
      SMULLOpc = AArch64::UMULLv4i16_indexed;
      break;
    case MVT::v2i64:
      SMULLOpc = AArch64::UMULLv2i32_indexed;
      break;
    }
  } else
    llvm_unreachable("Unrecognized intrinsic.");

  ReplaceNode(N, CurDAG->getMachineNode(SMULLOpc, dl, N->getValueType(0), Ops));
  return true;
}

/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {
  if (N.getValueType() == MVT::i32)
    return N;

  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  MachineSDNode *Node = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                               dl, MVT::i32, N, SubReg);
  return SDValue(Node, 0);
}

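// For example (illustrative), if N is an i64 value, narrowIfNeeded emits
//   EXTRACT_SUBREG N, sub_32
// so that an extend-capable instruction sees the GPR32 operand it requires.
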
/// SelectArithExtendedRegister - Select an "extended register" operand. This
/// operand folds in an extend followed by an optional left shift.
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {
  unsigned ShiftVal = 0;
  AArch64_AM::ShiftExtendType Ext;

  if (N.getOpcode() == ISD::SHL) {
    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!CSD)
      return false;
    ShiftVal = CSD->getZExtValue();
    if (ShiftVal > 4)
      return false;

    Ext = getExtendTypeForNode(N.getOperand(0));
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0).getOperand(0);
  } else {
    Ext = getExtendTypeForNode(N);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Reg = N.getOperand(0);
  }

  // AArch64 mandates that the RHS of the operation must use the smallest
  // register class that could contain the size being extended from. Thus,
  // if we're folding a (sext i8), we need the RHS to be a GPR32, even though
  // there might not be an actual 32-bit value in the program. We can
  // (harmlessly) synthesize one by injecting an EXTRACT_SUBREG here.
  assert(Ext != AArch64_AM::UXTX && Ext != AArch64_AM::SXTX);
  Reg = narrowIfNeeded(CurDAG, Reg);
  Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
                                    MVT::i32);
  return isWorthFolding(N);
}

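// E.g. (add x0, (shl (sext_inreg w1, i8), 2)) can be selected as
//   add x2, x0, w1, sxtb #2
// (register names illustrative): Ext = SXTB and ShiftVal = 2 are folded into
// the single Shift operand.
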
/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
/// leads to duplicated ADRP instructions.
static bool isWorthFoldingADDlow(SDValue N) {
  for (auto Use : N->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;

    // ldar and stlr have much more restrictive addressing modes (just a
    // register).
    if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getOrdering()))
      return false;
  }

  return true;
}

/// SelectAddrModeIndexed7S - Select a "register plus scaled signed 7-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed7S(SDValue N, unsigned Size,
                                                  SDValue &Base,
                                                  SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  // As opposed to the (12-bit) Indexed addressing mode below, the 7-bit signed
  // addressing mode selected here doesn't support labels/immediates, only
  // base+offset.

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = RHS->getSExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= -(0x40 << Scale) &&
          RHSC < (0x40 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    stp x1, x2, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}

/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                                SDValue &Base, SDValue &OffImm) {
  SDLoc dl(N);
  const DataLayout &DL = CurDAG->getDataLayout();
  const TargetLowering *TLI = getTargetLowering();
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
    OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
    return true;
  }

  if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
    GlobalAddressSDNode *GAN =
        dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
    Base = N.getOperand(0);
    OffImm = N.getOperand(1);
    if (!GAN)
      return true;

    const GlobalValue *GV = GAN->getGlobal();
    unsigned Alignment = GV->getAlignment();
    Type *Ty = GV->getValueType();
    if (Alignment == 0 && Ty->isSized())
      Alignment = DL.getABITypeAlignment(Ty);

    if (Alignment >= Size)
      return true;
  }

  if (CurDAG->isBaseWithConstantOffset(N)) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      int64_t RHSC = (int64_t)RHS->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        Base = N.getOperand(0);
        if (Base.getOpcode() == ISD::FrameIndex) {
          int FI = cast<FrameIndexSDNode>(Base)->getIndex();
          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
        }
        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
        return true;
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (SelectAddrModeUnscaled(N, Size, Base, OffImm))
    return false;

  // Base only. The address will be materialized into a register before
  // the memory is accessed.
  //    add x0, Xbase, #offset
  //    ldr x0, [x0]
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, dl, MVT::i64);
  return true;
}

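// E.g. an 8-byte load at base + 2040 satisfies the checks above: 2040 is
// 8-byte aligned and 2040 >> 3 = 255 < 0x1000, so Base is the base register,
// OffImm is 255, and the access uses the scaled form
//   ldr x0, [Xbase, #2040]
// (register names illustrative).
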
/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address. This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode. The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int64_t RHSC = RHS->getSExtValue();
    // If the offset is valid as a scaled immediate, don't match here.
    if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
        RHSC < (0x1000 << Log2_32(Size)))
      return false;
    if (RHSC >= -256 && RHSC < 256) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        const TargetLowering *TLI = getTargetLowering();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i64);
      return true;
    }
  }
  return false;
}

static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {
  SDLoc dl(N);
  SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
  SDValue ImpDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, MVT::i64), 0);
  MachineSDNode *Node = CurDAG->getMachineNode(
      TargetOpcode::INSERT_SUBREG, dl, MVT::i64, ImpDef, N, SubReg);
  return SDValue(Node, 0);
}

/// \brief Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {
  assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
    return false;

  SDLoc dl(N);
  if (WantExtend) {
    AArch64_AM::ShiftExtendType Ext =
        getExtendTypeForNode(N.getOperand(0), true);
    if (Ext == AArch64_AM::InvalidShiftExtend)
      return false;

    Offset = narrowIfNeeded(CurDAG, N.getOperand(0).getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
  } else {
    Offset = N.getOperand(0);
    SignExtend = CurDAG->getTargetConstant(0, dl, MVT::i32);
  }

  unsigned LegalShiftVal = Log2_32(Size);
  unsigned ShiftVal = CSD->getZExtValue();

  if (ShiftVal != 0 && ShiftVal != LegalShiftVal)
    return false;

  return isWorthFolding(N);
}

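// E.g. with Size = 4 (a 32-bit access), (shl (zext w1), 2) qualifies: the
// extend is UXTW and the shift amount equals Log2_32(4) == 2, matching the
// "[Xbase, wIdx, uxtw #2]" register-offset form (register names illustrative).
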
bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc dl(N);

  // We don't want to match immediate adds here, because they are better lowered
  // to the register-immediate addressing modes.
  if (isa<ConstantSDNode>(LHS) || isa<ConstantSDNode>(RHS))
    return false;

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Remember if it is worth folding N when it produces an extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, true, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, true, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, dl, MVT::i32);
    return true;
  }

  // There was no shift, whatever else we find.
  DoShift = CurDAG->getTargetConstant(false, dl, MVT::i32);

  AArch64_AM::ShiftExtendType Ext = AArch64_AM::InvalidShiftExtend;
  // Try to match an unshifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(LHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = RHS;
    Offset = narrowIfNeeded(CurDAG, LHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(LHS))
      return true;
  }

  // Try to match an unshifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding &&
      (Ext = getExtendTypeForNode(RHS, true)) !=
          AArch64_AM::InvalidShiftExtend) {
    Base = LHS;
    Offset = narrowIfNeeded(CurDAG, RHS.getOperand(0));
    SignExtend = CurDAG->getTargetConstant(Ext == AArch64_AM::SXTW, dl,
                                           MVT::i32);
    if (isWorthFolding(RHS))
      return true;
  }

  return false;
}

// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or it can be encoded in an "ADD LSL #12" and cannot be
// encoded by one MOVZ, return true.
static bool isPreferredADD(int64_t ImmOff) {
  // Constants in [0x0, 0xfff] can be encoded in ADD.
  if ((ImmOff & 0xfffffffffffff000LL) == 0x0LL)
    return true;
  // Check if it can be encoded in an "ADD LSL #12".
  if ((ImmOff & 0xffffffffff000fffLL) == 0x0LL)
    // As a single MOVZ is faster than an "ADD LSL #12", ignore such constants.
    return (ImmOff & 0xffffffffff00ffffLL) != 0x0LL &&
           (ImmOff & 0xffffffffffff0fffLL) != 0x0LL;
  return false;
}

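// E.g. isPreferredADD(0x45000) is true: it needs "add ..., #0x45, lsl #12"
// and no single MOVZ can encode it. isPreferredADD(0xf000) is false, since
// "movz ..., #0xf000" already materializes it in one instruction.
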
bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {
  if (N.getOpcode() != ISD::ADD)
    return false;
  SDValue LHS = N.getOperand(0);
  SDValue RHS = N.getOperand(1);
  SDLoc DL(N);

  // Check if this particular node is reused in any non-memory related
  // operation. If yes, do not try to fold this node into the address
  // computation, since the computation will be kept.
  const SDNode *Node = N.getNode();
  for (SDNode *UI : Node->uses()) {
    if (!isa<MemSDNode>(*UI))
      return false;
  }

  // Watch out for a wide immediate RHS: it cannot be selected into the
  // [BaseReg+Imm] addressing mode, and it may not be encodable in an ADD/SUB
  // either. In that case the [BaseReg + 0] address mode would be used and
  // instructions like the following generated:
  //   MOV  X0, WideImmediate
  //   ADD  X1, BaseReg, X0
  //   LDR  X2, [X1, 0]
  // For such situations, using the [BaseReg, XReg] addressing mode saves one
  // ADD/SUB:
  //   MOV  X0, WideImmediate
  //   LDR  X2, [BaseReg, X0]
  if (isa<ConstantSDNode>(RHS)) {
    int64_t ImmOff = (int64_t)cast<ConstantSDNode>(RHS)->getZExtValue();
    unsigned Scale = Log2_32(Size);
    // Skip if the immediate can be selected by the load/store addressing
    // mode, or if it can be encoded by a single ADD (SUB is also covered by
    // checking -ImmOff).
    if ((ImmOff % Size == 0 && ImmOff >= 0 && ImmOff < (0x1000 << Scale)) ||
        isPreferredADD(ImmOff) || isPreferredADD(-ImmOff))
      return false;

    SDValue Ops[] = { RHS };
    SDNode *MOVI =
        CurDAG->getMachineNode(AArch64::MOVi64imm, DL, MVT::i64, Ops);
    SDValue MOVIV = SDValue(MOVI, 0);
    // This ADD of two X registers will be selected into [Reg+Reg] mode.
    N = CurDAG->getNode(ISD::ADD, DL, MVT::i64, LHS, MOVIV);
  }

  // Remember if it is worth folding N when it produces an extended register.
  bool IsExtendedRegisterWorthFolding = isWorthFolding(N);

  // Try to match a shifted extend on the RHS.
  if (IsExtendedRegisterWorthFolding && RHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(RHS, Size, false, Offset, SignExtend)) {
    Base = LHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Try to match a shifted extend on the LHS.
  if (IsExtendedRegisterWorthFolding && LHS.getOpcode() == ISD::SHL &&
      SelectExtendedSHL(LHS, Size, false, Offset, SignExtend)) {
    Base = RHS;
    DoShift = CurDAG->getTargetConstant(true, DL, MVT::i32);
    return true;
  }

  // Match any non-shifted, non-extend, non-immediate add expression.
  Base = LHS;
  Offset = RHS;
  SignExtend = CurDAG->getTargetConstant(false, DL, MVT::i32);
  DoShift = CurDAG->getTargetConstant(false, DL, MVT::i32);
  // Reg1 + Reg2 is free: no check needed.
  return true;
}

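// E.g. an 8-byte load at base + 0x123456 hits the path above: the offset is
// neither a valid scaled immediate (it is not 8-byte aligned) nor
// ADD-friendly, so it is materialized with MOVi64imm and the access becomes
// the register-register form "ldr x2, [x1, x0]" (register names illustrative).
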
SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::DDRegClassID, AArch64::DDDRegClassID, AArch64::DDDDRegClassID};
  static const unsigned SubRegs[] = {AArch64::dsub0, AArch64::dsub1,
                                     AArch64::dsub2, AArch64::dsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {
  static const unsigned RegClassIDs[] = {
      AArch64::QQRegClassID, AArch64::QQQRegClassID, AArch64::QQQQRegClassID};
  static const unsigned SubRegs[] = {AArch64::qsub0, AArch64::qsub1,
                                     AArch64::qsub2, AArch64::qsub3};

  return createTuple(Regs, RegClassIDs, SubRegs);
}

SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         const unsigned RegClassIDs[],
                                         const unsigned SubRegs[]) {
  // There's no special register-class for a vector-list of 1 element: it's just
  // a vector.
  if (Regs.size() == 1)
    return Regs[0];

  assert(Regs.size() >= 2 && Regs.size() <= 4);

  SDLoc DL(Regs[0]);

  SmallVector<SDValue, 4> Ops;

  // First operand of REG_SEQUENCE is the desired RegClass.
  Ops.push_back(
      CurDAG->getTargetConstant(RegClassIDs[Regs.size() - 2], DL, MVT::i32));

  // Then we get pairs of source & subregister-position for the components.
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Ops.push_back(Regs[i]);
    Ops.push_back(CurDAG->getTargetConstant(SubRegs[i], DL, MVT::i32));
  }

  SDNode *N =
      CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}

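// E.g. (illustrative) createQTuple with two registers emits
//   REG_SEQUENCE QQRegClassID, Vec0, qsub0, Vec1, qsub1
// yielding a single Untyped value that register allocation must place in
// consecutive Q registers.
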
void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
                                      bool isExt) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  unsigned ExtOff = isExt;

  // Form a REG_SEQUENCE to force register allocation.
  unsigned Vec0Off = ExtOff + 1;
  SmallVector<SDValue, 4> Regs(N->op_begin() + Vec0Off,
                               N->op_begin() + Vec0Off + NumVecs);
  SDValue RegSeq = createQTuple(Regs);

  SmallVector<SDValue, 6> Ops;
  if (isExt)
    Ops.push_back(N->getOperand(1));
  Ops.push_back(RegSeq);
  Ops.push_back(N->getOperand(NumVecs + ExtOff + 1));
  ReplaceNode(N, CurDAG->getMachineNode(Opc, dl, VT, Ops));
}

bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  if (LD->isUnindexed())
    return false;
  EVT VT = LD->getMemoryVT();
  EVT DstVT = N->getValueType(0);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  bool IsPre = AM == ISD::PRE_INC || AM == ISD::PRE_DEC;

  // We're not doing validity checking here. That was done when checking
  // if we should mark the load as indexed or not. We're just selecting
  // the right instruction.
  unsigned Opcode = 0;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  bool InsertTo64 = false;
  if (VT == MVT::i64)
    Opcode = IsPre ? AArch64::LDRXpre : AArch64::LDRXpost;
  else if (VT == MVT::i32) {
    if (ExtType == ISD::NON_EXTLOAD)
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
    else if (ExtType == ISD::SEXTLOAD)
      Opcode = IsPre ? AArch64::LDRSWpre : AArch64::LDRSWpost;
    else {
      Opcode = IsPre ? AArch64::LDRWpre : AArch64::LDRWpost;
      InsertTo64 = true;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i16) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSHXpre : AArch64::LDRSHXpost;
      else
        Opcode = IsPre ? AArch64::LDRSHWpre : AArch64::LDRSHWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRHHpre : AArch64::LDRHHpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::i8) {
    if (ExtType == ISD::SEXTLOAD) {
      if (DstVT == MVT::i64)
        Opcode = IsPre ? AArch64::LDRSBXpre : AArch64::LDRSBXpost;
      else
        Opcode = IsPre ? AArch64::LDRSBWpre : AArch64::LDRSBWpost;
    } else {
      Opcode = IsPre ? AArch64::LDRBBpre : AArch64::LDRBBpost;
      InsertTo64 = DstVT == MVT::i64;
      // The result of the load is only i32. It's the subreg_to_reg that makes
      // it into an i64.
      DstVT = MVT::i32;
    }
  } else if (VT == MVT::f16) {
    Opcode = IsPre ? AArch64::LDRHpre : AArch64::LDRHpost;
  } else if (VT == MVT::f32) {
    Opcode = IsPre ? AArch64::LDRSpre : AArch64::LDRSpost;
  } else if (VT == MVT::f64 || VT.is64BitVector()) {
    Opcode = IsPre ? AArch64::LDRDpre : AArch64::LDRDpost;
  } else if (VT.is128BitVector()) {
    Opcode = IsPre ? AArch64::LDRQpre : AArch64::LDRQpost;
  } else
    return false;
  SDValue Chain = LD->getChain();
  SDValue Base = LD->getBasePtr();
  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
  int OffsetVal = (int)OffsetOp->getZExtValue();
  SDLoc dl(N);
  SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
  SDValue Ops[] = { Base, Offset, Chain };
  SDNode *Res = CurDAG->getMachineNode(Opcode, dl, MVT::i64, DstVT,
                                       MVT::Other, Ops);
  // Either way, we're replacing the node, so tell the caller that.
  SDValue LoadedVal = SDValue(Res, 1);
  if (InsertTo64) {
    SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
    LoadedVal =
        SDValue(CurDAG->getMachineNode(
                    AArch64::SUBREG_TO_REG, dl, MVT::i64,
                    CurDAG->getTargetConstant(0, dl, MVT::i64), LoadedVal,
                    SubReg),
                0);
  }

  ReplaceUses(SDValue(N, 0), LoadedVal);
  ReplaceUses(SDValue(N, 1), SDValue(Res, 0));
  ReplaceUses(SDValue(N, 2), SDValue(Res, 2));
  CurDAG->RemoveDeadNode(N);
  return true;
}

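// Note the result-order swap performed above: the machine load returns
// (writeback, value, chain) while the target-independent indexed load's
// results are (value, writeback, chain), hence the crossed ReplaceUses
// calls for results 0 and 1.
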
void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                                     unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(2), // Mem operand
                   Chain};

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);
  for (unsigned i = 0; i < NumVecs; ++i)
    ReplaceUses(SDValue(N, i),
                CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT, SuperReg));

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
  CurDAG->RemoveDeadNode(N);
}

void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                         unsigned Opc, unsigned SubRegIdx) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SDValue Chain = N->getOperand(0);

  SDValue Ops[] = {N->getOperand(1), // Mem operand
                   N->getOperand(2), // Incremental
                   Chain};

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Untyped, MVT::Other};

  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1)
    ReplaceUses(SDValue(N, 0), SuperReg);
  else
    for (unsigned i = 0; i < NumVecs; ++i)
      ReplaceUses(SDValue(N, i),
                  CurDAG->getTargetExtractSubreg(SubRegIdx + i, dl, VT,
                                                 SuperReg));

  // Update the chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  CurDAG->RemoveDeadNode(N);
}

void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                      unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

  ReplaceNode(N, St);
}

void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  const EVT ResTys[] = {MVT::i64,    // Type of the write back register
                        MVT::Other}; // Type for the Chain

  // Form a REG_SEQUENCE to force register allocation.
  bool Is128Bit = VT.getSizeInBits() == 128;
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);
  SDValue RegSeq = Is128Bit ? createQTuple(Regs) : createDTuple(Regs);

  SDValue Ops[] = {RegSeq,
                   N->getOperand(NumVecs + 1), // base register
                   N->getOperand(NumVecs + 2), // Incremental
                   N->getOperand(0)};          // Chain
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  ReplaceNode(N, St);
}

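// In SelectLoad/SelectPostLoad above, SubRegIdx names the first of NumVecs
// consecutive dsub/qsub indices; e.g. an LD2 result is split back into its
// parts by extracting SubRegIdx and SubRegIdx + 1 from the Untyped tuple.
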
namespace {
/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {
  SelectionDAG &DAG;

public:
  WidenVector(SelectionDAG &DAG) : DAG(DAG) {}

  SDValue operator()(SDValue V64Reg) {
    EVT VT = V64Reg.getValueType();
    unsigned NarrowSize = VT.getVectorNumElements();
    MVT EltTy = VT.getVectorElementType().getSimpleVT();
    MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
    SDLoc DL(V64Reg);

    SDValue Undef =
        SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, WideTy), 0);
    return DAG.getTargetInsertSubreg(AArch64::dsub, DL, WideTy, Undef, V64Reg);
  }
};
} // namespace

/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
  EVT VT = V128Reg.getValueType();
  unsigned WideSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);

  return DAG.getTargetExtractSubreg(AArch64::dsub, SDLoc(V128Reg), NarrowTy,
                                    V128Reg);
}

void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                         unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::Untyped, MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
  SDValue SuperReg = SDValue(Ld, 0);

  EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
  static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
                                    AArch64::qsub2, AArch64::qsub3 };
  for (unsigned i = 0; i < NumVecs; ++i) {
    SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT, SuperReg);
    if (Narrow)
      NV = NarrowVector(NV, *CurDAG);
    ReplaceUses(SDValue(N, i), NV);
  }

  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 1));
  CurDAG->RemoveDeadNode(N);
}

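// The lane selectors only form tuples of Q registers, so for 64-bit vector
// types the inputs are first widened with WidenVector (via dsub) and each
// extracted result is narrowed back with NarrowVector, as done above.
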
void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        RegSeq->getValueType(0), MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq,
                   CurDAG->getTargetConstant(LaneNo, dl,
                                             MVT::i64),  // Lane Number
                   N->getOperand(NumVecs + 2),           // Base register
                   N->getOperand(NumVecs + 3),           // Incremental
                   N->getOperand(0)};
  SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Update uses of the write back register
  ReplaceUses(SDValue(N, NumVecs), SDValue(Ld, 0));

  // Update uses of the vector list
  SDValue SuperReg = SDValue(Ld, 1);
  if (NumVecs == 1) {
    ReplaceUses(SDValue(N, 0),
                Narrow ? NarrowVector(SuperReg, *CurDAG) : SuperReg);
  } else {
    EVT WideVT = RegSeq.getOperand(1)->getValueType(0);
    static const unsigned QSubs[] = { AArch64::qsub0, AArch64::qsub1,
                                      AArch64::qsub2, AArch64::qsub3 };
    for (unsigned i = 0; i < NumVecs; ++i) {
      SDValue NV = CurDAG->getTargetExtractSubreg(QSubs[i], dl, WideVT,
                                                  SuperReg);
      if (Narrow)
        NV = NarrowVector(NV, *CurDAG);
      ReplaceUses(SDValue(N, i), NV);
    }
  }

  // Update the Chain
  ReplaceUses(SDValue(N, NumVecs + 1), SDValue(Ld, 2));
  CurDAG->RemoveDeadNode(N);
}

void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 3), N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  ReplaceNode(N, St);
}

void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
                                              unsigned Opc) {
  SDLoc dl(N);
  EVT VT = N->getOperand(2)->getValueType(0);
  bool Narrow = VT.getSizeInBits() == 64;

  // Form a REG_SEQUENCE to force register allocation.
  SmallVector<SDValue, 4> Regs(N->op_begin() + 1, N->op_begin() + 1 + NumVecs);

  if (Narrow)
    std::transform(Regs.begin(), Regs.end(), Regs.begin(),
                   WidenVector(*CurDAG));

  SDValue RegSeq = createQTuple(Regs);

  const EVT ResTys[] = {MVT::i64, // Type of the write back register
                        MVT::Other};

  unsigned LaneNo =
      cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();

  SDValue Ops[] = {RegSeq, CurDAG->getTargetConstant(LaneNo, dl, MVT::i64),
                   N->getOperand(NumVecs + 2), // Base Register
                   N->getOperand(NumVecs + 3), // Incremental
                   N->getOperand(0)};
  SDNode *St = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

  ReplaceNode(N, St);
}

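// Transferring the memory operand, as done in the two store-lane selectors
// above, preserves the aliasing information that later machine passes rely
// on when reordering memory accesses.
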
static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
                                       unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       unsigned NumberOfIgnoredLowBits,
                                       bool BiggerPattern) {
  assert(N->getOpcode() == ISD::AND &&
         "N must be a AND operation to call this function");

  EVT VT = N->getValueType(0);

  // Here we can test the type of VT and return false when the type does not
  // match, but since it is done prior to that call in the current context
  // we turned that into an assert to avoid redundant code.
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "Type checking must have been done before calling this function");

  // FIXME: simplify-demanded-bits in DAGCombine will probably have
  // changed the AND node to a 32-bit mask operation. We'll have to
  // undo that as part of the transform here if we want to catch all
  // the opportunities.
  // Currently the NumberOfIgnoredLowBits argument helps to recover
  // from these situations when matching a bigger pattern (bitfield insert).

  // For unsigned extracts, check for a shift right and mask
  uint64_t AndImm = 0;
  if (!isOpcWithIntImmediate(N, ISD::AND, AndImm))
    return false;

  const SDNode *Op0 = N->getOperand(0).getNode();

  // Because of simplify-demanded-bits in DAGCombine, the mask may have been
  // simplified. Try to undo that.
  AndImm |= (1 << NumberOfIgnoredLowBits) - 1;

  // The immediate is a mask of the low bits iff imm & (imm+1) == 0
  if (AndImm & (AndImm + 1))
    return false;

  bool ClampMSB = false;
  uint64_t SrlImm = 0;
  // Handle the SRL + ANY_EXTEND case.
  if (VT == MVT::i64 && Op0->getOpcode() == ISD::ANY_EXTEND &&
      isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL, SrlImm)) {
    // Extend the incoming operand of the SRL to 64-bit.
    Opd0 = Widen(CurDAG, Op0->getOperand(0).getOperand(0));
    // Make sure to clamp the MSB so that we preserve the semantics of the
    // original operations.
    ClampMSB = true;
  } else if (VT == MVT::i32 && Op0->getOpcode() == ISD::TRUNCATE &&
             isOpcWithIntImmediate(Op0->getOperand(0).getNode(), ISD::SRL,
                                   SrlImm)) {
    // If the shift result was truncated, we can still combine them.
    Opd0 = Op0->getOperand(0).getOperand(0);

    // Use the type of SRL node.
    VT = Opd0->getValueType(0);
  } else if (isOpcWithIntImmediate(Op0, ISD::SRL, SrlImm)) {
    Opd0 = Op0->getOperand(0);
  } else if (BiggerPattern) {
    // Let's pretend a 0 shift right has been performed.
    // The resulting code will be at least as good as the original one
    // plus it may expose more opportunities for bitfield insert pattern.
    // FIXME: Currently we limit this to the bigger pattern, because
    // some optimizations expect AND and not UBFM.
    Opd0 = N->getOperand(0);
  } else
    return false;

  // Bail out on large immediates. This happens when no proper
  // combining/constant folding was performed.
  if (!BiggerPattern && (SrlImm <= 0 || SrlImm >= VT.getSizeInBits())) {
    DEBUG((dbgs() << N
           << ": Found large shift immediate, this should not happen\n"));
    return false;
  }

  LSB = SrlImm;
  MSB = SrlImm + (VT == MVT::i32 ? countTrailingOnes<uint32_t>(AndImm)
                                 : countTrailingOnes<uint64_t>(AndImm)) -
        1;
  if (ClampMSB)
    // Since we're moving the extend before the right shift operation, we need
    // to clamp the MSB to make sure we don't shift in undefined bits instead of
    // the zeros which would get shifted in with the original right shift
    // operation.
    MSB = MSB > 31 ? 31 : MSB;

  Opc = VT == MVT::i32 ? AArch64::UBFMWri : AArch64::UBFMXri;
  return true;
}

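// E.g. (and (srl x, 4), 0xff) on i32 yields SrlImm = 4 and an 8-bit mask, so
// LSB = 4 and MSB = 4 + 8 - 1 = 11, i.e. the node is selected as
//   UBFM x, #4, #11
// which extracts bits [11:4] of x into the low bits of the result.
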
AArch64::SBFMWri : AArch64::SBFMXri;
1536   Opd0 = Op.getOperand(0);
1537   Immr = ShiftImm;
1538   Imms = ShiftImm + Width - 1;
1539   return true;
1540 }
1541
1542 static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
1543                                           SDValue &Opd0, unsigned &LSB,
1544                                           unsigned &MSB) {
1545   // We are looking for the following pattern, which basically extracts several
1546   // contiguous bits from the source value and places them at the LSB of the
1547   // destination value; all other bits of the destination value are set to zero:
1548   //
1549   // Value2 = AND Value, MaskImm
1550   // SRL Value2, ShiftImm
1551   //
1552   // where MaskImm >> ShiftImm gives the width of the extracted bitfield.
1553   //
1554   // This gets selected into a single UBFM:
1555   //
1556   // UBFM Value, ShiftImm, BitWide + SrlImm - 1
1557   //
1558
1559   if (N->getOpcode() != ISD::SRL)
1560     return false;
1561
1562   uint64_t AndMask = 0;
1563   if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, AndMask))
1564     return false;
1565
1566   Opd0 = N->getOperand(0).getOperand(0);
1567
1568   uint64_t SrlImm = 0;
1569   if (!isIntImmediate(N->getOperand(1), SrlImm))
1570     return false;
1571
1572   // Check whether we really have a several-bits extract here.
1573   unsigned BitWide = 64 - countLeadingOnes(~(AndMask >> SrlImm));
1574   if (BitWide && isMask_64(AndMask >> SrlImm)) {
1575     if (N->getValueType(0) == MVT::i32)
1576       Opc = AArch64::UBFMWri;
1577     else
1578       Opc = AArch64::UBFMXri;
1579
1580     LSB = SrlImm;
1581     MSB = BitWide + SrlImm - 1;
1582     return true;
1583   }
1584
1585   return false;
1586 }
1587
1588 static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
1589                                        unsigned &Immr, unsigned &Imms,
1590                                        bool BiggerPattern) {
1591   assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
1592          "N must be a SHR/SRA operation to call this function");
1593
1594   EVT VT = N->getValueType(0);
1595
1596   // Here we could test the type of VT and return false when it does not
1597   // match, but since that check is done prior to this call in the current
1598   // context, we turn it into an assert to avoid redundant code.
1599   assert((VT == MVT::i32 || VT == MVT::i64) &&
1600          "Type checking must have been done before calling this function");
1601
1602   // Check for AND + SRL doing a several-bits extract.
1603   if (isSeveralBitsExtractOpFromShr(N, Opc, Opd0, Immr, Imms))
1604     return true;
1605
1606   // We're looking for a shift of a shift.
1607   uint64_t ShlImm = 0;
1608   uint64_t TruncBits = 0;
1609   if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, ShlImm)) {
1610     Opd0 = N->getOperand(0).getOperand(0);
1611   } else if (VT == MVT::i32 && N->getOpcode() == ISD::SRL &&
1612              N->getOperand(0).getNode()->getOpcode() == ISD::TRUNCATE) {
1613     // We are looking for a shift of a truncate. Truncate from i64 to i32 can
1614     // be considered as setting the high 32 bits to zero. Our strategy here is
1615     // to always generate a 64-bit UBFM. This consistency will help the CSE
1616     // pass later find more redundancy.
1617     Opd0 = N->getOperand(0).getOperand(0);
1618     TruncBits = Opd0->getValueType(0).getSizeInBits() - VT.getSizeInBits();
1619     VT = Opd0->getValueType(0);
1620     assert(VT == MVT::i64 && "the promoted type should be i64");
1621   } else if (BiggerPattern) {
1622     // Let's pretend a 0 shift left has been performed.
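    // (Illustrative note: for the bigger pattern we effectively treat
    // (srl x, C) as (srl (shl x, 0), C); with ShlImm == 0 and TruncBits == 0
    // the immediates below come out as Immr = C and Imms = size - 1.)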
1623 // FIXME: Currently we limit this to the bigger pattern case, 1624 // because some optimizations expect AND and not UBFM 1625 Opd0 = N->getOperand(0); 1626 } else 1627 return false; 1628 1629 // Missing combines/constant folding may have left us with strange 1630 // constants. 1631 if (ShlImm >= VT.getSizeInBits()) { 1632 DEBUG((dbgs() << N 1633 << ": Found large shift immediate, this should not happen\n")); 1634 return false; 1635 } 1636 1637 uint64_t SrlImm = 0; 1638 if (!isIntImmediate(N->getOperand(1), SrlImm)) 1639 return false; 1640 1641 assert(SrlImm > 0 && SrlImm < VT.getSizeInBits() && 1642 "bad amount in shift node!"); 1643 int immr = SrlImm - ShlImm; 1644 Immr = immr < 0 ? immr + VT.getSizeInBits() : immr; 1645 Imms = VT.getSizeInBits() - ShlImm - TruncBits - 1; 1646 // SRA requires a signed extraction 1647 if (VT == MVT::i32) 1648 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMWri : AArch64::UBFMWri; 1649 else 1650 Opc = N->getOpcode() == ISD::SRA ? AArch64::SBFMXri : AArch64::UBFMXri; 1651 return true; 1652 } 1653 1654 bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) { 1655 assert(N->getOpcode() == ISD::SIGN_EXTEND); 1656 1657 EVT VT = N->getValueType(0); 1658 EVT NarrowVT = N->getOperand(0)->getValueType(0); 1659 if (VT != MVT::i64 || NarrowVT != MVT::i32) 1660 return false; 1661 1662 uint64_t ShiftImm; 1663 SDValue Op = N->getOperand(0); 1664 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SRA, ShiftImm)) 1665 return false; 1666 1667 SDLoc dl(N); 1668 // Extend the incoming operand of the shift to 64-bits. 1669 SDValue Opd0 = Widen(CurDAG, Op.getOperand(0)); 1670 unsigned Immr = ShiftImm; 1671 unsigned Imms = NarrowVT.getSizeInBits() - 1; 1672 SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT), 1673 CurDAG->getTargetConstant(Imms, dl, VT)}; 1674 CurDAG->SelectNodeTo(N, AArch64::SBFMXri, VT, Ops); 1675 return true; 1676 } 1677 1678 static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc, 1679 SDValue &Opd0, unsigned &Immr, unsigned &Imms, 1680 unsigned NumberOfIgnoredLowBits = 0, 1681 bool BiggerPattern = false) { 1682 if (N->getValueType(0) != MVT::i32 && N->getValueType(0) != MVT::i64) 1683 return false; 1684 1685 switch (N->getOpcode()) { 1686 default: 1687 if (!N->isMachineOpcode()) 1688 return false; 1689 break; 1690 case ISD::AND: 1691 return isBitfieldExtractOpFromAnd(CurDAG, N, Opc, Opd0, Immr, Imms, 1692 NumberOfIgnoredLowBits, BiggerPattern); 1693 case ISD::SRL: 1694 case ISD::SRA: 1695 return isBitfieldExtractOpFromShr(N, Opc, Opd0, Immr, Imms, BiggerPattern); 1696 1697 case ISD::SIGN_EXTEND_INREG: 1698 return isBitfieldExtractOpFromSExtInReg(N, Opc, Opd0, Immr, Imms); 1699 } 1700 1701 unsigned NOpc = N->getMachineOpcode(); 1702 switch (NOpc) { 1703 default: 1704 return false; 1705 case AArch64::SBFMWri: 1706 case AArch64::UBFMWri: 1707 case AArch64::SBFMXri: 1708 case AArch64::UBFMXri: 1709 Opc = NOpc; 1710 Opd0 = N->getOperand(0); 1711 Immr = cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue(); 1712 Imms = cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue(); 1713 return true; 1714 } 1715 // Unreachable 1716 return false; 1717 } 1718 1719 bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) { 1720 unsigned Opc, Immr, Imms; 1721 SDValue Opd0; 1722 if (!isBitfieldExtractOp(CurDAG, N, Opc, Opd0, Immr, Imms)) 1723 return false; 1724 1725 EVT VT = N->getValueType(0); 1726 SDLoc dl(N); 1727 1728 // If the bit extract operation is 64bit but the original type is 32bit, we 1729 // need 
to add one EXTRACT_SUBREG.
1730   if ((Opc == AArch64::SBFMXri || Opc == AArch64::UBFMXri) && VT == MVT::i32) {
1731     SDValue Ops64[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, MVT::i64),
1732                        CurDAG->getTargetConstant(Imms, dl, MVT::i64)};
1733
1734     SDNode *BFM = CurDAG->getMachineNode(Opc, dl, MVT::i64, Ops64);
1735     SDValue SubReg = CurDAG->getTargetConstant(AArch64::sub_32, dl, MVT::i32);
1736     ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
1737                                           MVT::i32, SDValue(BFM, 0), SubReg));
1738     return true;
1739   }
1740
1741   SDValue Ops[] = {Opd0, CurDAG->getTargetConstant(Immr, dl, VT),
1742                    CurDAG->getTargetConstant(Imms, dl, VT)};
1743   CurDAG->SelectNodeTo(N, Opc, VT, Ops);
1744   return true;
1745 }
1746
1747 /// Does DstMask form a complementary pair with the mask provided by
1748 /// BitsToBeInserted, suitable for use in a BFI instruction? Roughly speaking,
1749 /// this asks whether DstMask zeroes precisely those bits that will be set by
1750 /// the other half.
1751 static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
1752                               unsigned NumberOfIgnoredHighBits, EVT VT) {
1753   assert((VT == MVT::i32 || VT == MVT::i64) &&
1754          "i32 or i64 mask type expected!");
1755   unsigned BitWidth = VT.getSizeInBits() - NumberOfIgnoredHighBits;
1756
1757   APInt SignificantDstMask = APInt(BitWidth, DstMask);
1758   APInt SignificantBitsToBeInserted = BitsToBeInserted.zextOrTrunc(BitWidth);
1759
1760   return (SignificantDstMask & SignificantBitsToBeInserted) == 0 &&
1761          (SignificantDstMask | SignificantBitsToBeInserted).isAllOnesValue();
1762 }
1763
1764 // Look for bits that will be useful for later uses.
1765 // A bit is considered useless as soon as it is dropped and never used
1766 // before it has been dropped.
1767 // E.g., looking for the useful bits of x
1768 // 1. y = x & 0x7
1769 // 2. z = y >> 2
1770 // After #1, the useful bits of x are 0x7; they then live on through
1771 // y.
1772 // After #2, the useful bits of x are 0x4.
1773 // However, if x is used by an unpredictable instruction, then all its bits
1774 // are useful.
1775 // E.g.
1776 // 1. y = x & 0x7
1777 // 2. z = y >> 2
1778 // 3.
str x, [@x] 1779 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0); 1780 1781 static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits, 1782 unsigned Depth) { 1783 uint64_t Imm = 1784 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue(); 1785 Imm = AArch64_AM::decodeLogicalImmediate(Imm, UsefulBits.getBitWidth()); 1786 UsefulBits &= APInt(UsefulBits.getBitWidth(), Imm); 1787 getUsefulBits(Op, UsefulBits, Depth + 1); 1788 } 1789 1790 static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits, 1791 uint64_t Imm, uint64_t MSB, 1792 unsigned Depth) { 1793 // inherit the bitwidth value 1794 APInt OpUsefulBits(UsefulBits); 1795 OpUsefulBits = 1; 1796 1797 if (MSB >= Imm) { 1798 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1); 1799 --OpUsefulBits; 1800 // The interesting part will be in the lower part of the result 1801 getUsefulBits(Op, OpUsefulBits, Depth + 1); 1802 // The interesting part was starting at Imm in the argument 1803 OpUsefulBits = OpUsefulBits.shl(Imm); 1804 } else { 1805 OpUsefulBits = OpUsefulBits.shl(MSB + 1); 1806 --OpUsefulBits; 1807 // The interesting part will be shifted in the result 1808 OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm); 1809 getUsefulBits(Op, OpUsefulBits, Depth + 1); 1810 // The interesting part was at zero in the argument 1811 OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm); 1812 } 1813 1814 UsefulBits &= OpUsefulBits; 1815 } 1816 1817 static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits, 1818 unsigned Depth) { 1819 uint64_t Imm = 1820 cast<const ConstantSDNode>(Op.getOperand(1).getNode())->getZExtValue(); 1821 uint64_t MSB = 1822 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue(); 1823 1824 getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth); 1825 } 1826 1827 static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits, 1828 unsigned Depth) { 1829 uint64_t ShiftTypeAndValue = 1830 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue(); 1831 APInt Mask(UsefulBits); 1832 Mask.clearAllBits(); 1833 Mask.flipAllBits(); 1834 1835 if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSL) { 1836 // Shift Left 1837 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue); 1838 Mask = Mask.shl(ShiftAmt); 1839 getUsefulBits(Op, Mask, Depth + 1); 1840 Mask = Mask.lshr(ShiftAmt); 1841 } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) { 1842 // Shift Right 1843 // We do not handle AArch64_AM::ASR, because the sign will change the 1844 // number of useful bits 1845 uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue); 1846 Mask = Mask.lshr(ShiftAmt); 1847 getUsefulBits(Op, Mask, Depth + 1); 1848 Mask = Mask.shl(ShiftAmt); 1849 } else 1850 return; 1851 1852 UsefulBits &= Mask; 1853 } 1854 1855 static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits, 1856 unsigned Depth) { 1857 uint64_t Imm = 1858 cast<const ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue(); 1859 uint64_t MSB = 1860 cast<const ConstantSDNode>(Op.getOperand(3).getNode())->getZExtValue(); 1861 1862 if (Op.getOperand(1) == Orig) 1863 return getUsefulBitsFromBitfieldMoveOpd(Op, UsefulBits, Imm, MSB, Depth); 1864 1865 APInt OpUsefulBits(UsefulBits); 1866 OpUsefulBits = 1; 1867 1868 if (MSB >= Imm) { 1869 OpUsefulBits = OpUsefulBits.shl(MSB - Imm + 1); 1870 --OpUsefulBits; 1871 UsefulBits &= ~OpUsefulBits; 1872 getUsefulBits(Op, 
UsefulBits, Depth + 1);
1873   } else {
1874     OpUsefulBits = OpUsefulBits.shl(MSB + 1);
1875     --OpUsefulBits;
1876     UsefulBits = ~(OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm));
1877     getUsefulBits(Op, UsefulBits, Depth + 1);
1878   }
1879 }
1880
1881 static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
1882                                 SDValue Orig, unsigned Depth) {
1883
1884   // Users of this node should have already been instruction selected
1885   // FIXME: Can we turn that into an assert?
1886   if (!UserNode->isMachineOpcode())
1887     return;
1888
1889   switch (UserNode->getMachineOpcode()) {
1890   default:
1891     return;
1892   case AArch64::ANDSWri:
1893   case AArch64::ANDSXri:
1894   case AArch64::ANDWri:
1895   case AArch64::ANDXri:
1896     // We increment Depth only when we call getUsefulBits
1897     return getUsefulBitsFromAndWithImmediate(SDValue(UserNode, 0), UsefulBits,
1898                                              Depth);
1899   case AArch64::UBFMWri:
1900   case AArch64::UBFMXri:
1901     return getUsefulBitsFromUBFM(SDValue(UserNode, 0), UsefulBits, Depth);
1902
1903   case AArch64::ORRWrs:
1904   case AArch64::ORRXrs:
1905     if (UserNode->getOperand(1) != Orig)
1906       return;
1907     return getUsefulBitsFromOrWithShiftedReg(SDValue(UserNode, 0), UsefulBits,
1908                                              Depth);
1909   case AArch64::BFMWri:
1910   case AArch64::BFMXri:
1911     return getUsefulBitsFromBFM(SDValue(UserNode, 0), Orig, UsefulBits, Depth);
1912
1913   case AArch64::STRBBui:
1914   case AArch64::STURBBi:
1915     if (UserNode->getOperand(0) != Orig)
1916       return;
1917     UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xff);
1918     return;
1919
1920   case AArch64::STRHHui:
1921   case AArch64::STURHHi:
1922     if (UserNode->getOperand(0) != Orig)
1923       return;
1924     UsefulBits &= APInt(UsefulBits.getBitWidth(), 0xffff);
1925     return;
1926   }
1927 }
1928
1929 static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
1930   if (Depth >= 6)
1931     return;
1932   // Initialize UsefulBits
1933   if (!Depth) {
1934     unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
1935     // At the beginning, assume every produced bit is useful
1936     UsefulBits = APInt(Bitwidth, 0);
1937     UsefulBits.flipAllBits();
1938   }
1939   APInt UsersUsefulBits(UsefulBits.getBitWidth(), 0);
1940
1941   for (SDNode *Node : Op.getNode()->uses()) {
1942     // A use can only narrow the set of useful bits, never widen it
1943     APInt UsefulBitsForUse = APInt(UsefulBits);
1944     getUsefulBitsForUse(Node, UsefulBitsForUse, Op, Depth);
1945     UsersUsefulBits |= UsefulBitsForUse;
1946   }
1947   // UsefulBits contains the produced bits that are meaningful for the
1948   // current definition, thus a user cannot make a bit meaningful at
1949   // this point
1950   UsefulBits &= UsersUsefulBits;
1951 }
1952
1953 /// Create a machine node performing a notional SHL of Op by ShlAmount. If
1954 /// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
1955 /// 0, return Op unchanged.
1956 static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {
1957   if (ShlAmount == 0)
1958     return Op;
1959
1960   EVT VT = Op.getValueType();
1961   SDLoc dl(Op);
1962   unsigned BitWidth = VT.getSizeInBits();
1963   unsigned UBFMOpc = BitWidth == 32 ?
AArch64::UBFMWri : AArch64::UBFMXri; 1964 1965 SDNode *ShiftNode; 1966 if (ShlAmount > 0) { 1967 // LSL wD, wN, #Amt == UBFM wD, wN, #32-Amt, #31-Amt 1968 ShiftNode = CurDAG->getMachineNode( 1969 UBFMOpc, dl, VT, Op, 1970 CurDAG->getTargetConstant(BitWidth - ShlAmount, dl, VT), 1971 CurDAG->getTargetConstant(BitWidth - 1 - ShlAmount, dl, VT)); 1972 } else { 1973 // LSR wD, wN, #Amt == UBFM wD, wN, #Amt, #32-1 1974 assert(ShlAmount < 0 && "expected right shift"); 1975 int ShrAmount = -ShlAmount; 1976 ShiftNode = CurDAG->getMachineNode( 1977 UBFMOpc, dl, VT, Op, CurDAG->getTargetConstant(ShrAmount, dl, VT), 1978 CurDAG->getTargetConstant(BitWidth - 1, dl, VT)); 1979 } 1980 1981 return SDValue(ShiftNode, 0); 1982 } 1983 1984 /// Does this tree qualify as an attempt to move a bitfield into position, 1985 /// essentially "(and (shl VAL, N), Mask)". 1986 static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op, 1987 bool BiggerPattern, 1988 SDValue &Src, int &ShiftAmount, 1989 int &MaskWidth) { 1990 EVT VT = Op.getValueType(); 1991 unsigned BitWidth = VT.getSizeInBits(); 1992 (void)BitWidth; 1993 assert(BitWidth == 32 || BitWidth == 64); 1994 1995 APInt KnownZero, KnownOne; 1996 CurDAG->computeKnownBits(Op, KnownZero, KnownOne); 1997 1998 // Non-zero in the sense that they're not provably zero, which is the key 1999 // point if we want to use this value 2000 uint64_t NonZeroBits = (~KnownZero).getZExtValue(); 2001 2002 // Discard a constant AND mask if present. It's safe because the node will 2003 // already have been factored into the computeKnownBits calculation above. 2004 uint64_t AndImm; 2005 if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) { 2006 assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0); 2007 Op = Op.getOperand(0); 2008 } 2009 2010 // Don't match if the SHL has more than one use, since then we'll end up 2011 // generating SHL+UBFIZ instead of just keeping SHL+AND. 2012 if (!BiggerPattern && !Op.hasOneUse()) 2013 return false; 2014 2015 uint64_t ShlImm; 2016 if (!isOpcWithIntImmediate(Op.getNode(), ISD::SHL, ShlImm)) 2017 return false; 2018 Op = Op.getOperand(0); 2019 2020 if (!isShiftedMask_64(NonZeroBits)) 2021 return false; 2022 2023 ShiftAmount = countTrailingZeros(NonZeroBits); 2024 MaskWidth = countTrailingOnes(NonZeroBits >> ShiftAmount); 2025 2026 // BFI encompasses sufficiently many nodes that it's worth inserting an extra 2027 // LSL/LSR if the mask in NonZeroBits doesn't quite match up with the ISD::SHL 2028 // amount. BiggerPattern is true when this pattern is being matched for BFI, 2029 // BiggerPattern is false when this pattern is being matched for UBFIZ, in 2030 // which case it is not profitable to insert an extra shift. 2031 if (ShlImm - ShiftAmount != 0 && !BiggerPattern) 2032 return false; 2033 Src = getLeftShift(CurDAG, Op, ShlImm - ShiftAmount); 2034 2035 return true; 2036 } 2037 2038 static bool isShiftedMask(uint64_t Mask, EVT VT) { 2039 assert(VT == MVT::i32 || VT == MVT::i64); 2040 if (VT == MVT::i32) 2041 return isShiftedMask_32(Mask); 2042 return isShiftedMask_64(Mask); 2043 } 2044 2045 // Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being 2046 // inserted only sets known zero bits. 
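// Illustrative example (values assumed, not from this file): on i32,
//   %and = and i32 %x, 0xFFFF00FF   ; bits [15:8] become known zero
//   %or  = or i32 %and, 0x5500      ; only sets bits inside that hole
// selects to
//   %c  = MOVi32imm 0x55
//   %or = BFMWri %x, %c, 24, 7      ; i.e. "bfi w0, w1, #8, #8"
// since LSB = 8, Width = 8, ImmR = (32 - 8) % 32 = 24 and ImmS = 7.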
2047 static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {
2048   assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
2049
2050   EVT VT = N->getValueType(0);
2051   if (VT != MVT::i32 && VT != MVT::i64)
2052     return false;
2053
2054   unsigned BitWidth = VT.getSizeInBits();
2055
2056   uint64_t OrImm;
2057   if (!isOpcWithIntImmediate(N, ISD::OR, OrImm))
2058     return false;
2059
2060   // Skip this transformation if the OR immediate can already be encoded
2061   // directly in the ORR. Otherwise, we'll trade an AND+ORR for ORR+BFI/BFXIL,
2062   // which is most likely performance neutral.
2063   if (AArch64_AM::isLogicalImmediate(OrImm, BitWidth))
2064     return false;
2065
2066   uint64_t MaskImm;
2067   SDValue And = N->getOperand(0);
2068   // Must be a single use AND with an immediate operand.
2069   if (!And.hasOneUse() ||
2070       !isOpcWithIntImmediate(And.getNode(), ISD::AND, MaskImm))
2071     return false;
2072
2073   // Compute the Known Zero for the AND as this allows us to catch more general
2074   // cases than just looking for AND with imm.
2075   APInt KnownZero, KnownOne;
2076   CurDAG->computeKnownBits(And, KnownZero, KnownOne);
2077
2078   // Non-zero in the sense that they're not provably zero, which is the key
2079   // point if we want to use this value.
2080   uint64_t NotKnownZero = (~KnownZero).getZExtValue();
2081
2082   // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
2083   if (!isShiftedMask(KnownZero.getZExtValue(), VT))
2084     return false;
2085
2086   // The bits being inserted must only set those bits that are known to be zero.
2087   if ((OrImm & NotKnownZero) != 0) {
2088     // FIXME: It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
2089     // currently handle this case.
2090     return false;
2091   }
2092
2093   // BFI/BFXIL dst, src, #lsb, #width.
2094   int LSB = countTrailingOnes(NotKnownZero);
2095   int Width = BitWidth - APInt(BitWidth, NotKnownZero).countPopulation();
2096
2097   // BFI/BFXIL is an alias of BFM, so translate to BFM operands.
2098   unsigned ImmR = (BitWidth - LSB) % BitWidth;
2099   unsigned ImmS = Width - 1;
2100
2101   // If we're creating a BFI instruction, avoid cases where we need more
2102   // instructions to materialize the BFI constant as compared to the original
2103   // ORR. A BFXIL will use the same constant as the original ORR, so the code
2104   // should be no worse in this case.
2105   bool IsBFI = LSB != 0;
2106   uint64_t BFIImm = OrImm >> LSB;
2107   if (IsBFI && !AArch64_AM::isLogicalImmediate(BFIImm, BitWidth)) {
2108     // We have a BFI instruction and we know the constant can't be materialized
2109     // with an ORR-immediate with the zero register.
2110     unsigned OrChunks = 0, BFIChunks = 0;
2111     for (unsigned Shift = 0; Shift < BitWidth; Shift += 16) {
2112       if (((OrImm >> Shift) & 0xFFFF) != 0)
2113         ++OrChunks;
2114       if (((BFIImm >> Shift) & 0xFFFF) != 0)
2115         ++BFIChunks;
2116     }
2117     if (BFIChunks > OrChunks)
2118       return false;
2119   }
2120
2121   // Materialize the constant to be inserted.
2122   SDLoc DL(N);
2123   unsigned MOVIOpc = VT == MVT::i32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;
2124   SDNode *MOVI = CurDAG->getMachineNode(
2125       MOVIOpc, DL, VT, CurDAG->getTargetConstant(BFIImm, DL, VT));
2126
2127   // Create the BFI/BFXIL instruction.
2128   SDValue Ops[] = {And.getOperand(0), SDValue(MOVI, 0),
2129                    CurDAG->getTargetConstant(ImmR, DL, VT),
2130                    CurDAG->getTargetConstant(ImmS, DL, VT)};
2131   unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2132   CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2133   return true;
2134 }
2135
2136 static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
2137                                       SelectionDAG *CurDAG) {
2138   assert(N->getOpcode() == ISD::OR && "Expect an OR operation");
2139
2140   EVT VT = N->getValueType(0);
2141   if (VT != MVT::i32 && VT != MVT::i64)
2142     return false;
2143
2144   unsigned BitWidth = VT.getSizeInBits();
2145
2146   // Because of simplify-demanded-bits in DAGCombine, involved masks may not
2147   // have the expected shape. Try to undo that.
2148
2149   unsigned NumberOfIgnoredLowBits = UsefulBits.countTrailingZeros();
2150   unsigned NumberOfIgnoredHighBits = UsefulBits.countLeadingZeros();
2151
2152   // Given an OR operation, check if we have the following pattern
2153   // ubfm c, b, imm, imm2 (or something that does the same job, see
2154   // isBitfieldExtractOp)
2155   // d = e & mask2 ; where mask2 is a binary sequence of 1..10..0 and
2156   // countTrailingZeros(mask2) == imm2 - imm + 1
2157   // f = d | c
2158   // if yes, replace the OR instruction with:
2159   // f = BFM Opd0, Opd1, LSB, MSB ; where LSB = imm, and MSB = imm2
2160
2161   // OR is commutative, check all combinations of operand order and values of
2162   // BiggerPattern, i.e.
2163   //     Opd0, Opd1, BiggerPattern=false
2164   //     Opd1, Opd0, BiggerPattern=false
2165   //     Opd0, Opd1, BiggerPattern=true
2166   //     Opd1, Opd0, BiggerPattern=true
2167   // Several of these combinations may match, so check with BiggerPattern=false
2168   // first since that will produce better results by matching more instructions
2169   // and/or inserting fewer extra instructions.
2170   for (int I = 0; I < 4; ++I) {
2171
2172     SDValue Dst, Src;
2173     unsigned ImmR, ImmS;
2174     bool BiggerPattern = I / 2;
2175     SDValue OrOpd0Val = N->getOperand(I % 2);
2176     SDNode *OrOpd0 = OrOpd0Val.getNode();
2177     SDValue OrOpd1Val = N->getOperand((I + 1) % 2);
2178     SDNode *OrOpd1 = OrOpd1Val.getNode();
2179
2180     unsigned BFXOpc;
2181     int DstLSB, Width;
2182     if (isBitfieldExtractOp(CurDAG, OrOpd0, BFXOpc, Src, ImmR, ImmS,
2183                             NumberOfIgnoredLowBits, BiggerPattern)) {
2184       // Check that the returned opcode is compatible with the pattern,
2185       // i.e., same type and zero extended (U and not S)
2186       if ((BFXOpc != AArch64::UBFMXri && VT == MVT::i64) ||
2187           (BFXOpc != AArch64::UBFMWri && VT == MVT::i32))
2188         continue;
2189
2190       // Compute the width of the bitfield insertion
2191       DstLSB = 0;
2192       Width = ImmS - ImmR + 1;
2193       // FIXME: This constraint is to catch bitfield insertion; we may
2194       // want to widen the pattern if we want to grab the general bitfield
2195       // move case
2196       if (Width <= 0)
2197         continue;
2198
2199       // If the mask on the insertee is correct, we have a BFXIL operation. We
2200       // can share the ImmR and ImmS values from the already-computed UBFM.
2201     } else if (isBitfieldPositioningOp(CurDAG, OrOpd0Val,
2202                                        BiggerPattern,
2203                                        Src, DstLSB, Width)) {
2204       ImmR = (BitWidth - DstLSB) % BitWidth;
2205       ImmS = Width - 1;
2206     } else
2207       continue;
2208
2209     // Check the second part of the pattern
2210     EVT VT = OrOpd1->getValueType(0);
2211     assert((VT == MVT::i32 || VT == MVT::i64) && "unexpected OR operand");
2212
2213     // Compute the Known Zero for the candidate destination operand.
2214     // This allows us to catch more general cases than just looking for
2215     // AND with imm. Indeed, simplify-demanded-bits may have removed
2216     // the AND instruction because it proved it was useless.
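    // (For instance, illustratively: if the candidate operand is
    // (srl x, 16) on i32, its top 16 bits are known zero even though no
    // AND remains, so a 16-bit field can still be inserted at bit 16.)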
2217     APInt KnownZero, KnownOne;
2218     CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
2219
2220     // Check if there is enough room for the second operand to appear
2221     // in the first one
2222     APInt BitsToBeInserted =
2223         APInt::getBitsSet(KnownZero.getBitWidth(), DstLSB, DstLSB + Width);
2224
2225     if ((BitsToBeInserted & ~KnownZero) != 0)
2226       continue;
2227
2228     // Set the first operand
2229     uint64_t Imm;
2230     if (isOpcWithIntImmediate(OrOpd1, ISD::AND, Imm) &&
2231         isBitfieldDstMask(Imm, BitsToBeInserted, NumberOfIgnoredHighBits, VT))
2232       // In that case, we can eliminate the AND
2233       Dst = OrOpd1->getOperand(0);
2234     else
2235       // Maybe the AND has been removed by simplify-demanded-bits
2236       // or is useful because it discards more bits
2237       Dst = OrOpd1Val;
2238
2239     // Both parts match
2240     SDLoc DL(N);
2241     SDValue Ops[] = {Dst, Src, CurDAG->getTargetConstant(ImmR, DL, VT),
2242                      CurDAG->getTargetConstant(ImmS, DL, VT)};
2243     unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2244     CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2245     return true;
2246   }
2247
2248   // Generate a BFXIL from 'or (and X, Mask0Imm), (and Y, Mask1Imm)' iff
2249   // Mask0Imm and ~Mask1Imm are equivalent and one of the MaskImms is a shifted
2250   // mask (e.g., 0x000ffff0).
2251   uint64_t Mask0Imm, Mask1Imm;
2252   SDValue And0 = N->getOperand(0);
2253   SDValue And1 = N->getOperand(1);
2254   if (And0.hasOneUse() && And1.hasOneUse() &&
2255       isOpcWithIntImmediate(And0.getNode(), ISD::AND, Mask0Imm) &&
2256       isOpcWithIntImmediate(And1.getNode(), ISD::AND, Mask1Imm) &&
2257       APInt(BitWidth, Mask0Imm) == ~APInt(BitWidth, Mask1Imm) &&
2258       (isShiftedMask(Mask0Imm, VT) || isShiftedMask(Mask1Imm, VT))) {
2259
2260     // ORR is commutative, so canonicalize to the form 'or (and X, Mask0Imm),
2261     // (and Y, Mask1Imm)' where Mask1Imm is the shifted mask masking off the
2262     // bits to be inserted.
2263     if (isShiftedMask(Mask0Imm, VT)) {
2264       std::swap(And0, And1);
2265       std::swap(Mask0Imm, Mask1Imm);
2266     }
2267
2268     SDValue Src = And1->getOperand(0);
2269     SDValue Dst = And0->getOperand(0);
2270     unsigned LSB = countTrailingZeros(Mask1Imm);
2271     int Width = BitWidth - APInt(BitWidth, Mask0Imm).countPopulation();
2272
2273     // The BFXIL inserts the low-order bits from a source register, so right
2274     // shift the needed bits into place.
2275     SDLoc DL(N);
2276     unsigned ShiftOpc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
2277     SDNode *LSR = CurDAG->getMachineNode(
2278         ShiftOpc, DL, VT, Src, CurDAG->getTargetConstant(LSB, DL, VT),
2279         CurDAG->getTargetConstant(BitWidth - 1, DL, VT));
2280
2281     // BFXIL is an alias of BFM, so translate to BFM operands.
2282     unsigned ImmR = (BitWidth - LSB) % BitWidth;
2283     unsigned ImmS = Width - 1;
2284
2285     // Create the BFXIL instruction.
2286     SDValue Ops[] = {Dst, SDValue(LSR, 0),
2287                      CurDAG->getTargetConstant(ImmR, DL, VT),
2288                      CurDAG->getTargetConstant(ImmS, DL, VT)};
2289     unsigned Opc = (VT == MVT::i32) ? AArch64::BFMWri : AArch64::BFMXri;
2290     CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2291     return true;
2292   }
2293
2294   return false;
2295 }
2296
2297 bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {
2298   if (N->getOpcode() != ISD::OR)
2299     return false;
2300
2301   APInt NUsefulBits;
2302   getUsefulBits(SDValue(N, 0), NUsefulBits);
2303
2304   // If none of the bits are useful, just return UNDEF.
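  // (For example, an OR whose result has no remaining uses ends up with an
  // empty UsefulBits set: the loop in getUsefulBits then sees no users and
  // the final AND with UsersUsefulBits clears every bit.)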
2305   if (!NUsefulBits) {
2306     CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF, N->getValueType(0));
2307     return true;
2308   }
2309
2310   if (tryBitfieldInsertOpFromOr(N, NUsefulBits, CurDAG))
2311     return true;
2312
2313   return tryBitfieldInsertOpFromOrAndImm(N, CurDAG);
2314 }
2315
2316 /// tryBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
2317 /// equivalent of a left shift by a constant amount followed by an and masking
2318 /// out a contiguous set of bits.
2319 bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {
2320   if (N->getOpcode() != ISD::AND)
2321     return false;
2322
2323   EVT VT = N->getValueType(0);
2324   if (VT != MVT::i32 && VT != MVT::i64)
2325     return false;
2326
2327   SDValue Op0;
2328   int DstLSB, Width;
2329   if (!isBitfieldPositioningOp(CurDAG, SDValue(N, 0), /*BiggerPattern=*/false,
2330                                Op0, DstLSB, Width))
2331     return false;
2332
2333   // ImmR is the rotate right amount.
2334   unsigned ImmR = (VT.getSizeInBits() - DstLSB) % VT.getSizeInBits();
2335   // ImmS is the most significant bit of the source to be moved.
2336   unsigned ImmS = Width - 1;
2337
2338   SDLoc DL(N);
2339   SDValue Ops[] = {Op0, CurDAG->getTargetConstant(ImmR, DL, VT),
2340                    CurDAG->getTargetConstant(ImmS, DL, VT)};
2341   unsigned Opc = (VT == MVT::i32) ? AArch64::UBFMWri : AArch64::UBFMXri;
2342   CurDAG->SelectNodeTo(N, Opc, VT, Ops);
2343   return true;
2344 }
2345
2346 bool
2347 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
2348                                               unsigned RegWidth) {
2349   APFloat FVal(0.0);
2350   if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
2351     FVal = CN->getValueAPF();
2352   else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
2353     // Some otherwise illegal constants are allowed in this case.
2354     if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
2355         !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
2356       return false;
2357
2358     ConstantPoolSDNode *CN =
2359         dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
2360     FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
2361   } else
2362     return false;
2363
2364   // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
2365   // is between 1 and 32 for a destination w-register, or 1 and 64 for an
2366   // x-register.
2367   //
2368   // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
2369   // want THIS_NODE to be 2^fbits. This is much easier to deal with using
2370   // integers.
2371   bool IsExact;
2372
2373   // fbits is between 1 and 64 in the worst-case, which means the fmul
2374   // could have 2^64 as an actual operand. Need 65 bits of precision.
2375   APSInt IntVal(65, true);
2376   FVal.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);
2377
2378   // N.b. isPowerOf2 also checks for > 0.
2379   if (!IsExact || !IntVal.isPowerOf2()) return false;
2380   unsigned FBits = IntVal.logBase2();
2381
2382   // Checks above should have guaranteed that we haven't lost information in
2383   // finding FBits, but it must still be in range.
2384   if (FBits == 0 || FBits > RegWidth) return false;
2385
2386   FixedPos = CurDAG->getTargetConstant(FBits, SDLoc(N), MVT::i32);
2387   return true;
2388 }
2389
2390 // Inspects a register string of the form o0:op1:CRn:CRm:op2, extracts the
2391 // integer values of the fields, and combines them into a single value to be
2392 // used in the MRS/MSR instruction.
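// For example (illustrative): the string "3:3:9:13:2" yields
// (3 << 14) | (3 << 11) | (9 << 7) | (13 << 3) | 2 = 0xDCEA.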
2393 static int getIntOperandFromRegisterString(StringRef RegString) {
2394   SmallVector<StringRef, 5> Fields;
2395   RegString.split(Fields, ':');
2396
2397   if (Fields.size() == 1)
2398     return -1;
2399
2400   assert(Fields.size() == 5
2401             && "Invalid number of fields in read register string");
2402
2403   SmallVector<int, 5> Ops;
2404   bool AllIntFields = true;
2405
2406   for (StringRef Field : Fields) {
2407     unsigned IntField;
2408     AllIntFields &= !Field.getAsInteger(10, IntField);
2409     Ops.push_back(IntField);
2410   }
2411
2412   assert(AllIntFields &&
2413          "Unexpected non-integer value in special register string.");
2414
2415   // Need to combine the integer fields of the string into a single value
2416   // based on the bit encoding of the MRS/MSR instruction.
2417   return (Ops[0] << 14) | (Ops[1] << 11) | (Ops[2] << 7) |
2418          (Ops[3] << 3) | (Ops[4]);
2419 }
2420
2421 // Lower the read_register intrinsic to an MRS instruction node if the special
2422 // register string argument is either of the form detailed in the ACLE (the
2423 // form described in getIntOperandFromRegisterString) or is a named register
2424 // known by the MRS SysReg mapper.
2425 bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
2426   const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2427   const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2428   SDLoc DL(N);
2429
2430   int Reg = getIntOperandFromRegisterString(RegString->getString());
2431   if (Reg != -1) {
2432     ReplaceNode(N, CurDAG->getMachineNode(
2433                        AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
2434                        CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2435                        N->getOperand(0)));
2436     return true;
2437   }
2438
2439   // Use the sysreg mapper to map the remaining possible strings to the
2440   // value for the register to be used for the instruction operand.
2441   auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
2442   if (TheReg && TheReg->Readable &&
2443       TheReg->haveFeatures(Subtarget->getFeatureBits()))
2444     Reg = TheReg->Encoding;
2445   else
2446     Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
2447
2448   if (Reg != -1) {
2449     ReplaceNode(N, CurDAG->getMachineNode(
2450                        AArch64::MRS, DL, N->getSimpleValueType(0), MVT::Other,
2451                        CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2452                        N->getOperand(0)));
2453     return true;
2454   }
2455
2456   return false;
2457 }
2458
2459 // Lower the write_register intrinsic to an MSR instruction node if the special
2460 // register string argument is either of the form detailed in the ACLE (the
2461 // form described in getIntOperandFromRegisterString) or is a named register
2462 // known by the MSR SysReg mapper.
2463 bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
2464   const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
2465   const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
2466   SDLoc DL(N);
2467
2468   int Reg = getIntOperandFromRegisterString(RegString->getString());
2469   if (Reg != -1) {
2470     ReplaceNode(
2471         N, CurDAG->getMachineNode(AArch64::MSR, DL, MVT::Other,
2472                                   CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2473                                   N->getOperand(2), N->getOperand(0)));
2474     return true;
2475   }
2476
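  // (At this point the encoded o0:op1:CRn:CRm:op2 form has been handled; the
  // remaining cases below are named PState fields and named system registers.)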
2477   // Check if the register was one of those allowed as the pstatefield value
2478   // in the MSR (immediate) instruction. To accept the values allowed in the
2479   // pstatefield for the MSR (immediate) instruction, we also require that an
2480   // immediate value has been provided as an argument; we know this is the
2481   // case, as it has been ensured by semantic checking.
2482   auto PMapper = AArch64PState::lookupPStateByName(RegString->getString());
2483   if (PMapper) {
2484     assert (isa<ConstantSDNode>(N->getOperand(2))
2485               && "Expected a constant integer expression.");
2486     unsigned Reg = PMapper->Encoding;
2487     uint64_t Immed = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
2488     unsigned State;
2489     if (Reg == AArch64PState::PAN || Reg == AArch64PState::UAO) {
2490       assert(Immed < 2 && "Bad imm");
2491       State = AArch64::MSRpstateImm1;
2492     } else {
2493       assert(Immed < 16 && "Bad imm");
2494       State = AArch64::MSRpstateImm4;
2495     }
2496     ReplaceNode(N, CurDAG->getMachineNode(
2497                        State, DL, MVT::Other,
2498                        CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2499                        CurDAG->getTargetConstant(Immed, DL, MVT::i16),
2500                        N->getOperand(0)));
2501     return true;
2502   }
2503
2504   // Use the sysreg mapper to attempt to map the remaining possible strings
2505   // to the value for the register to be used for the MSR (register)
2506   // instruction operand.
2507   auto TheReg = AArch64SysReg::lookupSysRegByName(RegString->getString());
2508   if (TheReg && TheReg->Writeable &&
2509       TheReg->haveFeatures(Subtarget->getFeatureBits()))
2510     Reg = TheReg->Encoding;
2511   else
2512     Reg = AArch64SysReg::parseGenericRegister(RegString->getString());
2513   if (Reg != -1) {
2514     ReplaceNode(N, CurDAG->getMachineNode(
2515                        AArch64::MSR, DL, MVT::Other,
2516                        CurDAG->getTargetConstant(Reg, DL, MVT::i32),
2517                        N->getOperand(2), N->getOperand(0)));
2518     return true;
2519   }
2520
2521   return false;
2522 }
2523
2524 /// We've got special pseudo-instructions for these.
2525 void AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {
2526   unsigned Opcode;
2527   EVT MemTy = cast<MemSDNode>(N)->getMemoryVT();
2528   if (MemTy == MVT::i8)
2529     Opcode = AArch64::CMP_SWAP_8;
2530   else if (MemTy == MVT::i16)
2531     Opcode = AArch64::CMP_SWAP_16;
2532   else if (MemTy == MVT::i32)
2533     Opcode = AArch64::CMP_SWAP_32;
2534   else if (MemTy == MVT::i64)
2535     Opcode = AArch64::CMP_SWAP_64;
2536   else
2537     llvm_unreachable("Unknown AtomicCmpSwap type");
2538
2539   MVT RegTy = MemTy == MVT::i64 ? MVT::i64 : MVT::i32;
2540   SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3),
2541                    N->getOperand(0)};
2542   SDNode *CmpSwap = CurDAG->getMachineNode(
2543       Opcode, SDLoc(N),
2544       CurDAG->getVTList(RegTy, MVT::i32, MVT::Other), Ops);
2545
2546   MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2547   MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
2548   cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);
2549
2550   ReplaceUses(SDValue(N, 0), SDValue(CmpSwap, 0));
2551   ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 2));
2552   CurDAG->RemoveDeadNode(N);
2553 }
2554
2555 void AArch64DAGToDAGISel::Select(SDNode *Node) {
2556   // Dump information about the Node being selected
2557   DEBUG(errs() << "Selecting: ");
2558   DEBUG(Node->dump(CurDAG));
2559   DEBUG(errs() << "\n");
2560
2561   // If we have a custom node, we already have selected!
2562   if (Node->isMachineOpcode()) {
2563     DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
2564     Node->setNodeId(-1);
2565     return;
2566   }
2567
2568   // A few custom selection cases.
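  // (Cases below that 'return' have fully selected the node; cases that
  // 'break' fall through to the default matcher. Summary note.)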
2569   EVT VT = Node->getValueType(0);
2570
2571   switch (Node->getOpcode()) {
2572   default:
2573     break;
2574
2575   case ISD::ATOMIC_CMP_SWAP:
2576     SelectCMP_SWAP(Node);
2577     return;
2578
2579   case ISD::READ_REGISTER:
2580     if (tryReadRegister(Node))
2581       return;
2582     break;
2583
2584   case ISD::WRITE_REGISTER:
2585     if (tryWriteRegister(Node))
2586       return;
2587     break;
2588
2589   case ISD::ADD:
2590     if (tryMLAV64LaneV128(Node))
2591       return;
2592     break;
2593
2594   case ISD::LOAD: {
2595     // Try to select as an indexed load. Fall through to normal processing
2596     // if we can't.
2597     if (tryIndexedLoad(Node))
2598       return;
2599     break;
2600   }
2601
2602   case ISD::SRL:
2603   case ISD::AND:
2604   case ISD::SRA:
2605   case ISD::SIGN_EXTEND_INREG:
2606     if (tryBitfieldExtractOp(Node))
2607       return;
2608     if (tryBitfieldInsertInZeroOp(Node))
2609       return;
2610     break;
2611
2612   case ISD::SIGN_EXTEND:
2613     if (tryBitfieldExtractOpFromSExt(Node))
2614       return;
2615     break;
2616
2617   case ISD::OR:
2618     if (tryBitfieldInsertOp(Node))
2619       return;
2620     break;
2621
2622   case ISD::EXTRACT_VECTOR_ELT: {
2623     // Extracting lane zero is a special case where we can just use a plain
2624     // EXTRACT_SUBREG instruction, which will become FMOV. This is easier for
2625     // the rest of the compiler, especially the register allocator and copy
2626     // propagation, to reason about, so is preferred when it's possible to
2627     // use it.
2628     ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
2629     // Bail and use the default Select() for non-zero lanes.
2630     if (LaneNode->getZExtValue() != 0)
2631       break;
2632     // If the element type is not the same as the result type, likewise
2633     // bail and use the default Select(), as there's more to do than just
2634     // a cross-class COPY. This catches extracts of i8 and i16 elements
2635     // since they will need an explicit zext.
2636     if (VT != Node->getOperand(0).getValueType().getVectorElementType())
2637       break;
2638     unsigned SubReg;
2639     switch (Node->getOperand(0)
2640                 .getValueType()
2641                 .getVectorElementType()
2642                 .getSizeInBits()) {
2643     default:
2644       llvm_unreachable("Unexpected vector element type!");
2645     case 64:
2646       SubReg = AArch64::dsub;
2647       break;
2648     case 32:
2649       SubReg = AArch64::ssub;
2650       break;
2651     case 16:
2652       SubReg = AArch64::hsub;
2653       break;
2654     case 8:
2655       llvm_unreachable("unexpected zext-requiring extract element!");
2656     }
2657     SDValue Extract = CurDAG->getTargetExtractSubreg(SubReg, SDLoc(Node), VT,
2658                                                      Node->getOperand(0));
2659     DEBUG(dbgs() << "ISEL: Custom selection!\n=> ");
2660     DEBUG(Extract->dumpr(CurDAG));
2661     DEBUG(dbgs() << "\n");
2662     ReplaceNode(Node, Extract.getNode());
2663     return;
2664   }
2665   case ISD::Constant: {
2666     // Materialize zero constants as copies from WZR/XZR. This allows
2667     // the coalescer to propagate these into other instructions.
2668     ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
2669     if (ConstNode->isNullValue()) {
2670       if (VT == MVT::i32) {
2671         SDValue New = CurDAG->getCopyFromReg(
2672             CurDAG->getEntryNode(), SDLoc(Node), AArch64::WZR, MVT::i32);
2673         ReplaceNode(Node, New.getNode());
2674         return;
2675       } else if (VT == MVT::i64) {
2676         SDValue New = CurDAG->getCopyFromReg(
2677             CurDAG->getEntryNode(), SDLoc(Node), AArch64::XZR, MVT::i64);
2678         ReplaceNode(Node, New.getNode());
2679         return;
2680       }
2681     }
2682     break;
2683   }
2684
2685   case ISD::FrameIndex: {
2686     // Selects to ADDXri FI, 0 which in turn will become ADDXri SP, imm.
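    // For example (illustrative): 'ADDXri %stack.0, 0, 0' is typically
    // rewritten by frame index elimination into 'add x0, sp, #offset'.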
2687 int FI = cast<FrameIndexSDNode>(Node)->getIndex(); 2688 unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0); 2689 const TargetLowering *TLI = getTargetLowering(); 2690 SDValue TFI = CurDAG->getTargetFrameIndex( 2691 FI, TLI->getPointerTy(CurDAG->getDataLayout())); 2692 SDLoc DL(Node); 2693 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, DL, MVT::i32), 2694 CurDAG->getTargetConstant(Shifter, DL, MVT::i32) }; 2695 CurDAG->SelectNodeTo(Node, AArch64::ADDXri, MVT::i64, Ops); 2696 return; 2697 } 2698 case ISD::INTRINSIC_W_CHAIN: { 2699 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue(); 2700 switch (IntNo) { 2701 default: 2702 break; 2703 case Intrinsic::aarch64_ldaxp: 2704 case Intrinsic::aarch64_ldxp: { 2705 unsigned Op = 2706 IntNo == Intrinsic::aarch64_ldaxp ? AArch64::LDAXPX : AArch64::LDXPX; 2707 SDValue MemAddr = Node->getOperand(2); 2708 SDLoc DL(Node); 2709 SDValue Chain = Node->getOperand(0); 2710 2711 SDNode *Ld = CurDAG->getMachineNode(Op, DL, MVT::i64, MVT::i64, 2712 MVT::Other, MemAddr, Chain); 2713 2714 // Transfer memoperands. 2715 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 2716 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand(); 2717 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1); 2718 ReplaceNode(Node, Ld); 2719 return; 2720 } 2721 case Intrinsic::aarch64_stlxp: 2722 case Intrinsic::aarch64_stxp: { 2723 unsigned Op = 2724 IntNo == Intrinsic::aarch64_stlxp ? AArch64::STLXPX : AArch64::STXPX; 2725 SDLoc DL(Node); 2726 SDValue Chain = Node->getOperand(0); 2727 SDValue ValLo = Node->getOperand(2); 2728 SDValue ValHi = Node->getOperand(3); 2729 SDValue MemAddr = Node->getOperand(4); 2730 2731 // Place arguments in the right order. 2732 SDValue Ops[] = {ValLo, ValHi, MemAddr, Chain}; 2733 2734 SDNode *St = CurDAG->getMachineNode(Op, DL, MVT::i32, MVT::Other, Ops); 2735 // Transfer memoperands. 
2736 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 2737 MemOp[0] = cast<MemIntrinsicSDNode>(Node)->getMemOperand(); 2738 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1); 2739 2740 ReplaceNode(Node, St); 2741 return; 2742 } 2743 case Intrinsic::aarch64_neon_ld1x2: 2744 if (VT == MVT::v8i8) { 2745 SelectLoad(Node, 2, AArch64::LD1Twov8b, AArch64::dsub0); 2746 return; 2747 } else if (VT == MVT::v16i8) { 2748 SelectLoad(Node, 2, AArch64::LD1Twov16b, AArch64::qsub0); 2749 return; 2750 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) { 2751 SelectLoad(Node, 2, AArch64::LD1Twov4h, AArch64::dsub0); 2752 return; 2753 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) { 2754 SelectLoad(Node, 2, AArch64::LD1Twov8h, AArch64::qsub0); 2755 return; 2756 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) { 2757 SelectLoad(Node, 2, AArch64::LD1Twov2s, AArch64::dsub0); 2758 return; 2759 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) { 2760 SelectLoad(Node, 2, AArch64::LD1Twov4s, AArch64::qsub0); 2761 return; 2762 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) { 2763 SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0); 2764 return; 2765 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) { 2766 SelectLoad(Node, 2, AArch64::LD1Twov2d, AArch64::qsub0); 2767 return; 2768 } 2769 break; 2770 case Intrinsic::aarch64_neon_ld1x3: 2771 if (VT == MVT::v8i8) { 2772 SelectLoad(Node, 3, AArch64::LD1Threev8b, AArch64::dsub0); 2773 return; 2774 } else if (VT == MVT::v16i8) { 2775 SelectLoad(Node, 3, AArch64::LD1Threev16b, AArch64::qsub0); 2776 return; 2777 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) { 2778 SelectLoad(Node, 3, AArch64::LD1Threev4h, AArch64::dsub0); 2779 return; 2780 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) { 2781 SelectLoad(Node, 3, AArch64::LD1Threev8h, AArch64::qsub0); 2782 return; 2783 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) { 2784 SelectLoad(Node, 3, AArch64::LD1Threev2s, AArch64::dsub0); 2785 return; 2786 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) { 2787 SelectLoad(Node, 3, AArch64::LD1Threev4s, AArch64::qsub0); 2788 return; 2789 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) { 2790 SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0); 2791 return; 2792 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) { 2793 SelectLoad(Node, 3, AArch64::LD1Threev2d, AArch64::qsub0); 2794 return; 2795 } 2796 break; 2797 case Intrinsic::aarch64_neon_ld1x4: 2798 if (VT == MVT::v8i8) { 2799 SelectLoad(Node, 4, AArch64::LD1Fourv8b, AArch64::dsub0); 2800 return; 2801 } else if (VT == MVT::v16i8) { 2802 SelectLoad(Node, 4, AArch64::LD1Fourv16b, AArch64::qsub0); 2803 return; 2804 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) { 2805 SelectLoad(Node, 4, AArch64::LD1Fourv4h, AArch64::dsub0); 2806 return; 2807 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) { 2808 SelectLoad(Node, 4, AArch64::LD1Fourv8h, AArch64::qsub0); 2809 return; 2810 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) { 2811 SelectLoad(Node, 4, AArch64::LD1Fourv2s, AArch64::dsub0); 2812 return; 2813 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) { 2814 SelectLoad(Node, 4, AArch64::LD1Fourv4s, AArch64::qsub0); 2815 return; 2816 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) { 2817 SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0); 2818 return; 2819 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) { 2820 SelectLoad(Node, 4, AArch64::LD1Fourv2d, AArch64::qsub0); 2821 return; 2822 } 2823 break; 2824 case Intrinsic::aarch64_neon_ld2: 2825 if (VT == MVT::v8i8) { 2826 SelectLoad(Node, 2, 
AArch64::LD2Twov8b, AArch64::dsub0); 2827 return; 2828 } else if (VT == MVT::v16i8) { 2829 SelectLoad(Node, 2, AArch64::LD2Twov16b, AArch64::qsub0); 2830 return; 2831 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) { 2832 SelectLoad(Node, 2, AArch64::LD2Twov4h, AArch64::dsub0); 2833 return; 2834 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) { 2835 SelectLoad(Node, 2, AArch64::LD2Twov8h, AArch64::qsub0); 2836 return; 2837 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) { 2838 SelectLoad(Node, 2, AArch64::LD2Twov2s, AArch64::dsub0); 2839 return; 2840 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) { 2841 SelectLoad(Node, 2, AArch64::LD2Twov4s, AArch64::qsub0); 2842 return; 2843 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) { 2844 SelectLoad(Node, 2, AArch64::LD1Twov1d, AArch64::dsub0); 2845 return; 2846 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) { 2847 SelectLoad(Node, 2, AArch64::LD2Twov2d, AArch64::qsub0); 2848 return; 2849 } 2850 break; 2851 case Intrinsic::aarch64_neon_ld3: 2852 if (VT == MVT::v8i8) { 2853 SelectLoad(Node, 3, AArch64::LD3Threev8b, AArch64::dsub0); 2854 return; 2855 } else if (VT == MVT::v16i8) { 2856 SelectLoad(Node, 3, AArch64::LD3Threev16b, AArch64::qsub0); 2857 return; 2858 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) { 2859 SelectLoad(Node, 3, AArch64::LD3Threev4h, AArch64::dsub0); 2860 return; 2861 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) { 2862 SelectLoad(Node, 3, AArch64::LD3Threev8h, AArch64::qsub0); 2863 return; 2864 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) { 2865 SelectLoad(Node, 3, AArch64::LD3Threev2s, AArch64::dsub0); 2866 return; 2867 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) { 2868 SelectLoad(Node, 3, AArch64::LD3Threev4s, AArch64::qsub0); 2869 return; 2870 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) { 2871 SelectLoad(Node, 3, AArch64::LD1Threev1d, AArch64::dsub0); 2872 return; 2873 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) { 2874 SelectLoad(Node, 3, AArch64::LD3Threev2d, AArch64::qsub0); 2875 return; 2876 } 2877 break; 2878 case Intrinsic::aarch64_neon_ld4: 2879 if (VT == MVT::v8i8) { 2880 SelectLoad(Node, 4, AArch64::LD4Fourv8b, AArch64::dsub0); 2881 return; 2882 } else if (VT == MVT::v16i8) { 2883 SelectLoad(Node, 4, AArch64::LD4Fourv16b, AArch64::qsub0); 2884 return; 2885 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) { 2886 SelectLoad(Node, 4, AArch64::LD4Fourv4h, AArch64::dsub0); 2887 return; 2888 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) { 2889 SelectLoad(Node, 4, AArch64::LD4Fourv8h, AArch64::qsub0); 2890 return; 2891 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) { 2892 SelectLoad(Node, 4, AArch64::LD4Fourv2s, AArch64::dsub0); 2893 return; 2894 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) { 2895 SelectLoad(Node, 4, AArch64::LD4Fourv4s, AArch64::qsub0); 2896 return; 2897 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) { 2898 SelectLoad(Node, 4, AArch64::LD1Fourv1d, AArch64::dsub0); 2899 return; 2900 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) { 2901 SelectLoad(Node, 4, AArch64::LD4Fourv2d, AArch64::qsub0); 2902 return; 2903 } 2904 break; 2905 case Intrinsic::aarch64_neon_ld2r: 2906 if (VT == MVT::v8i8) { 2907 SelectLoad(Node, 2, AArch64::LD2Rv8b, AArch64::dsub0); 2908 return; 2909 } else if (VT == MVT::v16i8) { 2910 SelectLoad(Node, 2, AArch64::LD2Rv16b, AArch64::qsub0); 2911 return; 2912 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) { 2913 SelectLoad(Node, 2, AArch64::LD2Rv4h, AArch64::dsub0); 2914 return; 2915 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) { 2916 
SelectLoad(Node, 2, AArch64::LD2Rv8h, AArch64::qsub0); 2917 return; 2918 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) { 2919 SelectLoad(Node, 2, AArch64::LD2Rv2s, AArch64::dsub0); 2920 return; 2921 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) { 2922 SelectLoad(Node, 2, AArch64::LD2Rv4s, AArch64::qsub0); 2923 return; 2924 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) { 2925 SelectLoad(Node, 2, AArch64::LD2Rv1d, AArch64::dsub0); 2926 return; 2927 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) { 2928 SelectLoad(Node, 2, AArch64::LD2Rv2d, AArch64::qsub0); 2929 return; 2930 } 2931 break; 2932 case Intrinsic::aarch64_neon_ld3r: 2933 if (VT == MVT::v8i8) { 2934 SelectLoad(Node, 3, AArch64::LD3Rv8b, AArch64::dsub0); 2935 return; 2936 } else if (VT == MVT::v16i8) { 2937 SelectLoad(Node, 3, AArch64::LD3Rv16b, AArch64::qsub0); 2938 return; 2939 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) { 2940 SelectLoad(Node, 3, AArch64::LD3Rv4h, AArch64::dsub0); 2941 return; 2942 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) { 2943 SelectLoad(Node, 3, AArch64::LD3Rv8h, AArch64::qsub0); 2944 return; 2945 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) { 2946 SelectLoad(Node, 3, AArch64::LD3Rv2s, AArch64::dsub0); 2947 return; 2948 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) { 2949 SelectLoad(Node, 3, AArch64::LD3Rv4s, AArch64::qsub0); 2950 return; 2951 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) { 2952 SelectLoad(Node, 3, AArch64::LD3Rv1d, AArch64::dsub0); 2953 return; 2954 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) { 2955 SelectLoad(Node, 3, AArch64::LD3Rv2d, AArch64::qsub0); 2956 return; 2957 } 2958 break; 2959 case Intrinsic::aarch64_neon_ld4r: 2960 if (VT == MVT::v8i8) { 2961 SelectLoad(Node, 4, AArch64::LD4Rv8b, AArch64::dsub0); 2962 return; 2963 } else if (VT == MVT::v16i8) { 2964 SelectLoad(Node, 4, AArch64::LD4Rv16b, AArch64::qsub0); 2965 return; 2966 } else if (VT == MVT::v4i16 || VT == MVT::v4f16) { 2967 SelectLoad(Node, 4, AArch64::LD4Rv4h, AArch64::dsub0); 2968 return; 2969 } else if (VT == MVT::v8i16 || VT == MVT::v8f16) { 2970 SelectLoad(Node, 4, AArch64::LD4Rv8h, AArch64::qsub0); 2971 return; 2972 } else if (VT == MVT::v2i32 || VT == MVT::v2f32) { 2973 SelectLoad(Node, 4, AArch64::LD4Rv2s, AArch64::dsub0); 2974 return; 2975 } else if (VT == MVT::v4i32 || VT == MVT::v4f32) { 2976 SelectLoad(Node, 4, AArch64::LD4Rv4s, AArch64::qsub0); 2977 return; 2978 } else if (VT == MVT::v1i64 || VT == MVT::v1f64) { 2979 SelectLoad(Node, 4, AArch64::LD4Rv1d, AArch64::dsub0); 2980 return; 2981 } else if (VT == MVT::v2i64 || VT == MVT::v2f64) { 2982 SelectLoad(Node, 4, AArch64::LD4Rv2d, AArch64::qsub0); 2983 return; 2984 } 2985 break; 2986 case Intrinsic::aarch64_neon_ld2lane: 2987 if (VT == MVT::v16i8 || VT == MVT::v8i8) { 2988 SelectLoadLane(Node, 2, AArch64::LD2i8); 2989 return; 2990 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 || 2991 VT == MVT::v8f16) { 2992 SelectLoadLane(Node, 2, AArch64::LD2i16); 2993 return; 2994 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 || 2995 VT == MVT::v2f32) { 2996 SelectLoadLane(Node, 2, AArch64::LD2i32); 2997 return; 2998 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || 2999 VT == MVT::v1f64) { 3000 SelectLoadLane(Node, 2, AArch64::LD2i64); 3001 return; 3002 } 3003 break; 3004 case Intrinsic::aarch64_neon_ld3lane: 3005 if (VT == MVT::v16i8 || VT == MVT::v8i8) { 3006 SelectLoadLane(Node, 3, AArch64::LD3i8); 3007 return; 3008 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == 
MVT::v4f16 || 3009 VT == MVT::v8f16) { 3010 SelectLoadLane(Node, 3, AArch64::LD3i16); 3011 return; 3012 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 || 3013 VT == MVT::v2f32) { 3014 SelectLoadLane(Node, 3, AArch64::LD3i32); 3015 return; 3016 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || 3017 VT == MVT::v1f64) { 3018 SelectLoadLane(Node, 3, AArch64::LD3i64); 3019 return; 3020 } 3021 break; 3022 case Intrinsic::aarch64_neon_ld4lane: 3023 if (VT == MVT::v16i8 || VT == MVT::v8i8) { 3024 SelectLoadLane(Node, 4, AArch64::LD4i8); 3025 return; 3026 } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 || 3027 VT == MVT::v8f16) { 3028 SelectLoadLane(Node, 4, AArch64::LD4i16); 3029 return; 3030 } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 || 3031 VT == MVT::v2f32) { 3032 SelectLoadLane(Node, 4, AArch64::LD4i32); 3033 return; 3034 } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 || 3035 VT == MVT::v1f64) { 3036 SelectLoadLane(Node, 4, AArch64::LD4i64); 3037 return; 3038 } 3039 break; 3040 } 3041 } break; 3042 case ISD::INTRINSIC_WO_CHAIN: { 3043 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue(); 3044 switch (IntNo) { 3045 default: 3046 break; 3047 case Intrinsic::aarch64_neon_tbl2: 3048 SelectTable(Node, 2, 3049 VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two, 3050 false); 3051 return; 3052 case Intrinsic::aarch64_neon_tbl3: 3053 SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three 3054 : AArch64::TBLv16i8Three, 3055 false); 3056 return; 3057 case Intrinsic::aarch64_neon_tbl4: 3058 SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four 3059 : AArch64::TBLv16i8Four, 3060 false); 3061 return; 3062 case Intrinsic::aarch64_neon_tbx2: 3063 SelectTable(Node, 2, 3064 VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two, 3065 true); 3066 return; 3067 case Intrinsic::aarch64_neon_tbx3: 3068 SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three 3069 : AArch64::TBXv16i8Three, 3070 true); 3071 return; 3072 case Intrinsic::aarch64_neon_tbx4: 3073 SelectTable(Node, 4, VT == MVT::v8i8 ? 
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_neon_tbl2:
      SelectTable(Node, 2,
                  VT == MVT::v8i8 ? AArch64::TBLv8i8Two : AArch64::TBLv16i8Two,
                  false);
      return;
    case Intrinsic::aarch64_neon_tbl3:
      SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBLv8i8Three
                                           : AArch64::TBLv16i8Three,
                  false);
      return;
    case Intrinsic::aarch64_neon_tbl4:
      SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBLv8i8Four
                                           : AArch64::TBLv16i8Four,
                  false);
      return;
    case Intrinsic::aarch64_neon_tbx2:
      SelectTable(Node, 2,
                  VT == MVT::v8i8 ? AArch64::TBXv8i8Two : AArch64::TBXv16i8Two,
                  true);
      return;
    case Intrinsic::aarch64_neon_tbx3:
      SelectTable(Node, 3, VT == MVT::v8i8 ? AArch64::TBXv8i8Three
                                           : AArch64::TBXv16i8Three,
                  true);
      return;
    case Intrinsic::aarch64_neon_tbx4:
      SelectTable(Node, 4, VT == MVT::v8i8 ? AArch64::TBXv8i8Four
                                           : AArch64::TBXv16i8Four,
                  true);
      return;
    case Intrinsic::aarch64_neon_smull:
    case Intrinsic::aarch64_neon_umull:
      if (tryMULLV64LaneV128(IntNo, Node))
        return;
      break;
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    if (Node->getNumOperands() >= 3)
      VT = Node->getOperand(2)->getValueType(0);
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_neon_st1x2: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 2, AArch64::ST1Twov8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 2, AArch64::ST1Twov16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 2, AArch64::ST1Twov4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 2, AArch64::ST1Twov8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 2, AArch64::ST1Twov2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 2, AArch64::ST1Twov4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 2, AArch64::ST1Twov2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 2, AArch64::ST1Twov1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st1x3: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 3, AArch64::ST1Threev8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 3, AArch64::ST1Threev16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 3, AArch64::ST1Threev4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 3, AArch64::ST1Threev8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 3, AArch64::ST1Threev2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 3, AArch64::ST1Threev4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 3, AArch64::ST1Threev2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 3, AArch64::ST1Threev1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st1x4: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 4, AArch64::ST1Fourv8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 4, AArch64::ST1Fourv16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 4, AArch64::ST1Fourv4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 4, AArch64::ST1Fourv8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 4, AArch64::ST1Fourv2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 4, AArch64::ST1Fourv4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 4, AArch64::ST1Fourv2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 4, AArch64::ST1Fourv1d);
        return;
      }
      break;
    }
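    // Interleaving stores. There is no STN encoding for single-element
    // vectors, so v1i64/v1f64 fall back to the equivalent multi-register ST1.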
    case Intrinsic::aarch64_neon_st2: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 2, AArch64::ST2Twov8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 2, AArch64::ST2Twov16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 2, AArch64::ST2Twov4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 2, AArch64::ST2Twov8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 2, AArch64::ST2Twov2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 2, AArch64::ST2Twov4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 2, AArch64::ST2Twov2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 2, AArch64::ST1Twov1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st3: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 3, AArch64::ST3Threev8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 3, AArch64::ST3Threev16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 3, AArch64::ST3Threev4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 3, AArch64::ST3Threev8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 3, AArch64::ST3Threev2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 3, AArch64::ST3Threev4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 3, AArch64::ST3Threev2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 3, AArch64::ST1Threev1d);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st4: {
      if (VT == MVT::v8i8) {
        SelectStore(Node, 4, AArch64::ST4Fourv8b);
        return;
      } else if (VT == MVT::v16i8) {
        SelectStore(Node, 4, AArch64::ST4Fourv16b);
        return;
      } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
        SelectStore(Node, 4, AArch64::ST4Fourv4h);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
        SelectStore(Node, 4, AArch64::ST4Fourv8h);
        return;
      } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
        SelectStore(Node, 4, AArch64::ST4Fourv2s);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
        SelectStore(Node, 4, AArch64::ST4Fourv4s);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
        SelectStore(Node, 4, AArch64::ST4Fourv2d);
        return;
      } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
        SelectStore(Node, 4, AArch64::ST1Fourv1d);
        return;
      }
      break;
    }
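    // Lane-indexed stores; as with the lane loads, the 64-bit and 128-bit
    // vector types share one opcode per element size.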
    case Intrinsic::aarch64_neon_st2lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectStoreLane(Node, 2, AArch64::ST2i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectStoreLane(Node, 2, AArch64::ST2i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectStoreLane(Node, 2, AArch64::ST2i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectStoreLane(Node, 2, AArch64::ST2i64);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st3lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectStoreLane(Node, 3, AArch64::ST3i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectStoreLane(Node, 3, AArch64::ST3i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectStoreLane(Node, 3, AArch64::ST3i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectStoreLane(Node, 3, AArch64::ST3i64);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_neon_st4lane: {
      if (VT == MVT::v16i8 || VT == MVT::v8i8) {
        SelectStoreLane(Node, 4, AArch64::ST4i8);
        return;
      } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
                 VT == MVT::v8f16) {
        SelectStoreLane(Node, 4, AArch64::ST4i16);
        return;
      } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
                 VT == MVT::v2f32) {
        SelectStoreLane(Node, 4, AArch64::ST4i32);
        return;
      } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
                 VT == MVT::v1f64) {
        SelectStoreLane(Node, 4, AArch64::ST4i64);
        return;
      }
      break;
    }
    }
    break;
  }
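  // Post-increment loads and stores from here on. These AArch64ISD nodes are
  // produced by DAG combines that fold a post-increment address update into
  // the memory access.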
  case AArch64ISD::LD2post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Twov2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD3post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Threev2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD4post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Fourv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
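  // Post-increment multi-register LD1 variants: consecutive loads with no
  // de-interleaving.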
  case AArch64ISD::LD1x2post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD1Twov2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD1x3post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD1Threev2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD1x4post: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD1Fourv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
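  // Post-increment load-and-replicate forms: one element is loaded and
  // broadcast to every lane of each destination vector.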
  case AArch64ISD::LD1DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 1, AArch64::LD1Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD2DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 2, AArch64::LD2Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD3DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 3, AArch64::LD3Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
  case AArch64ISD::LD4DUPpost: {
    if (VT == MVT::v8i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv8b_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv16b_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv4h_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv8h_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv2s_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv4s_POST, AArch64::qsub0);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv1d_POST, AArch64::dsub0);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostLoad(Node, 4, AArch64::LD4Rv2d_POST, AArch64::qsub0);
      return;
    }
    break;
  }
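  // Post-increment lane-indexed loads.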
  case AArch64ISD::LD1LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 1, AArch64::LD1i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD2LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 2, AArch64::LD2i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD3LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 3, AArch64::LD3i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::LD4LANEpost: {
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostLoadLane(Node, 4, AArch64::LD4i64_POST);
      return;
    }
    break;
  }
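  // Post-increment stores. The stored value type is re-read from operand 1.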
  case AArch64ISD::ST2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 2, AArch64::ST2Twov8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 2, AArch64::ST2Twov16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 2, AArch64::ST2Twov4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 2, AArch64::ST2Twov8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 2, AArch64::ST2Twov2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 2, AArch64::ST2Twov4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 2, AArch64::ST2Twov2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 3, AArch64::ST3Threev8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 3, AArch64::ST3Threev16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 3, AArch64::ST3Threev4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 3, AArch64::ST3Threev8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 3, AArch64::ST3Threev2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 3, AArch64::ST3Threev4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 3, AArch64::ST3Threev2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv4s_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 4, AArch64::ST4Fourv2d_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x2post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 2, AArch64::ST1Twov8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 2, AArch64::ST1Twov16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 2, AArch64::ST1Twov4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 2, AArch64::ST1Twov8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 2, AArch64::ST1Twov2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 2, AArch64::ST1Twov4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 2, AArch64::ST1Twov2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x3post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 3, AArch64::ST1Threev8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 3, AArch64::ST1Threev16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 3, AArch64::ST1Threev4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 3, AArch64::ST1Threev8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 3, AArch64::ST1Threev2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 3, AArch64::ST1Threev4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 3, AArch64::ST1Threev2d_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST1x4post: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v8i8) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv8b_POST);
      return;
    } else if (VT == MVT::v16i8) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv16b_POST);
      return;
    } else if (VT == MVT::v4i16 || VT == MVT::v4f16) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv4h_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v8f16) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv8h_POST);
      return;
    } else if (VT == MVT::v2i32 || VT == MVT::v2f32) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv2s_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v4f32) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv4s_POST);
      return;
    } else if (VT == MVT::v1i64 || VT == MVT::v1f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv1d_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v2f64) {
      SelectPostStore(Node, 4, AArch64::ST1Fourv2d_POST);
      return;
    }
    break;
  }
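  // Post-increment lane-indexed stores.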
  case AArch64ISD::ST2LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 2, AArch64::ST2i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST3LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 3, AArch64::ST3i64_POST);
      return;
    }
    break;
  }
  case AArch64ISD::ST4LANEpost: {
    VT = Node->getOperand(1).getValueType();
    if (VT == MVT::v16i8 || VT == MVT::v8i8) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i8_POST);
      return;
    } else if (VT == MVT::v8i16 || VT == MVT::v4i16 || VT == MVT::v4f16 ||
               VT == MVT::v8f16) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i16_POST);
      return;
    } else if (VT == MVT::v4i32 || VT == MVT::v2i32 || VT == MVT::v4f32 ||
               VT == MVT::v2f32) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i32_POST);
      return;
    } else if (VT == MVT::v2i64 || VT == MVT::v1i64 || VT == MVT::v2f64 ||
               VT == MVT::v1f64) {
      SelectPostStoreLane(Node, 4, AArch64::ST4i64_POST);
      return;
    }
    break;
  }
  }

  // Select the default instruction
  SelectCode(Node);
}

/// createAArch64ISelDag - This pass converts a legalized DAG into an
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}