1 //===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 /// \file 11 /// Custom DAG lowering for SI 12 // 13 //===----------------------------------------------------------------------===// 14 15 #ifdef _MSC_VER 16 // Provide M_PI. 17 #define _USE_MATH_DEFINES 18 #endif 19 20 #include "SIISelLowering.h" 21 #include "AMDGPU.h" 22 #include "AMDGPUIntrinsicInfo.h" 23 #include "AMDGPUSubtarget.h" 24 #include "AMDGPUTargetMachine.h" 25 #include "SIDefines.h" 26 #include "SIInstrInfo.h" 27 #include "SIMachineFunctionInfo.h" 28 #include "SIRegisterInfo.h" 29 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 30 #include "Utils/AMDGPUBaseInfo.h" 31 #include "llvm/ADT/APFloat.h" 32 #include "llvm/ADT/APInt.h" 33 #include "llvm/ADT/ArrayRef.h" 34 #include "llvm/ADT/BitVector.h" 35 #include "llvm/ADT/SmallVector.h" 36 #include "llvm/ADT/Statistic.h" 37 #include "llvm/ADT/StringRef.h" 38 #include "llvm/ADT/StringSwitch.h" 39 #include "llvm/ADT/Twine.h" 40 #include "llvm/CodeGen/Analysis.h" 41 #include "llvm/CodeGen/CallingConvLower.h" 42 #include "llvm/CodeGen/DAGCombine.h" 43 #include "llvm/CodeGen/ISDOpcodes.h" 44 #include "llvm/CodeGen/MachineBasicBlock.h" 45 #include "llvm/CodeGen/MachineFrameInfo.h" 46 #include "llvm/CodeGen/MachineFunction.h" 47 #include "llvm/CodeGen/MachineInstr.h" 48 #include "llvm/CodeGen/MachineInstrBuilder.h" 49 #include "llvm/CodeGen/MachineMemOperand.h" 50 #include "llvm/CodeGen/MachineModuleInfo.h" 51 #include "llvm/CodeGen/MachineOperand.h" 52 #include "llvm/CodeGen/MachineRegisterInfo.h" 53 #include "llvm/CodeGen/SelectionDAG.h" 54 #include "llvm/CodeGen/SelectionDAGNodes.h" 55 #include "llvm/CodeGen/TargetCallingConv.h" 56 #include "llvm/CodeGen/TargetRegisterInfo.h" 57 #include "llvm/CodeGen/ValueTypes.h" 58 #include "llvm/IR/Constants.h" 59 #include "llvm/IR/DataLayout.h" 60 #include "llvm/IR/DebugLoc.h" 61 #include "llvm/IR/DerivedTypes.h" 62 #include "llvm/IR/DiagnosticInfo.h" 63 #include "llvm/IR/Function.h" 64 #include "llvm/IR/GlobalValue.h" 65 #include "llvm/IR/InstrTypes.h" 66 #include "llvm/IR/Instruction.h" 67 #include "llvm/IR/Instructions.h" 68 #include "llvm/IR/IntrinsicInst.h" 69 #include "llvm/IR/Type.h" 70 #include "llvm/Support/Casting.h" 71 #include "llvm/Support/CodeGen.h" 72 #include "llvm/Support/CommandLine.h" 73 #include "llvm/Support/Compiler.h" 74 #include "llvm/Support/ErrorHandling.h" 75 #include "llvm/Support/KnownBits.h" 76 #include "llvm/Support/MachineValueType.h" 77 #include "llvm/Support/MathExtras.h" 78 #include "llvm/Target/TargetOptions.h" 79 #include <cassert> 80 #include <cmath> 81 #include <cstdint> 82 #include <iterator> 83 #include <tuple> 84 #include <utility> 85 #include <vector> 86 87 using namespace llvm; 88 89 #define DEBUG_TYPE "si-lower" 90 91 STATISTIC(NumTailCalls, "Number of tail calls"); 92 93 static cl::opt<bool> EnableVGPRIndexMode( 94 "amdgpu-vgpr-index-mode", 95 cl::desc("Use GPR indexing mode instead of movrel for vector indexing"), 96 cl::init(false)); 97 98 static cl::opt<unsigned> AssumeFrameIndexHighZeroBits( 99 "amdgpu-frame-index-zero-bits", 100 cl::desc("High bits of frame index assumed to be zero"), 101 cl::init(5), 102 cl::ReallyHidden); 103 104 static unsigned findFirstFreeSGPR(CCState &CCInfo) { 105 unsigned NumSGPRs = 
    AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const GCNSubtarget &STI)
    : AMDGPUTargetLowering(TM, STI),
      Subtarget(&STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);

    // Unless there are also VOP3P operations, no operations on these vector
    // types are really legal.
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v4i16, &AMDGPU::SReg_64RegClass);
    addRegisterClass(MVT::v4f16, &AMDGPU::SReg_64RegClass);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // We need to custom lower vector loads and stores from local memory.
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32,
Expand); 187 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); 188 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); 189 setOperationAction(ISD::SELECT_CC, MVT::i1, Expand); 190 191 setOperationAction(ISD::SETCC, MVT::i1, Promote); 192 setOperationAction(ISD::SETCC, MVT::v2i1, Expand); 193 setOperationAction(ISD::SETCC, MVT::v4i1, Expand); 194 AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); 195 196 setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand); 197 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand); 198 199 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom); 200 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom); 201 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom); 202 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom); 203 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom); 204 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom); 205 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom); 206 207 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 208 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom); 209 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom); 210 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2i16, Custom); 211 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom); 212 213 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v2f16, Custom); 214 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::v4f16, Custom); 215 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); 216 217 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); 218 setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom); 219 setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom); 220 setOperationAction(ISD::INTRINSIC_VOID, MVT::v4f16, Custom); 221 222 setOperationAction(ISD::BRCOND, MVT::Other, Custom); 223 setOperationAction(ISD::BR_CC, MVT::i1, Expand); 224 setOperationAction(ISD::BR_CC, MVT::i32, Expand); 225 setOperationAction(ISD::BR_CC, MVT::i64, Expand); 226 setOperationAction(ISD::BR_CC, MVT::f32, Expand); 227 setOperationAction(ISD::BR_CC, MVT::f64, Expand); 228 229 setOperationAction(ISD::UADDO, MVT::i32, Legal); 230 setOperationAction(ISD::USUBO, MVT::i32, Legal); 231 232 setOperationAction(ISD::ADDCARRY, MVT::i32, Legal); 233 setOperationAction(ISD::SUBCARRY, MVT::i32, Legal); 234 235 #if 0 236 setOperationAction(ISD::ADDCARRY, MVT::i64, Legal); 237 setOperationAction(ISD::SUBCARRY, MVT::i64, Legal); 238 #endif 239 240 // We only support LOAD/STORE and vector manipulation ops for vectors 241 // with > 4 elements. 242 for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32, 243 MVT::v2i64, MVT::v2f64, MVT::v4i16, MVT::v4f16 }) { 244 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { 245 switch (Op) { 246 case ISD::LOAD: 247 case ISD::STORE: 248 case ISD::BUILD_VECTOR: 249 case ISD::BITCAST: 250 case ISD::EXTRACT_VECTOR_ELT: 251 case ISD::INSERT_VECTOR_ELT: 252 case ISD::INSERT_SUBVECTOR: 253 case ISD::EXTRACT_SUBVECTOR: 254 case ISD::SCALAR_TO_VECTOR: 255 break; 256 case ISD::CONCAT_VECTORS: 257 setOperationAction(Op, VT, Custom); 258 break; 259 default: 260 setOperationAction(Op, VT, Expand); 261 break; 262 } 263 } 264 } 265 266 setOperationAction(ISD::FP_EXTEND, MVT::v4f32, Expand); 267 268 // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that 269 // is expanded to avoid having two separate loops in case the index is a VGPR. 270 271 // Most operations are naturally 32-bit vector operations. 
  // We only support load and store of i64 vectors, so promote v2i64 vector
  // operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::BUILD_VECTOR, MVT::v4f16, Custom);
  setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i8, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling and
  // output demarshalling.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value, so let LLVM add
  // the comparison.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (Subtarget->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // READCYCLECOUNTER is s_memtime on SI and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  // v_mad_f32 does not support denormals according to some sources.
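  // So FMAD is only marked legal below when f32 denormals are disabled, in
  // which case v_mad_f32 is usable.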
346 if (!Subtarget->hasFP32Denormals()) 347 setOperationAction(ISD::FMAD, MVT::f32, Legal); 348 349 if (!Subtarget->hasBFI()) { 350 // fcopysign can be done in a single instruction with BFI. 351 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 352 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 353 } 354 355 if (!Subtarget->hasBCNT(32)) 356 setOperationAction(ISD::CTPOP, MVT::i32, Expand); 357 358 if (!Subtarget->hasBCNT(64)) 359 setOperationAction(ISD::CTPOP, MVT::i64, Expand); 360 361 if (Subtarget->hasFFBH()) 362 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom); 363 364 if (Subtarget->hasFFBL()) 365 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom); 366 367 // We only really have 32-bit BFE instructions (and 16-bit on VI). 368 // 369 // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any 370 // effort to match them now. We want this to be false for i64 cases when the 371 // extraction isn't restricted to the upper or lower half. Ideally we would 372 // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that 373 // span the midpoint are probably relatively rare, so don't worry about them 374 // for now. 375 if (Subtarget->hasBFE()) 376 setHasExtractBitsInsn(true); 377 378 setOperationAction(ISD::FMINNUM, MVT::f64, Legal); 379 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); 380 381 if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) { 382 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 383 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 384 setOperationAction(ISD::FRINT, MVT::f64, Legal); 385 } else { 386 setOperationAction(ISD::FCEIL, MVT::f64, Custom); 387 setOperationAction(ISD::FTRUNC, MVT::f64, Custom); 388 setOperationAction(ISD::FRINT, MVT::f64, Custom); 389 setOperationAction(ISD::FFLOOR, MVT::f64, Custom); 390 } 391 392 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 393 394 setOperationAction(ISD::FSIN, MVT::f32, Custom); 395 setOperationAction(ISD::FCOS, MVT::f32, Custom); 396 setOperationAction(ISD::FDIV, MVT::f32, Custom); 397 setOperationAction(ISD::FDIV, MVT::f64, Custom); 398 399 if (Subtarget->has16BitInsts()) { 400 setOperationAction(ISD::Constant, MVT::i16, Legal); 401 402 setOperationAction(ISD::SMIN, MVT::i16, Legal); 403 setOperationAction(ISD::SMAX, MVT::i16, Legal); 404 405 setOperationAction(ISD::UMIN, MVT::i16, Legal); 406 setOperationAction(ISD::UMAX, MVT::i16, Legal); 407 408 setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote); 409 AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32); 410 411 setOperationAction(ISD::ROTR, MVT::i16, Promote); 412 setOperationAction(ISD::ROTL, MVT::i16, Promote); 413 414 setOperationAction(ISD::SDIV, MVT::i16, Promote); 415 setOperationAction(ISD::UDIV, MVT::i16, Promote); 416 setOperationAction(ISD::SREM, MVT::i16, Promote); 417 setOperationAction(ISD::UREM, MVT::i16, Promote); 418 419 setOperationAction(ISD::BSWAP, MVT::i16, Promote); 420 setOperationAction(ISD::BITREVERSE, MVT::i16, Promote); 421 422 setOperationAction(ISD::CTTZ, MVT::i16, Promote); 423 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote); 424 setOperationAction(ISD::CTLZ, MVT::i16, Promote); 425 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote); 426 setOperationAction(ISD::CTPOP, MVT::i16, Promote); 427 428 setOperationAction(ISD::SELECT_CC, MVT::i16, Expand); 429 430 setOperationAction(ISD::BR_CC, MVT::i16, Expand); 431 432 setOperationAction(ISD::LOAD, MVT::i16, Custom); 433 434 setTruncStoreAction(MVT::i64, MVT::i16, Expand); 435 436 
setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote); 437 AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32); 438 setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote); 439 AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32); 440 441 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote); 442 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote); 443 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote); 444 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote); 445 446 // F16 - Constant Actions. 447 setOperationAction(ISD::ConstantFP, MVT::f16, Legal); 448 449 // F16 - Load/Store Actions. 450 setOperationAction(ISD::LOAD, MVT::f16, Promote); 451 AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16); 452 setOperationAction(ISD::STORE, MVT::f16, Promote); 453 AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16); 454 455 // F16 - VOP1 Actions. 456 setOperationAction(ISD::FP_ROUND, MVT::f16, Custom); 457 setOperationAction(ISD::FCOS, MVT::f16, Promote); 458 setOperationAction(ISD::FSIN, MVT::f16, Promote); 459 setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote); 460 setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote); 461 setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote); 462 setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote); 463 setOperationAction(ISD::FROUND, MVT::f16, Custom); 464 465 // F16 - VOP2 Actions. 466 setOperationAction(ISD::BR_CC, MVT::f16, Expand); 467 setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); 468 setOperationAction(ISD::FMAXNUM, MVT::f16, Legal); 469 setOperationAction(ISD::FMINNUM, MVT::f16, Legal); 470 setOperationAction(ISD::FDIV, MVT::f16, Custom); 471 472 // F16 - VOP3 Actions. 473 setOperationAction(ISD::FMA, MVT::f16, Legal); 474 if (!Subtarget->hasFP16Denormals()) 475 setOperationAction(ISD::FMAD, MVT::f16, Legal); 476 477 for (MVT VT : {MVT::v2i16, MVT::v2f16, MVT::v4i16, MVT::v4f16}) { 478 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) { 479 switch (Op) { 480 case ISD::LOAD: 481 case ISD::STORE: 482 case ISD::BUILD_VECTOR: 483 case ISD::BITCAST: 484 case ISD::EXTRACT_VECTOR_ELT: 485 case ISD::INSERT_VECTOR_ELT: 486 case ISD::INSERT_SUBVECTOR: 487 case ISD::EXTRACT_SUBVECTOR: 488 case ISD::SCALAR_TO_VECTOR: 489 break; 490 case ISD::CONCAT_VECTORS: 491 setOperationAction(Op, VT, Custom); 492 break; 493 default: 494 setOperationAction(Op, VT, Expand); 495 break; 496 } 497 } 498 } 499 500 // XXX - Do these do anything? Vector constants turn into build_vector. 
501 setOperationAction(ISD::Constant, MVT::v2i16, Legal); 502 setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal); 503 504 setOperationAction(ISD::UNDEF, MVT::v2i16, Legal); 505 setOperationAction(ISD::UNDEF, MVT::v2f16, Legal); 506 507 setOperationAction(ISD::STORE, MVT::v2i16, Promote); 508 AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32); 509 setOperationAction(ISD::STORE, MVT::v2f16, Promote); 510 AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32); 511 512 setOperationAction(ISD::LOAD, MVT::v2i16, Promote); 513 AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32); 514 setOperationAction(ISD::LOAD, MVT::v2f16, Promote); 515 AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32); 516 517 setOperationAction(ISD::AND, MVT::v2i16, Promote); 518 AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32); 519 setOperationAction(ISD::OR, MVT::v2i16, Promote); 520 AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32); 521 setOperationAction(ISD::XOR, MVT::v2i16, Promote); 522 AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32); 523 524 setOperationAction(ISD::LOAD, MVT::v4i16, Promote); 525 AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32); 526 setOperationAction(ISD::LOAD, MVT::v4f16, Promote); 527 AddPromotedToType(ISD::LOAD, MVT::v4f16, MVT::v2i32); 528 529 setOperationAction(ISD::STORE, MVT::v4i16, Promote); 530 AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32); 531 setOperationAction(ISD::STORE, MVT::v4f16, Promote); 532 AddPromotedToType(ISD::STORE, MVT::v4f16, MVT::v2i32); 533 534 setOperationAction(ISD::ANY_EXTEND, MVT::v2i32, Expand); 535 setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand); 536 setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand); 537 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand); 538 539 setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Expand); 540 setOperationAction(ISD::ZERO_EXTEND, MVT::v4i32, Expand); 541 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i32, Expand); 542 543 if (!Subtarget->hasVOP3PInsts()) { 544 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom); 545 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom); 546 } 547 548 setOperationAction(ISD::FNEG, MVT::v2f16, Legal); 549 // This isn't really legal, but this avoids the legalizer unrolling it (and 550 // allows matching fneg (fabs x) patterns) 551 setOperationAction(ISD::FABS, MVT::v2f16, Legal); 552 } 553 554 if (Subtarget->hasVOP3PInsts()) { 555 setOperationAction(ISD::ADD, MVT::v2i16, Legal); 556 setOperationAction(ISD::SUB, MVT::v2i16, Legal); 557 setOperationAction(ISD::MUL, MVT::v2i16, Legal); 558 setOperationAction(ISD::SHL, MVT::v2i16, Legal); 559 setOperationAction(ISD::SRL, MVT::v2i16, Legal); 560 setOperationAction(ISD::SRA, MVT::v2i16, Legal); 561 setOperationAction(ISD::SMIN, MVT::v2i16, Legal); 562 setOperationAction(ISD::UMIN, MVT::v2i16, Legal); 563 setOperationAction(ISD::SMAX, MVT::v2i16, Legal); 564 setOperationAction(ISD::UMAX, MVT::v2i16, Legal); 565 566 setOperationAction(ISD::FADD, MVT::v2f16, Legal); 567 setOperationAction(ISD::FMUL, MVT::v2f16, Legal); 568 setOperationAction(ISD::FMA, MVT::v2f16, Legal); 569 setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal); 570 setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal); 571 setOperationAction(ISD::FCANONICALIZE, MVT::v2f16, Legal); 572 573 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom); 574 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom); 575 576 setOperationAction(ISD::SHL, MVT::v4i16, Custom); 577 setOperationAction(ISD::SRA, MVT::v4i16, Custom); 578 setOperationAction(ISD::SRL, 
MVT::v4i16, Custom); 579 setOperationAction(ISD::ADD, MVT::v4i16, Custom); 580 setOperationAction(ISD::SUB, MVT::v4i16, Custom); 581 setOperationAction(ISD::MUL, MVT::v4i16, Custom); 582 583 setOperationAction(ISD::SMIN, MVT::v4i16, Custom); 584 setOperationAction(ISD::SMAX, MVT::v4i16, Custom); 585 setOperationAction(ISD::UMIN, MVT::v4i16, Custom); 586 setOperationAction(ISD::UMAX, MVT::v4i16, Custom); 587 588 setOperationAction(ISD::FADD, MVT::v4f16, Custom); 589 setOperationAction(ISD::FMUL, MVT::v4f16, Custom); 590 setOperationAction(ISD::FMINNUM, MVT::v4f16, Custom); 591 setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom); 592 593 setOperationAction(ISD::SELECT, MVT::v4i16, Custom); 594 setOperationAction(ISD::SELECT, MVT::v4f16, Custom); 595 } 596 597 setOperationAction(ISD::FNEG, MVT::v4f16, Custom); 598 setOperationAction(ISD::FABS, MVT::v4f16, Custom); 599 600 if (Subtarget->has16BitInsts()) { 601 setOperationAction(ISD::SELECT, MVT::v2i16, Promote); 602 AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32); 603 setOperationAction(ISD::SELECT, MVT::v2f16, Promote); 604 AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32); 605 } else { 606 // Legalization hack. 607 setOperationAction(ISD::SELECT, MVT::v2i16, Custom); 608 setOperationAction(ISD::SELECT, MVT::v2f16, Custom); 609 610 setOperationAction(ISD::FNEG, MVT::v2f16, Custom); 611 setOperationAction(ISD::FABS, MVT::v2f16, Custom); 612 } 613 614 for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) { 615 setOperationAction(ISD::SELECT, VT, Custom); 616 } 617 618 setTargetDAGCombine(ISD::ADD); 619 setTargetDAGCombine(ISD::ADDCARRY); 620 setTargetDAGCombine(ISD::SUB); 621 setTargetDAGCombine(ISD::SUBCARRY); 622 setTargetDAGCombine(ISD::FADD); 623 setTargetDAGCombine(ISD::FSUB); 624 setTargetDAGCombine(ISD::FMINNUM); 625 setTargetDAGCombine(ISD::FMAXNUM); 626 setTargetDAGCombine(ISD::FMA); 627 setTargetDAGCombine(ISD::SMIN); 628 setTargetDAGCombine(ISD::SMAX); 629 setTargetDAGCombine(ISD::UMIN); 630 setTargetDAGCombine(ISD::UMAX); 631 setTargetDAGCombine(ISD::SETCC); 632 setTargetDAGCombine(ISD::AND); 633 setTargetDAGCombine(ISD::OR); 634 setTargetDAGCombine(ISD::XOR); 635 setTargetDAGCombine(ISD::SINT_TO_FP); 636 setTargetDAGCombine(ISD::UINT_TO_FP); 637 setTargetDAGCombine(ISD::FCANONICALIZE); 638 setTargetDAGCombine(ISD::SCALAR_TO_VECTOR); 639 setTargetDAGCombine(ISD::ZERO_EXTEND); 640 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); 641 setTargetDAGCombine(ISD::BUILD_VECTOR); 642 643 // All memory operations. Some folding on the pointer operand is done to help 644 // matching the constant offsets in the addressing modes. 
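  // For example, a combine can pull a constant out of (add ptr, imm) on the
  // pointer operand so that the constant can be selected into the
  // instruction's immediate offset field.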
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);

  // SI at least has hardware support for floating point exceptions, but no way
  // of using or handling them is implemented. They are also optional in OpenCL
  // (Section 7.3).
  setHasFloatingPointExceptions(Subtarget->hasFPExceptions());
}

const GCNSubtarget *SITargetLowering::getSubtarget() const {
  return Subtarget;
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

// v_mad_mix* support a conversion from f16 to f32.
//
// There is only one special case where this would also be OK to use when
// denormals are enabled, which we don't currently handle.
bool SITargetLowering::isFPExtFoldable(unsigned Opcode,
                                       EVT DestVT, EVT SrcVT) const {
  return ((Opcode == ISD::FMAD && Subtarget->hasMadMixInsts()) ||
          (Opcode == ISD::FMA && Subtarget->hasFmaMixInsts())) &&
         DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
         SrcVT.getScalarType() == MVT::f16;
}

bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                    CallingConv::ID CC,
                                                    EVT VT) const {
  // TODO: Consider splitting all arguments into 32-bit pieces.
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32)
      return ScalarVT.getSimpleVT();

    if (Size == 64)
      return MVT::i32;

    if (Size == 16 &&
        Subtarget->has16BitInsts() &&
        isPowerOf2_32(VT.getVectorNumElements()))
      return VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
  }

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();

    if (Size == 32)
      return NumElts;

    if (Size == 64)
      return 2 * NumElts;

    // FIXME: Fails to break down as we want with v3.
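    // For example, with 16-bit instructions a v4f16 argument is passed in two
    // v2f16 registers.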
    if (Size == 16 && Subtarget->has16BitInsts() && isPowerOf2_32(NumElts))
      return VT.getVectorNumElements() / 2;
  }

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
  LLVMContext &Context, CallingConv::ID CC,
  EVT VT, EVT &IntermediateVT,
  unsigned &NumIntermediates, MVT &RegisterVT) const {
  if (CC != CallingConv::AMDGPU_KERNEL && VT.isVector()) {
    unsigned NumElts = VT.getVectorNumElements();
    EVT ScalarVT = VT.getScalarType();
    unsigned Size = ScalarVT.getSizeInBits();
    if (Size == 32) {
      RegisterVT = ScalarVT.getSimpleVT();
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts;
      return NumIntermediates;
    }

    if (Size == 64) {
      RegisterVT = MVT::i32;
      IntermediateVT = RegisterVT;
      NumIntermediates = 2 * NumElts;
      return NumIntermediates;
    }

    // FIXME: We should fix the ABI to be the same on targets without 16-bit
    // support, but unless we can properly handle 3-vectors, it will still be
    // inconsistent.
    if (Size == 16 && Subtarget->has16BitInsts() && isPowerOf2_32(NumElts)) {
      RegisterVT = VT.isInteger() ? MVT::v2i16 : MVT::v2f16;
      IntermediateVT = RegisterVT;
      NumIntermediates = NumElts / 2;
      return NumIntermediates;
    }
  }

  return TargetLowering::getVectorTypeBreakdownForCallingConv(
    Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}

bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          MachineFunction &MF,
                                          unsigned IntrID) const {
  if (const AMDGPU::RsrcIntrinsic *RsrcIntr =
        AMDGPU::lookupRsrcIntrinsic(IntrID)) {
    AttributeList Attr = Intrinsic::getAttributes(CI.getContext(),
                                                  (Intrinsic::ID)IntrID);
    if (Attr.hasFnAttribute(Attribute::ReadNone))
      return false;

    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (RsrcIntr->IsImage) {
      Info.ptrVal = MFI->getImagePSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
      Info.align = 0;
    } else {
      Info.ptrVal = MFI->getBufferPSV(
        *MF.getSubtarget<GCNSubtarget>().getInstrInfo(),
        CI.getArgOperand(RsrcIntr->RsrcArg));
    }

    Info.flags = MachineMemOperand::MODereferenceable;
    if (Attr.hasFnAttribute(Attribute::ReadOnly)) {
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags |= MachineMemOperand::MOLoad;
    } else if (Attr.hasFnAttribute(Attribute::WriteOnly)) {
      Info.opc = ISD::INTRINSIC_VOID;
      Info.memVT = MVT::getVT(CI.getArgOperand(0)->getType());
      Info.flags |= MachineMemOperand::MOStore;
    } else {
      // Atomic
      Info.opc = ISD::INTRINSIC_W_CHAIN;
      Info.memVT = MVT::getVT(CI.getType());
      Info.flags = MachineMemOperand::MOLoad |
                   MachineMemOperand::MOStore |
                   MachineMemOperand::MODereferenceable;

      // XXX - Should this be volatile without known ordering?
820 Info.flags |= MachineMemOperand::MOVolatile; 821 } 822 return true; 823 } 824 825 switch (IntrID) { 826 case Intrinsic::amdgcn_atomic_inc: 827 case Intrinsic::amdgcn_atomic_dec: 828 case Intrinsic::amdgcn_ds_fadd: 829 case Intrinsic::amdgcn_ds_fmin: 830 case Intrinsic::amdgcn_ds_fmax: { 831 Info.opc = ISD::INTRINSIC_W_CHAIN; 832 Info.memVT = MVT::getVT(CI.getType()); 833 Info.ptrVal = CI.getOperand(0); 834 Info.align = 0; 835 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; 836 837 const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4)); 838 if (!Vol || !Vol->isZero()) 839 Info.flags |= MachineMemOperand::MOVolatile; 840 841 return true; 842 } 843 844 default: 845 return false; 846 } 847 } 848 849 bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II, 850 SmallVectorImpl<Value*> &Ops, 851 Type *&AccessTy) const { 852 switch (II->getIntrinsicID()) { 853 case Intrinsic::amdgcn_atomic_inc: 854 case Intrinsic::amdgcn_atomic_dec: 855 case Intrinsic::amdgcn_ds_fadd: 856 case Intrinsic::amdgcn_ds_fmin: 857 case Intrinsic::amdgcn_ds_fmax: { 858 Value *Ptr = II->getArgOperand(0); 859 AccessTy = II->getType(); 860 Ops.push_back(Ptr); 861 return true; 862 } 863 default: 864 return false; 865 } 866 } 867 868 bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const { 869 if (!Subtarget->hasFlatInstOffsets()) { 870 // Flat instructions do not have offsets, and only have the register 871 // address. 872 return AM.BaseOffs == 0 && AM.Scale == 0; 873 } 874 875 // GFX9 added a 13-bit signed offset. When using regular flat instructions, 876 // the sign bit is ignored and is treated as a 12-bit unsigned offset. 877 878 // Just r + i 879 return isUInt<12>(AM.BaseOffs) && AM.Scale == 0; 880 } 881 882 bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const { 883 if (Subtarget->hasFlatGlobalInsts()) 884 return isInt<13>(AM.BaseOffs) && AM.Scale == 0; 885 886 if (!Subtarget->hasAddr64() || Subtarget->useFlatForGlobal()) { 887 // Assume the we will use FLAT for all global memory accesses 888 // on VI. 889 // FIXME: This assumption is currently wrong. On VI we still use 890 // MUBUF instructions for the r + i addressing mode. As currently 891 // implemented, the MUBUF instructions only work on buffer < 4GB. 892 // It may be possible to support > 4GB buffers with MUBUF instructions, 893 // by setting the stride value in the resource descriptor which would 894 // increase the size limit to (stride * 4GB). However, this is risky, 895 // because it has never been validated. 896 return isLegalFlatAddressingMode(AM); 897 } 898 899 return isLegalMUBUFAddressingMode(AM); 900 } 901 902 bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const { 903 // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and 904 // additionally can do r + r + i with addr64. 32-bit has more addressing 905 // mode options. Depending on the resource constant, it can also do 906 // (i64 r0) + (i32 r1) * (i14 i). 907 // 908 // Private arrays end up using a scratch buffer most of the time, so also 909 // assume those use MUBUF instructions. Scratch loads / stores are currently 910 // implemented as mubuf instructions with offen bit set, so slightly 911 // different than the normal addr64. 912 if (!isUInt<12>(AM.BaseOffs)) 913 return false; 914 915 // FIXME: Since we can split immediate into soffset and immediate offset, 916 // would it make sense to allow any immediate? 
917 918 switch (AM.Scale) { 919 case 0: // r + i or just i, depending on HasBaseReg. 920 return true; 921 case 1: 922 return true; // We have r + r or r + i. 923 case 2: 924 if (AM.HasBaseReg) { 925 // Reject 2 * r + r. 926 return false; 927 } 928 929 // Allow 2 * r as r + r 930 // Or 2 * r + i is allowed as r + r + i. 931 return true; 932 default: // Don't allow n * r 933 return false; 934 } 935 } 936 937 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL, 938 const AddrMode &AM, Type *Ty, 939 unsigned AS, Instruction *I) const { 940 // No global is ever allowed as a base. 941 if (AM.BaseGV) 942 return false; 943 944 if (AS == AMDGPUASI.GLOBAL_ADDRESS) 945 return isLegalGlobalAddressingMode(AM); 946 947 if (AS == AMDGPUASI.CONSTANT_ADDRESS || 948 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT) { 949 // If the offset isn't a multiple of 4, it probably isn't going to be 950 // correctly aligned. 951 // FIXME: Can we get the real alignment here? 952 if (AM.BaseOffs % 4 != 0) 953 return isLegalMUBUFAddressingMode(AM); 954 955 // There are no SMRD extloads, so if we have to do a small type access we 956 // will use a MUBUF load. 957 // FIXME?: We also need to do this if unaligned, but we don't know the 958 // alignment here. 959 if (Ty->isSized() && DL.getTypeStoreSize(Ty) < 4) 960 return isLegalGlobalAddressingMode(AM); 961 962 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) { 963 // SMRD instructions have an 8-bit, dword offset on SI. 964 if (!isUInt<8>(AM.BaseOffs / 4)) 965 return false; 966 } else if (Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS) { 967 // On CI+, this can also be a 32-bit literal constant offset. If it fits 968 // in 8-bits, it can use a smaller encoding. 969 if (!isUInt<32>(AM.BaseOffs / 4)) 970 return false; 971 } else if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 972 // On VI, these use the SMEM format and the offset is 20-bit in bytes. 973 if (!isUInt<20>(AM.BaseOffs)) 974 return false; 975 } else 976 llvm_unreachable("unhandled generation"); 977 978 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. 979 return true; 980 981 if (AM.Scale == 1 && AM.HasBaseReg) 982 return true; 983 984 return false; 985 986 } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) { 987 return isLegalMUBUFAddressingMode(AM); 988 } else if (AS == AMDGPUASI.LOCAL_ADDRESS || 989 AS == AMDGPUASI.REGION_ADDRESS) { 990 // Basic, single offset DS instructions allow a 16-bit unsigned immediate 991 // field. 992 // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have 993 // an 8-bit dword offset but we don't know the alignment here. 994 if (!isUInt<16>(AM.BaseOffs)) 995 return false; 996 997 if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg. 998 return true; 999 1000 if (AM.Scale == 1 && AM.HasBaseReg) 1001 return true; 1002 1003 return false; 1004 } else if (AS == AMDGPUASI.FLAT_ADDRESS || 1005 AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) { 1006 // For an unknown address space, this usually means that this is for some 1007 // reason being used for pure arithmetic, and not based on some addressing 1008 // computation. We don't have instructions that compute pointers with any 1009 // addressing modes, so treat them as having no offset like flat 1010 // instructions. 
1011 return isLegalFlatAddressingMode(AM); 1012 } else { 1013 llvm_unreachable("unhandled address space"); 1014 } 1015 } 1016 1017 bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT, 1018 const SelectionDAG &DAG) const { 1019 if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) { 1020 return (MemVT.getSizeInBits() <= 4 * 32); 1021 } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) { 1022 unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize(); 1023 return (MemVT.getSizeInBits() <= MaxPrivateBits); 1024 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) { 1025 return (MemVT.getSizeInBits() <= 2 * 32); 1026 } 1027 return true; 1028 } 1029 1030 bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT, 1031 unsigned AddrSpace, 1032 unsigned Align, 1033 bool *IsFast) const { 1034 if (IsFast) 1035 *IsFast = false; 1036 1037 // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96, 1038 // which isn't a simple VT. 1039 // Until MVT is extended to handle this, simply check for the size and 1040 // rely on the condition below: allow accesses if the size is a multiple of 4. 1041 if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 && 1042 VT.getStoreSize() > 16)) { 1043 return false; 1044 } 1045 1046 if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS || 1047 AddrSpace == AMDGPUASI.REGION_ADDRESS) { 1048 // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte 1049 // aligned, 8 byte access in a single operation using ds_read2/write2_b32 1050 // with adjacent offsets. 1051 bool AlignedBy4 = (Align % 4 == 0); 1052 if (IsFast) 1053 *IsFast = AlignedBy4; 1054 1055 return AlignedBy4; 1056 } 1057 1058 // FIXME: We have to be conservative here and assume that flat operations 1059 // will access scratch. If we had access to the IR function, then we 1060 // could determine if any private memory was used in the function. 1061 if (!Subtarget->hasUnalignedScratchAccess() && 1062 (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS || 1063 AddrSpace == AMDGPUASI.FLAT_ADDRESS)) { 1064 return false; 1065 } 1066 1067 if (Subtarget->hasUnalignedBufferAccess()) { 1068 // If we have an uniform constant load, it still requires using a slow 1069 // buffer instruction if unaligned. 1070 if (IsFast) { 1071 *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS || 1072 AddrSpace == AMDGPUASI.CONSTANT_ADDRESS_32BIT) ? 1073 (Align % 4 == 0) : true; 1074 } 1075 1076 return true; 1077 } 1078 1079 // Smaller than dword value must be aligned. 1080 if (VT.bitsLT(MVT::i32)) 1081 return false; 1082 1083 // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the 1084 // byte-address are ignored, thus forcing Dword alignment. 1085 // This applies to private, global, and constant memory. 1086 if (IsFast) 1087 *IsFast = true; 1088 1089 return VT.bitsGT(MVT::i32) && Align % 4 == 0; 1090 } 1091 1092 EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign, 1093 unsigned SrcAlign, bool IsMemset, 1094 bool ZeroMemset, 1095 bool MemcpyStrSrc, 1096 MachineFunction &MF) const { 1097 // FIXME: Should account for address space here. 1098 1099 // The default fallback uses the private pointer size as a guess for a type to 1100 // use. Make sure we switch these to 64-bit accesses. 1101 1102 if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global 1103 return MVT::v4i32; 1104 1105 if (Size >= 8 && DstAlign >= 4) 1106 return MVT::v2i32; 1107 1108 // Use the default. 
1109 return MVT::Other; 1110 } 1111 1112 static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) { 1113 return AS == AMDGPUASI.GLOBAL_ADDRESS || 1114 AS == AMDGPUASI.FLAT_ADDRESS || 1115 AS == AMDGPUASI.CONSTANT_ADDRESS || 1116 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT; 1117 } 1118 1119 bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS, 1120 unsigned DestAS) const { 1121 return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) && 1122 isFlatGlobalAddrSpace(DestAS, AMDGPUASI); 1123 } 1124 1125 bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const { 1126 const MemSDNode *MemNode = cast<MemSDNode>(N); 1127 const Value *Ptr = MemNode->getMemOperand()->getValue(); 1128 const Instruction *I = dyn_cast_or_null<Instruction>(Ptr); 1129 return I && I->getMetadata("amdgpu.noclobber"); 1130 } 1131 1132 bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS, 1133 unsigned DestAS) const { 1134 // Flat -> private/local is a simple truncate. 1135 // Flat -> global is no-op 1136 if (SrcAS == AMDGPUASI.FLAT_ADDRESS) 1137 return true; 1138 1139 return isNoopAddrSpaceCast(SrcAS, DestAS); 1140 } 1141 1142 bool SITargetLowering::isMemOpUniform(const SDNode *N) const { 1143 const MemSDNode *MemNode = cast<MemSDNode>(N); 1144 1145 return AMDGPUInstrInfo::isUniformMMO(MemNode->getMemOperand()); 1146 } 1147 1148 TargetLoweringBase::LegalizeTypeAction 1149 SITargetLowering::getPreferredVectorAction(EVT VT) const { 1150 if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16)) 1151 return TypeSplitVector; 1152 1153 return TargetLoweringBase::getPreferredVectorAction(VT); 1154 } 1155 1156 bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 1157 Type *Ty) const { 1158 // FIXME: Could be smarter if called for vector constants. 1159 return true; 1160 } 1161 1162 bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const { 1163 if (Subtarget->has16BitInsts() && VT == MVT::i16) { 1164 switch (Op) { 1165 case ISD::LOAD: 1166 case ISD::STORE: 1167 1168 // These operations are done with 32-bit instructions anyway. 1169 case ISD::AND: 1170 case ISD::OR: 1171 case ISD::XOR: 1172 case ISD::SELECT: 1173 // TODO: Extensions? 1174 return true; 1175 default: 1176 return false; 1177 } 1178 } 1179 1180 // SimplifySetCC uses this function to determine whether or not it should 1181 // create setcc with i1 operands. We don't have instructions for i1 setcc. 
1182 if (VT == MVT::i1 && Op == ISD::SETCC) 1183 return false; 1184 1185 return TargetLowering::isTypeDesirableForOp(Op, VT); 1186 } 1187 1188 SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG, 1189 const SDLoc &SL, 1190 SDValue Chain, 1191 uint64_t Offset) const { 1192 const DataLayout &DL = DAG.getDataLayout(); 1193 MachineFunction &MF = DAG.getMachineFunction(); 1194 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1195 1196 const ArgDescriptor *InputPtrReg; 1197 const TargetRegisterClass *RC; 1198 1199 std::tie(InputPtrReg, RC) 1200 = Info->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); 1201 1202 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 1203 MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS); 1204 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL, 1205 MRI.getLiveInVirtReg(InputPtrReg->getRegister()), PtrVT); 1206 1207 return DAG.getObjectPtrOffset(SL, BasePtr, Offset); 1208 } 1209 1210 SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG, 1211 const SDLoc &SL) const { 1212 uint64_t Offset = getImplicitParameterOffset(DAG.getMachineFunction(), 1213 FIRST_IMPLICIT); 1214 return lowerKernArgParameterPtr(DAG, SL, DAG.getEntryNode(), Offset); 1215 } 1216 1217 SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT, 1218 const SDLoc &SL, SDValue Val, 1219 bool Signed, 1220 const ISD::InputArg *Arg) const { 1221 if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) && 1222 VT.bitsLT(MemVT)) { 1223 unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext; 1224 Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT)); 1225 } 1226 1227 if (MemVT.isFloatingPoint()) 1228 Val = getFPExtOrFPTrunc(DAG, Val, SL, VT); 1229 else if (Signed) 1230 Val = DAG.getSExtOrTrunc(Val, SL, VT); 1231 else 1232 Val = DAG.getZExtOrTrunc(Val, SL, VT); 1233 1234 return Val; 1235 } 1236 1237 SDValue SITargetLowering::lowerKernargMemParameter( 1238 SelectionDAG &DAG, EVT VT, EVT MemVT, 1239 const SDLoc &SL, SDValue Chain, 1240 uint64_t Offset, unsigned Align, bool Signed, 1241 const ISD::InputArg *Arg) const { 1242 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext()); 1243 PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS); 1244 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); 1245 1246 // Try to avoid using an extload by loading earlier than the argument address, 1247 // and extracting the relevant bits. The load should hopefully be merged with 1248 // the previous argument. 1249 if (MemVT.getStoreSize() < 4 && Align < 4) { 1250 // TODO: Handle align < 4 and size >= 4 (can happen with packed structs). 1251 int64_t AlignDownOffset = alignDown(Offset, 4); 1252 int64_t OffsetDiff = Offset - AlignDownOffset; 1253 1254 EVT IntVT = MemVT.changeTypeToInteger(); 1255 1256 // TODO: If we passed in the base kernel offset we could have a better 1257 // alignment than 4, but we don't really need it. 
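    // For example, an i8 argument at offset 2 is loaded as the dword at
    // offset 0 and shifted right by OffsetDiff * 8 == 16 bits before the
    // truncate.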
1258 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, AlignDownOffset); 1259 SDValue Load = DAG.getLoad(MVT::i32, SL, Chain, Ptr, PtrInfo, 4, 1260 MachineMemOperand::MODereferenceable | 1261 MachineMemOperand::MOInvariant); 1262 1263 SDValue ShiftAmt = DAG.getConstant(OffsetDiff * 8, SL, MVT::i32); 1264 SDValue Extract = DAG.getNode(ISD::SRL, SL, MVT::i32, Load, ShiftAmt); 1265 1266 SDValue ArgVal = DAG.getNode(ISD::TRUNCATE, SL, IntVT, Extract); 1267 ArgVal = DAG.getNode(ISD::BITCAST, SL, MemVT, ArgVal); 1268 ArgVal = convertArgType(DAG, VT, MemVT, SL, ArgVal, Signed, Arg); 1269 1270 1271 return DAG.getMergeValues({ ArgVal, Load.getValue(1) }, SL); 1272 } 1273 1274 SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset); 1275 SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align, 1276 MachineMemOperand::MODereferenceable | 1277 MachineMemOperand::MOInvariant); 1278 1279 SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg); 1280 return DAG.getMergeValues({ Val, Load.getValue(1) }, SL); 1281 } 1282 1283 SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA, 1284 const SDLoc &SL, SDValue Chain, 1285 const ISD::InputArg &Arg) const { 1286 MachineFunction &MF = DAG.getMachineFunction(); 1287 MachineFrameInfo &MFI = MF.getFrameInfo(); 1288 1289 if (Arg.Flags.isByVal()) { 1290 unsigned Size = Arg.Flags.getByValSize(); 1291 int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false); 1292 return DAG.getFrameIndex(FrameIdx, MVT::i32); 1293 } 1294 1295 unsigned ArgOffset = VA.getLocMemOffset(); 1296 unsigned ArgSize = VA.getValVT().getStoreSize(); 1297 1298 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true); 1299 1300 // Create load nodes to retrieve arguments from the stack. 1301 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); 1302 SDValue ArgValue; 1303 1304 // For NON_EXTLOAD, generic code in getLoad assert(ValVT == MemVT) 1305 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; 1306 MVT MemVT = VA.getValVT(); 1307 1308 switch (VA.getLocInfo()) { 1309 default: 1310 break; 1311 case CCValAssign::BCvt: 1312 MemVT = VA.getLocVT(); 1313 break; 1314 case CCValAssign::SExt: 1315 ExtType = ISD::SEXTLOAD; 1316 break; 1317 case CCValAssign::ZExt: 1318 ExtType = ISD::ZEXTLOAD; 1319 break; 1320 case CCValAssign::AExt: 1321 ExtType = ISD::EXTLOAD; 1322 break; 1323 } 1324 1325 ArgValue = DAG.getExtLoad( 1326 ExtType, SL, VA.getLocVT(), Chain, FIN, 1327 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), 1328 MemVT); 1329 return ArgValue; 1330 } 1331 1332 SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG, 1333 const SIMachineFunctionInfo &MFI, 1334 EVT VT, 1335 AMDGPUFunctionArgInfo::PreloadedValue PVID) const { 1336 const ArgDescriptor *Reg; 1337 const TargetRegisterClass *RC; 1338 1339 std::tie(Reg, RC) = MFI.getPreloadedValue(PVID); 1340 return CreateLiveInRegister(DAG, RC, Reg->getRegister(), VT); 1341 } 1342 1343 static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits, 1344 CallingConv::ID CallConv, 1345 ArrayRef<ISD::InputArg> Ins, 1346 BitVector &Skipped, 1347 FunctionType *FType, 1348 SIMachineFunctionInfo *Info) { 1349 for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) { 1350 const ISD::InputArg *Arg = &Ins[I]; 1351 1352 assert(!Arg->VT.isVector() && "vector type argument should have been split"); 1353 1354 // First check if it's a PS input addr. 
1355 if (CallConv == CallingConv::AMDGPU_PS && 1356 !Arg->Flags.isInReg() && !Arg->Flags.isByVal() && PSInputNum <= 15) { 1357 1358 bool SkipArg = !Arg->Used && !Info->isPSInputAllocated(PSInputNum); 1359 1360 // Inconveniently only the first part of the split is marked as isSplit, 1361 // so skip to the end. We only want to increment PSInputNum once for the 1362 // entire split argument. 1363 if (Arg->Flags.isSplit()) { 1364 while (!Arg->Flags.isSplitEnd()) { 1365 assert(!Arg->VT.isVector() && 1366 "unexpected vector split in ps argument type"); 1367 if (!SkipArg) 1368 Splits.push_back(*Arg); 1369 Arg = &Ins[++I]; 1370 } 1371 } 1372 1373 if (SkipArg) { 1374 // We can safely skip PS inputs. 1375 Skipped.set(Arg->getOrigArgIndex()); 1376 ++PSInputNum; 1377 continue; 1378 } 1379 1380 Info->markPSInputAllocated(PSInputNum); 1381 if (Arg->Used) 1382 Info->markPSInputEnabled(PSInputNum); 1383 1384 ++PSInputNum; 1385 } 1386 1387 Splits.push_back(*Arg); 1388 } 1389 } 1390 1391 // Allocate special inputs passed in VGPRs. 1392 static void allocateSpecialEntryInputVGPRs(CCState &CCInfo, 1393 MachineFunction &MF, 1394 const SIRegisterInfo &TRI, 1395 SIMachineFunctionInfo &Info) { 1396 if (Info.hasWorkItemIDX()) { 1397 unsigned Reg = AMDGPU::VGPR0; 1398 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 1399 1400 CCInfo.AllocateReg(Reg); 1401 Info.setWorkItemIDX(ArgDescriptor::createRegister(Reg)); 1402 } 1403 1404 if (Info.hasWorkItemIDY()) { 1405 unsigned Reg = AMDGPU::VGPR1; 1406 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 1407 1408 CCInfo.AllocateReg(Reg); 1409 Info.setWorkItemIDY(ArgDescriptor::createRegister(Reg)); 1410 } 1411 1412 if (Info.hasWorkItemIDZ()) { 1413 unsigned Reg = AMDGPU::VGPR2; 1414 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 1415 1416 CCInfo.AllocateReg(Reg); 1417 Info.setWorkItemIDZ(ArgDescriptor::createRegister(Reg)); 1418 } 1419 } 1420 1421 // Try to allocate a VGPR at the end of the argument list, or if no argument 1422 // VGPRs are left allocating a stack slot. 1423 static ArgDescriptor allocateVGPR32Input(CCState &CCInfo) { 1424 ArrayRef<MCPhysReg> ArgVGPRs 1425 = makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), 32); 1426 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs); 1427 if (RegIdx == ArgVGPRs.size()) { 1428 // Spill to stack required. 
1429 int64_t Offset = CCInfo.AllocateStack(4, 4); 1430 1431 return ArgDescriptor::createStack(Offset); 1432 } 1433 1434 unsigned Reg = ArgVGPRs[RegIdx]; 1435 Reg = CCInfo.AllocateReg(Reg); 1436 assert(Reg != AMDGPU::NoRegister); 1437 1438 MachineFunction &MF = CCInfo.getMachineFunction(); 1439 MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass); 1440 return ArgDescriptor::createRegister(Reg); 1441 } 1442 1443 static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo, 1444 const TargetRegisterClass *RC, 1445 unsigned NumArgRegs) { 1446 ArrayRef<MCPhysReg> ArgSGPRs = makeArrayRef(RC->begin(), 32); 1447 unsigned RegIdx = CCInfo.getFirstUnallocated(ArgSGPRs); 1448 if (RegIdx == ArgSGPRs.size()) 1449 report_fatal_error("ran out of SGPRs for arguments"); 1450 1451 unsigned Reg = ArgSGPRs[RegIdx]; 1452 Reg = CCInfo.AllocateReg(Reg); 1453 assert(Reg != AMDGPU::NoRegister); 1454 1455 MachineFunction &MF = CCInfo.getMachineFunction(); 1456 MF.addLiveIn(Reg, RC); 1457 return ArgDescriptor::createRegister(Reg); 1458 } 1459 1460 static ArgDescriptor allocateSGPR32Input(CCState &CCInfo) { 1461 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_32RegClass, 32); 1462 } 1463 1464 static ArgDescriptor allocateSGPR64Input(CCState &CCInfo) { 1465 return allocateSGPR32InputImpl(CCInfo, &AMDGPU::SGPR_64RegClass, 16); 1466 } 1467 1468 static void allocateSpecialInputVGPRs(CCState &CCInfo, 1469 MachineFunction &MF, 1470 const SIRegisterInfo &TRI, 1471 SIMachineFunctionInfo &Info) { 1472 if (Info.hasWorkItemIDX()) 1473 Info.setWorkItemIDX(allocateVGPR32Input(CCInfo)); 1474 1475 if (Info.hasWorkItemIDY()) 1476 Info.setWorkItemIDY(allocateVGPR32Input(CCInfo)); 1477 1478 if (Info.hasWorkItemIDZ()) 1479 Info.setWorkItemIDZ(allocateVGPR32Input(CCInfo)); 1480 } 1481 1482 static void allocateSpecialInputSGPRs(CCState &CCInfo, 1483 MachineFunction &MF, 1484 const SIRegisterInfo &TRI, 1485 SIMachineFunctionInfo &Info) { 1486 auto &ArgInfo = Info.getArgInfo(); 1487 1488 // TODO: Unify handling with private memory pointers. 1489 1490 if (Info.hasDispatchPtr()) 1491 ArgInfo.DispatchPtr = allocateSGPR64Input(CCInfo); 1492 1493 if (Info.hasQueuePtr()) 1494 ArgInfo.QueuePtr = allocateSGPR64Input(CCInfo); 1495 1496 if (Info.hasKernargSegmentPtr()) 1497 ArgInfo.KernargSegmentPtr = allocateSGPR64Input(CCInfo); 1498 1499 if (Info.hasDispatchID()) 1500 ArgInfo.DispatchID = allocateSGPR64Input(CCInfo); 1501 1502 // flat_scratch_init is not applicable for non-kernel functions. 1503 1504 if (Info.hasWorkGroupIDX()) 1505 ArgInfo.WorkGroupIDX = allocateSGPR32Input(CCInfo); 1506 1507 if (Info.hasWorkGroupIDY()) 1508 ArgInfo.WorkGroupIDY = allocateSGPR32Input(CCInfo); 1509 1510 if (Info.hasWorkGroupIDZ()) 1511 ArgInfo.WorkGroupIDZ = allocateSGPR32Input(CCInfo); 1512 1513 if (Info.hasImplicitArgPtr()) 1514 ArgInfo.ImplicitArgPtr = allocateSGPR64Input(CCInfo); 1515 } 1516 1517 // Allocate special inputs passed in user SGPRs. 1518 static void allocateHSAUserSGPRs(CCState &CCInfo, 1519 MachineFunction &MF, 1520 const SIRegisterInfo &TRI, 1521 SIMachineFunctionInfo &Info) { 1522 if (Info.hasImplicitBufferPtr()) { 1523 unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI); 1524 MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass); 1525 CCInfo.AllocateReg(ImplicitBufferPtrReg); 1526 } 1527 1528 // FIXME: How should these inputs interact with inreg / custom SGPR inputs? 
1529 if (Info.hasPrivateSegmentBuffer()) { 1530 unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI); 1531 MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass); 1532 CCInfo.AllocateReg(PrivateSegmentBufferReg); 1533 } 1534 1535 if (Info.hasDispatchPtr()) { 1536 unsigned DispatchPtrReg = Info.addDispatchPtr(TRI); 1537 MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass); 1538 CCInfo.AllocateReg(DispatchPtrReg); 1539 } 1540 1541 if (Info.hasQueuePtr()) { 1542 unsigned QueuePtrReg = Info.addQueuePtr(TRI); 1543 MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass); 1544 CCInfo.AllocateReg(QueuePtrReg); 1545 } 1546 1547 if (Info.hasKernargSegmentPtr()) { 1548 unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI); 1549 MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass); 1550 CCInfo.AllocateReg(InputPtrReg); 1551 } 1552 1553 if (Info.hasDispatchID()) { 1554 unsigned DispatchIDReg = Info.addDispatchID(TRI); 1555 MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass); 1556 CCInfo.AllocateReg(DispatchIDReg); 1557 } 1558 1559 if (Info.hasFlatScratchInit()) { 1560 unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI); 1561 MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass); 1562 CCInfo.AllocateReg(FlatScratchInitReg); 1563 } 1564 1565 // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read 1566 // these from the dispatch pointer. 1567 } 1568 1569 // Allocate special input registers that are initialized per-wave. 1570 static void allocateSystemSGPRs(CCState &CCInfo, 1571 MachineFunction &MF, 1572 SIMachineFunctionInfo &Info, 1573 CallingConv::ID CallConv, 1574 bool IsShader) { 1575 if (Info.hasWorkGroupIDX()) { 1576 unsigned Reg = Info.addWorkGroupIDX(); 1577 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); 1578 CCInfo.AllocateReg(Reg); 1579 } 1580 1581 if (Info.hasWorkGroupIDY()) { 1582 unsigned Reg = Info.addWorkGroupIDY(); 1583 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); 1584 CCInfo.AllocateReg(Reg); 1585 } 1586 1587 if (Info.hasWorkGroupIDZ()) { 1588 unsigned Reg = Info.addWorkGroupIDZ(); 1589 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); 1590 CCInfo.AllocateReg(Reg); 1591 } 1592 1593 if (Info.hasWorkGroupInfo()) { 1594 unsigned Reg = Info.addWorkGroupInfo(); 1595 MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass); 1596 CCInfo.AllocateReg(Reg); 1597 } 1598 1599 if (Info.hasPrivateSegmentWaveByteOffset()) { 1600 // Scratch wave offset passed in system SGPR. 1601 unsigned PrivateSegmentWaveByteOffsetReg; 1602 1603 if (IsShader) { 1604 PrivateSegmentWaveByteOffsetReg = 1605 Info.getPrivateSegmentWaveByteOffsetSystemSGPR(); 1606 1607 // This is true if the scratch wave byte offset doesn't have a fixed 1608 // location. 1609 if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) { 1610 PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo); 1611 Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg); 1612 } 1613 } else 1614 PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset(); 1615 1616 MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass); 1617 CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg); 1618 } 1619 } 1620 1621 static void reservePrivateMemoryRegs(const TargetMachine &TM, 1622 MachineFunction &MF, 1623 const SIRegisterInfo &TRI, 1624 SIMachineFunctionInfo &Info) { 1625 // Now that we've figured out where the scratch register inputs are, see if 1626 // should reserve the arguments and use them directly. 
1627 MachineFrameInfo &MFI = MF.getFrameInfo(); 1628 bool HasStackObjects = MFI.hasStackObjects(); 1629 1630 // Record that we know we have non-spill stack objects so we don't need to 1631 // check all stack objects later. 1632 if (HasStackObjects) 1633 Info.setHasNonSpillStackObjects(true); 1634 1635 // Everything live out of a block is spilled with fast regalloc, so it's 1636 // almost certain that spilling will be required. 1637 if (TM.getOptLevel() == CodeGenOpt::None) 1638 HasStackObjects = true; 1639 1640 // For now assume stack access is needed in any callee functions, so we need 1641 // the scratch registers to pass in. 1642 bool RequiresStackAccess = HasStackObjects || MFI.hasCalls(); 1643 1644 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1645 if (ST.isAmdCodeObjectV2(MF.getFunction())) { 1646 if (RequiresStackAccess) { 1647 // If we have stack objects, we unquestionably need the private buffer 1648 // resource. For the Code Object V2 ABI, this will be the first 4 user 1649 // SGPR inputs. We can reserve those and use them directly. 1650 1651 unsigned PrivateSegmentBufferReg = Info.getPreloadedReg( 1652 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER); 1653 Info.setScratchRSrcReg(PrivateSegmentBufferReg); 1654 1655 if (MFI.hasCalls()) { 1656 // If we have calls, we need to keep the frame register in a register 1657 // that won't be clobbered by a call, so ensure it is copied somewhere. 1658 1659 // This is not a problem for the scratch wave offset, because the same 1660 // registers are reserved in all functions. 1661 1662 // FIXME: Nothing is really ensuring this is a call preserved register, 1663 // it's just selected from the end so it happens to be. 1664 unsigned ReservedOffsetReg 1665 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); 1666 Info.setScratchWaveOffsetReg(ReservedOffsetReg); 1667 } else { 1668 unsigned PrivateSegmentWaveByteOffsetReg = Info.getPreloadedReg( 1669 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 1670 Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg); 1671 } 1672 } else { 1673 unsigned ReservedBufferReg 1674 = TRI.reservedPrivateSegmentBufferReg(MF); 1675 unsigned ReservedOffsetReg 1676 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); 1677 1678 // We tentatively reserve the last registers (skipping the last two 1679 // which may contain VCC). After register allocation, we'll replace 1680 // these with the ones immediately after those which were really 1681 // allocated. In the prologue copies will be inserted from the argument 1682 // to these reserved registers. 1683 Info.setScratchRSrcReg(ReservedBufferReg); 1684 Info.setScratchWaveOffsetReg(ReservedOffsetReg); 1685 } 1686 } else { 1687 unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF); 1688 1689 // Without HSA, relocations are used for the scratch pointer and the 1690 // buffer resource setup is always inserted in the prologue. Scratch wave 1691 // offset is still in an input SGPR. 
1692 Info.setScratchRSrcReg(ReservedBufferReg); 1693 1694 if (HasStackObjects && !MFI.hasCalls()) { 1695 unsigned ScratchWaveOffsetReg = Info.getPreloadedReg( 1696 AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET); 1697 Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg); 1698 } else { 1699 unsigned ReservedOffsetReg 1700 = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF); 1701 Info.setScratchWaveOffsetReg(ReservedOffsetReg); 1702 } 1703 } 1704 } 1705 1706 bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const { 1707 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 1708 return !Info->isEntryFunction(); 1709 } 1710 1711 void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 1712 1713 } 1714 1715 void SITargetLowering::insertCopiesSplitCSR( 1716 MachineBasicBlock *Entry, 1717 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 1718 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 1719 1720 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 1721 if (!IStart) 1722 return; 1723 1724 const TargetInstrInfo *TII = Subtarget->getInstrInfo(); 1725 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 1726 MachineBasicBlock::iterator MBBI = Entry->begin(); 1727 for (const MCPhysReg *I = IStart; *I; ++I) { 1728 const TargetRegisterClass *RC = nullptr; 1729 if (AMDGPU::SReg_64RegClass.contains(*I)) 1730 RC = &AMDGPU::SGPR_64RegClass; 1731 else if (AMDGPU::SReg_32RegClass.contains(*I)) 1732 RC = &AMDGPU::SGPR_32RegClass; 1733 else 1734 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 1735 1736 unsigned NewVR = MRI->createVirtualRegister(RC); 1737 // Create copy from CSR to a virtual register. 1738 Entry->addLiveIn(*I); 1739 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 1740 .addReg(*I); 1741 1742 // Insert the copy-back instructions right before the terminator. 1743 for (auto *Exit : Exits) 1744 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 1745 TII->get(TargetOpcode::COPY), *I) 1746 .addReg(NewVR); 1747 } 1748 } 1749 1750 SDValue SITargetLowering::LowerFormalArguments( 1751 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 1752 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 1753 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 1754 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 1755 1756 MachineFunction &MF = DAG.getMachineFunction(); 1757 const Function &Fn = MF.getFunction(); 1758 FunctionType *FType = MF.getFunction().getFunctionType(); 1759 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 1760 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 1761 1762 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) { 1763 DiagnosticInfoUnsupported NoGraphicsHSA( 1764 Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc()); 1765 DAG.getContext()->diagnose(NoGraphicsHSA); 1766 return DAG.getEntryNode(); 1767 } 1768 1769 // Create stack objects that are used for emitting debugger prologue if 1770 // "amdgpu-debugger-emit-prologue" attribute was specified. 
1771 if (ST.debuggerEmitPrologue())
1772 createDebuggerPrologueStackObjects(MF);
1773
1774 SmallVector<ISD::InputArg, 16> Splits;
1775 SmallVector<CCValAssign, 16> ArgLocs;
1776 BitVector Skipped(Ins.size());
1777 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1778 *DAG.getContext());
1779
1780 bool IsShader = AMDGPU::isShader(CallConv);
1781 bool IsKernel = AMDGPU::isKernel(CallConv);
1782 bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);
1783
1784 if (!IsEntryFunc) {
1785 // 4 bytes are reserved at offset 0 for the emergency stack slot. Skip over
1786 // this when allocating argument fixed offsets.
1787 CCInfo.AllocateStack(4, 4);
1788 }
1789
1790 if (IsShader) {
1791 processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);
1792
1793 // At least one interpolation mode must be enabled or else the GPU will
1794 // hang.
1795 //
1796 // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
1797 // set PSInputAddr, the user wants to enable some bits after compilation
1798 // based on run-time states. Since we can't know what the final PSInputEna
1799 // will look like, we shouldn't do anything here and the user should take
1800 // responsibility for the correct programming.
1801 //
1802 // Otherwise, the following restrictions apply:
1803 // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
1804 // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
1805 // enabled too.
1806 if (CallConv == CallingConv::AMDGPU_PS) {
1807 if ((Info->getPSInputAddr() & 0x7F) == 0 ||
1808 ((Info->getPSInputAddr() & 0xF) == 0 &&
1809 Info->isPSInputAllocated(11))) {
1810 CCInfo.AllocateReg(AMDGPU::VGPR0);
1811 CCInfo.AllocateReg(AMDGPU::VGPR1);
1812 Info->markPSInputAllocated(0);
1813 Info->markPSInputEnabled(0);
1814 }
1815 if (Subtarget->isAmdPalOS()) {
1816 // For isAmdPalOS, the user does not enable some bits after compilation
1817 // based on run-time states; the register values being generated here are
1818 // the final ones set in hardware. Therefore we need to apply the
1819 // workaround to PSInputAddr and PSInputEnable together. (The case where
1820 // a bit is set in PSInputAddr but not PSInputEnable is where the
1821 // frontend set up an input arg for a particular interpolation mode, but
1822 // nothing uses that input arg. Really we should have an earlier pass
1823 // that removes such an arg.)
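      // As above: at least one of the PERSP_*/LINEAR_* enables (0x7F) must
      // remain set, and if POS_W_FLOAT (bit 11) is enabled then one of the
      // PERSP_* enables (0xF) must be set as well.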
1824 unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable(); 1825 if ((PsInputBits & 0x7F) == 0 || 1826 ((PsInputBits & 0xF) == 0 && 1827 (PsInputBits >> 11 & 1))) 1828 Info->markPSInputEnabled( 1829 countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined)); 1830 } 1831 } 1832 1833 assert(!Info->hasDispatchPtr() && 1834 !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() && 1835 !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() && 1836 !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() && 1837 !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() && 1838 !Info->hasWorkItemIDZ()); 1839 } else if (IsKernel) { 1840 assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX()); 1841 } else { 1842 Splits.append(Ins.begin(), Ins.end()); 1843 } 1844 1845 if (IsEntryFunc) { 1846 allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info); 1847 allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info); 1848 } 1849 1850 if (IsKernel) { 1851 analyzeFormalArgumentsCompute(CCInfo, Ins); 1852 } else { 1853 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg); 1854 CCInfo.AnalyzeFormalArguments(Splits, AssignFn); 1855 } 1856 1857 SmallVector<SDValue, 16> Chains; 1858 1859 // FIXME: This is the minimum kernel argument alignment. We should improve 1860 // this to the maximum alignment of the arguments. 1861 // 1862 // FIXME: Alignment of explicit arguments totally broken with non-0 explicit 1863 // kern arg offset. 1864 const unsigned KernelArgBaseAlign = 16; 1865 1866 for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) { 1867 const ISD::InputArg &Arg = Ins[i]; 1868 if (Arg.isOrigArg() && Skipped[Arg.getOrigArgIndex()]) { 1869 InVals.push_back(DAG.getUNDEF(Arg.VT)); 1870 continue; 1871 } 1872 1873 CCValAssign &VA = ArgLocs[ArgIdx++]; 1874 MVT VT = VA.getLocVT(); 1875 1876 if (IsEntryFunc && VA.isMemLoc()) { 1877 VT = Ins[i].VT; 1878 EVT MemVT = VA.getLocVT(); 1879 1880 const uint64_t Offset = VA.getLocMemOffset(); 1881 unsigned Align = MinAlign(KernelArgBaseAlign, Offset); 1882 1883 SDValue Arg = lowerKernargMemParameter( 1884 DAG, VT, MemVT, DL, Chain, Offset, Align, Ins[i].Flags.isSExt(), &Ins[i]); 1885 Chains.push_back(Arg.getValue(1)); 1886 1887 auto *ParamTy = 1888 dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex())); 1889 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && 1890 ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { 1891 // On SI local pointers are just offsets into LDS, so they are always 1892 // less than 16-bits. On CI and newer they could potentially be 1893 // real pointers, so we can't guarantee their size. 1894 Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg, 1895 DAG.getValueType(MVT::i16)); 1896 } 1897 1898 InVals.push_back(Arg); 1899 continue; 1900 } else if (!IsEntryFunc && VA.isMemLoc()) { 1901 SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg); 1902 InVals.push_back(Val); 1903 if (!Arg.Flags.isByVal()) 1904 Chains.push_back(Val.getValue(1)); 1905 continue; 1906 } 1907 1908 assert(VA.isRegLoc() && "Parameter must be in a register!"); 1909 1910 unsigned Reg = VA.getLocReg(); 1911 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT); 1912 EVT ValVT = VA.getValVT(); 1913 1914 Reg = MF.addLiveIn(Reg, RC); 1915 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT); 1916 1917 if (Arg.Flags.isSRet() && !getSubtarget()->enableHugePrivateBuffer()) { 1918 // The return object should be reasonably addressable. 1919 1920 // FIXME: This helps when the return is a real sret. 
If it is a 1921 // automatically inserted sret (i.e. CanLowerReturn returns false), an 1922 // extra copy is inserted in SelectionDAGBuilder which obscures this. 1923 unsigned NumBits = 32 - AssumeFrameIndexHighZeroBits; 1924 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, 1925 DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), NumBits))); 1926 } 1927 1928 // If this is an 8 or 16-bit value, it is really passed promoted 1929 // to 32 bits. Insert an assert[sz]ext to capture this, then 1930 // truncate to the right size. 1931 switch (VA.getLocInfo()) { 1932 case CCValAssign::Full: 1933 break; 1934 case CCValAssign::BCvt: 1935 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val); 1936 break; 1937 case CCValAssign::SExt: 1938 Val = DAG.getNode(ISD::AssertSext, DL, VT, Val, 1939 DAG.getValueType(ValVT)); 1940 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 1941 break; 1942 case CCValAssign::ZExt: 1943 Val = DAG.getNode(ISD::AssertZext, DL, VT, Val, 1944 DAG.getValueType(ValVT)); 1945 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 1946 break; 1947 case CCValAssign::AExt: 1948 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val); 1949 break; 1950 default: 1951 llvm_unreachable("Unknown loc info!"); 1952 } 1953 1954 if (IsShader && Arg.VT.isVector()) { 1955 // Build a vector from the registers 1956 Type *ParamType = FType->getParamType(Arg.getOrigArgIndex()); 1957 unsigned NumElements = ParamType->getVectorNumElements(); 1958 1959 SmallVector<SDValue, 4> Regs; 1960 Regs.push_back(Val); 1961 for (unsigned j = 1; j != NumElements; ++j) { 1962 Reg = ArgLocs[ArgIdx++].getLocReg(); 1963 Reg = MF.addLiveIn(Reg, RC); 1964 1965 SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT); 1966 Regs.push_back(Copy); 1967 } 1968 1969 // Fill up the missing vector elements 1970 NumElements = Arg.VT.getVectorNumElements() - NumElements; 1971 Regs.append(NumElements, DAG.getUNDEF(VT)); 1972 1973 InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs)); 1974 continue; 1975 } 1976 1977 InVals.push_back(Val); 1978 } 1979 1980 if (!IsEntryFunc) { 1981 // Special inputs come after user arguments. 1982 allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info); 1983 } 1984 1985 // Start adding system SGPRs. 1986 if (IsEntryFunc) { 1987 allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader); 1988 } else { 1989 CCInfo.AllocateReg(Info->getScratchRSrcReg()); 1990 CCInfo.AllocateReg(Info->getScratchWaveOffsetReg()); 1991 CCInfo.AllocateReg(Info->getFrameOffsetReg()); 1992 allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info); 1993 } 1994 1995 auto &ArgUsageInfo = 1996 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); 1997 ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo()); 1998 1999 unsigned StackArgSize = CCInfo.getNextStackOffset(); 2000 Info->setBytesInStackArgArea(StackArgSize); 2001 2002 return Chains.empty() ? Chain : 2003 DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 2004 } 2005 2006 // TODO: If return values can't fit in registers, we should return as many as 2007 // possible in registers before passing on stack. 2008 bool SITargetLowering::CanLowerReturn( 2009 CallingConv::ID CallConv, 2010 MachineFunction &MF, bool IsVarArg, 2011 const SmallVectorImpl<ISD::OutputArg> &Outs, 2012 LLVMContext &Context) const { 2013 // Replacing returns with sret/stack usage doesn't make sense for shaders. 2014 // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn 2015 // for shaders. Vector types should be explicitly handled by CC. 
2016 if (AMDGPU::isEntryFunctionCC(CallConv)) 2017 return true; 2018 2019 SmallVector<CCValAssign, 16> RVLocs; 2020 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 2021 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg)); 2022 } 2023 2024 SDValue 2025 SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 2026 bool isVarArg, 2027 const SmallVectorImpl<ISD::OutputArg> &Outs, 2028 const SmallVectorImpl<SDValue> &OutVals, 2029 const SDLoc &DL, SelectionDAG &DAG) const { 2030 MachineFunction &MF = DAG.getMachineFunction(); 2031 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 2032 2033 if (AMDGPU::isKernel(CallConv)) { 2034 return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs, 2035 OutVals, DL, DAG); 2036 } 2037 2038 bool IsShader = AMDGPU::isShader(CallConv); 2039 2040 Info->setIfReturnsVoid(Outs.size() == 0); 2041 bool IsWaveEnd = Info->returnsVoid() && IsShader; 2042 2043 SmallVector<ISD::OutputArg, 48> Splits; 2044 SmallVector<SDValue, 48> SplitVals; 2045 2046 // Split vectors into their elements. 2047 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 2048 const ISD::OutputArg &Out = Outs[i]; 2049 2050 if (IsShader && Out.VT.isVector()) { 2051 MVT VT = Out.VT.getVectorElementType(); 2052 ISD::OutputArg NewOut = Out; 2053 NewOut.Flags.setSplit(); 2054 NewOut.VT = VT; 2055 2056 // We want the original number of vector elements here, e.g. 2057 // three or five, not four or eight. 2058 unsigned NumElements = Out.ArgVT.getVectorNumElements(); 2059 2060 for (unsigned j = 0; j != NumElements; ++j) { 2061 SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i], 2062 DAG.getConstant(j, DL, MVT::i32)); 2063 SplitVals.push_back(Elem); 2064 Splits.push_back(NewOut); 2065 NewOut.PartOffset += NewOut.VT.getStoreSize(); 2066 } 2067 } else { 2068 SplitVals.push_back(OutVals[i]); 2069 Splits.push_back(Out); 2070 } 2071 } 2072 2073 // CCValAssign - represent the assignment of the return value to a location. 2074 SmallVector<CCValAssign, 48> RVLocs; 2075 2076 // CCState - Info about the registers and stack slots. 2077 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 2078 *DAG.getContext()); 2079 2080 // Analyze outgoing return values. 2081 CCInfo.AnalyzeReturn(Splits, CCAssignFnForReturn(CallConv, isVarArg)); 2082 2083 SDValue Flag; 2084 SmallVector<SDValue, 48> RetOps; 2085 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 2086 2087 // Add return address for callable functions. 2088 if (!Info->isEntryFunction()) { 2089 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2090 SDValue ReturnAddrReg = CreateLiveInRegister( 2091 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64); 2092 2093 // FIXME: Should be able to use a vreg here, but need a way to prevent it 2094 // from being allcoated to a CSR. 2095 2096 SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF), 2097 MVT::i64); 2098 2099 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag); 2100 Flag = Chain.getValue(1); 2101 2102 RetOps.push_back(PhysReturnAddrReg); 2103 } 2104 2105 // Copy the result values into the output registers. 2106 for (unsigned i = 0, realRVLocIdx = 0; 2107 i != RVLocs.size(); 2108 ++i, ++realRVLocIdx) { 2109 CCValAssign &VA = RVLocs[i]; 2110 assert(VA.isRegLoc() && "Can only return in registers!"); 2111 // TODO: Partially return in registers if return values don't fit. 
2112 2113 SDValue Arg = SplitVals[realRVLocIdx]; 2114 2115 // Copied from other backends. 2116 switch (VA.getLocInfo()) { 2117 case CCValAssign::Full: 2118 break; 2119 case CCValAssign::BCvt: 2120 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 2121 break; 2122 case CCValAssign::SExt: 2123 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); 2124 break; 2125 case CCValAssign::ZExt: 2126 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 2127 break; 2128 case CCValAssign::AExt: 2129 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 2130 break; 2131 default: 2132 llvm_unreachable("Unknown loc info!"); 2133 } 2134 2135 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag); 2136 Flag = Chain.getValue(1); 2137 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2138 } 2139 2140 // FIXME: Does sret work properly? 2141 if (!Info->isEntryFunction()) { 2142 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); 2143 const MCPhysReg *I = 2144 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); 2145 if (I) { 2146 for (; *I; ++I) { 2147 if (AMDGPU::SReg_64RegClass.contains(*I)) 2148 RetOps.push_back(DAG.getRegister(*I, MVT::i64)); 2149 else if (AMDGPU::SReg_32RegClass.contains(*I)) 2150 RetOps.push_back(DAG.getRegister(*I, MVT::i32)); 2151 else 2152 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 2153 } 2154 } 2155 } 2156 2157 // Update chain and glue. 2158 RetOps[0] = Chain; 2159 if (Flag.getNode()) 2160 RetOps.push_back(Flag); 2161 2162 unsigned Opc = AMDGPUISD::ENDPGM; 2163 if (!IsWaveEnd) 2164 Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG; 2165 return DAG.getNode(Opc, DL, MVT::Other, RetOps); 2166 } 2167 2168 SDValue SITargetLowering::LowerCallResult( 2169 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, 2170 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 2171 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn, 2172 SDValue ThisVal) const { 2173 CCAssignFn *RetCC = CCAssignFnForReturn(CallConv, IsVarArg); 2174 2175 // Assign locations to each value returned by this call. 2176 SmallVector<CCValAssign, 16> RVLocs; 2177 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 2178 *DAG.getContext()); 2179 CCInfo.AnalyzeCallResult(Ins, RetCC); 2180 2181 // Copy all of the result registers out of their specified physreg. 
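  // Each CopyFromReg consumes the glue from the previous copy (initially the
  // call's glue), keeping these copies ordered directly after the call node.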
2182 for (unsigned i = 0; i != RVLocs.size(); ++i) { 2183 CCValAssign VA = RVLocs[i]; 2184 SDValue Val; 2185 2186 if (VA.isRegLoc()) { 2187 Val = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag); 2188 Chain = Val.getValue(1); 2189 InFlag = Val.getValue(2); 2190 } else if (VA.isMemLoc()) { 2191 report_fatal_error("TODO: return values in memory"); 2192 } else 2193 llvm_unreachable("unknown argument location type"); 2194 2195 switch (VA.getLocInfo()) { 2196 case CCValAssign::Full: 2197 break; 2198 case CCValAssign::BCvt: 2199 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 2200 break; 2201 case CCValAssign::ZExt: 2202 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val, 2203 DAG.getValueType(VA.getValVT())); 2204 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2205 break; 2206 case CCValAssign::SExt: 2207 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val, 2208 DAG.getValueType(VA.getValVT())); 2209 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2210 break; 2211 case CCValAssign::AExt: 2212 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); 2213 break; 2214 default: 2215 llvm_unreachable("Unknown loc info!"); 2216 } 2217 2218 InVals.push_back(Val); 2219 } 2220 2221 return Chain; 2222 } 2223 2224 // Add code to pass special inputs required depending on used features separate 2225 // from the explicit user arguments present in the IR. 2226 void SITargetLowering::passSpecialInputs( 2227 CallLoweringInfo &CLI, 2228 const SIMachineFunctionInfo &Info, 2229 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, 2230 SmallVectorImpl<SDValue> &MemOpChains, 2231 SDValue Chain, 2232 SDValue StackPtr) const { 2233 // If we don't have a call site, this was a call inserted by 2234 // legalization. These can never use special inputs. 2235 if (!CLI.CS) 2236 return; 2237 2238 const Function *CalleeFunc = CLI.CS.getCalledFunction(); 2239 assert(CalleeFunc); 2240 2241 SelectionDAG &DAG = CLI.DAG; 2242 const SDLoc &DL = CLI.DL; 2243 2244 const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); 2245 2246 auto &ArgUsageInfo = 2247 DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>(); 2248 const AMDGPUFunctionArgInfo &CalleeArgInfo 2249 = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc); 2250 2251 const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo(); 2252 2253 // TODO: Unify with private memory register handling. This is complicated by 2254 // the fact that at least in kernels, the input argument is not necessarily 2255 // in the same location as the input. 
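  // Each special input the callee needs is forwarded below: from the caller's
  // own copy of that input when it has one, or recomputed (the implicit
  // argument pointer from the kernarg segment pointer) when it does not.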
2256 AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = { 2257 AMDGPUFunctionArgInfo::DISPATCH_PTR, 2258 AMDGPUFunctionArgInfo::QUEUE_PTR, 2259 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR, 2260 AMDGPUFunctionArgInfo::DISPATCH_ID, 2261 AMDGPUFunctionArgInfo::WORKGROUP_ID_X, 2262 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y, 2263 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z, 2264 AMDGPUFunctionArgInfo::WORKITEM_ID_X, 2265 AMDGPUFunctionArgInfo::WORKITEM_ID_Y, 2266 AMDGPUFunctionArgInfo::WORKITEM_ID_Z, 2267 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR 2268 }; 2269 2270 for (auto InputID : InputRegs) { 2271 const ArgDescriptor *OutgoingArg; 2272 const TargetRegisterClass *ArgRC; 2273 2274 std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID); 2275 if (!OutgoingArg) 2276 continue; 2277 2278 const ArgDescriptor *IncomingArg; 2279 const TargetRegisterClass *IncomingArgRC; 2280 std::tie(IncomingArg, IncomingArgRC) 2281 = CallerArgInfo.getPreloadedValue(InputID); 2282 assert(IncomingArgRC == ArgRC); 2283 2284 // All special arguments are ints for now. 2285 EVT ArgVT = TRI->getSpillSize(*ArgRC) == 8 ? MVT::i64 : MVT::i32; 2286 SDValue InputReg; 2287 2288 if (IncomingArg) { 2289 InputReg = loadInputValue(DAG, ArgRC, ArgVT, DL, *IncomingArg); 2290 } else { 2291 // The implicit arg ptr is special because it doesn't have a corresponding 2292 // input for kernels, and is computed from the kernarg segment pointer. 2293 assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); 2294 InputReg = getImplicitArgPtr(DAG, DL); 2295 } 2296 2297 if (OutgoingArg->isRegister()) { 2298 RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg); 2299 } else { 2300 SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, StackPtr, 2301 InputReg, 2302 OutgoingArg->getStackOffset()); 2303 MemOpChains.push_back(ArgStore); 2304 } 2305 } 2306 } 2307 2308 static bool canGuaranteeTCO(CallingConv::ID CC) { 2309 return CC == CallingConv::Fast; 2310 } 2311 2312 /// Return true if we might ever do TCO for calls with this calling convention. 2313 static bool mayTailCallThisCC(CallingConv::ID CC) { 2314 switch (CC) { 2315 case CallingConv::C: 2316 return true; 2317 default: 2318 return canGuaranteeTCO(CC); 2319 } 2320 } 2321 2322 bool SITargetLowering::isEligibleForTailCallOptimization( 2323 SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg, 2324 const SmallVectorImpl<ISD::OutputArg> &Outs, 2325 const SmallVectorImpl<SDValue> &OutVals, 2326 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { 2327 if (!mayTailCallThisCC(CalleeCC)) 2328 return false; 2329 2330 MachineFunction &MF = DAG.getMachineFunction(); 2331 const Function &CallerF = MF.getFunction(); 2332 CallingConv::ID CallerCC = CallerF.getCallingConv(); 2333 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo(); 2334 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 2335 2336 // Kernels aren't callable, and don't have a live in return address so it 2337 // doesn't make sense to do a tail call with entry functions. 2338 if (!CallerPreserved) 2339 return false; 2340 2341 bool CCMatch = CallerCC == CalleeCC; 2342 2343 if (DAG.getTarget().Options.GuaranteedTailCallOpt) { 2344 if (canGuaranteeTCO(CalleeCC) && CCMatch) 2345 return true; 2346 return false; 2347 } 2348 2349 // TODO: Can we handle var args? 
2350 if (IsVarArg) 2351 return false; 2352 2353 for (const Argument &Arg : CallerF.args()) { 2354 if (Arg.hasByValAttr()) 2355 return false; 2356 } 2357 2358 LLVMContext &Ctx = *DAG.getContext(); 2359 2360 // Check that the call results are passed in the same way. 2361 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins, 2362 CCAssignFnForCall(CalleeCC, IsVarArg), 2363 CCAssignFnForCall(CallerCC, IsVarArg))) 2364 return false; 2365 2366 // The callee has to preserve all registers the caller needs to preserve. 2367 if (!CCMatch) { 2368 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 2369 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 2370 return false; 2371 } 2372 2373 // Nothing more to check if the callee is taking no arguments. 2374 if (Outs.empty()) 2375 return true; 2376 2377 SmallVector<CCValAssign, 16> ArgLocs; 2378 CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, Ctx); 2379 2380 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, IsVarArg)); 2381 2382 const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 2383 // If the stack arguments for this call do not fit into our own save area then 2384 // the call cannot be made tail. 2385 // TODO: Is this really necessary? 2386 if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) 2387 return false; 2388 2389 const MachineRegisterInfo &MRI = MF.getRegInfo(); 2390 return parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals); 2391 } 2392 2393 bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 2394 if (!CI->isTailCall()) 2395 return false; 2396 2397 const Function *ParentFn = CI->getParent()->getParent(); 2398 if (AMDGPU::isEntryFunctionCC(ParentFn->getCallingConv())) 2399 return false; 2400 2401 auto Attr = ParentFn->getFnAttribute("disable-tail-calls"); 2402 return (Attr.getValueAsString() != "true"); 2403 } 2404 2405 // The wave scratch offset register is used as the global base pointer. 2406 SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI, 2407 SmallVectorImpl<SDValue> &InVals) const { 2408 SelectionDAG &DAG = CLI.DAG; 2409 const SDLoc &DL = CLI.DL; 2410 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; 2411 SmallVector<SDValue, 32> &OutVals = CLI.OutVals; 2412 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; 2413 SDValue Chain = CLI.Chain; 2414 SDValue Callee = CLI.Callee; 2415 bool &IsTailCall = CLI.IsTailCall; 2416 CallingConv::ID CallConv = CLI.CallConv; 2417 bool IsVarArg = CLI.IsVarArg; 2418 bool IsSibCall = false; 2419 bool IsThisReturn = false; 2420 MachineFunction &MF = DAG.getMachineFunction(); 2421 2422 if (IsVarArg) { 2423 return lowerUnhandledCall(CLI, InVals, 2424 "unsupported call to variadic function "); 2425 } 2426 2427 if (!CLI.CS.getCalledFunction()) { 2428 return lowerUnhandledCall(CLI, InVals, 2429 "unsupported indirect call to function "); 2430 } 2431 2432 if (IsTailCall && MF.getTarget().Options.GuaranteedTailCallOpt) { 2433 return lowerUnhandledCall(CLI, InVals, 2434 "unsupported required tail call to function "); 2435 } 2436 2437 if (AMDGPU::isShader(MF.getFunction().getCallingConv())) { 2438 // Note the issue is with the CC of the calling function, not of the call 2439 // itself. 2440 return lowerUnhandledCall(CLI, InVals, 2441 "unsupported call from graphics shader of function "); 2442 } 2443 2444 // The first 4 bytes are reserved for the callee's emergency stack slot. 
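  // Outgoing stack arguments below are therefore based at offset 4
  // (CalleeUsableStackOffset).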
2445 const unsigned CalleeUsableStackOffset = 4; 2446 2447 if (IsTailCall) { 2448 IsTailCall = isEligibleForTailCallOptimization( 2449 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG); 2450 if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall()) { 2451 report_fatal_error("failed to perform tail call elimination on a call " 2452 "site marked musttail"); 2453 } 2454 2455 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt; 2456 2457 // A sibling call is one where we're under the usual C ABI and not planning 2458 // to change that but can still do a tail call: 2459 if (!TailCallOpt && IsTailCall) 2460 IsSibCall = true; 2461 2462 if (IsTailCall) 2463 ++NumTailCalls; 2464 } 2465 2466 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee)) { 2467 // FIXME: Remove this hack for function pointer types after removing 2468 // support of old address space mapping. In the new address space 2469 // mapping the pointer in default address space is 64 bit, therefore 2470 // does not need this hack. 2471 if (Callee.getValueType() == MVT::i32) { 2472 const GlobalValue *GV = GA->getGlobal(); 2473 Callee = DAG.getGlobalAddress(GV, DL, MVT::i64, GA->getOffset(), false, 2474 GA->getTargetFlags()); 2475 } 2476 } 2477 assert(Callee.getValueType() == MVT::i64); 2478 2479 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 2480 2481 // Analyze operands of the call, assigning locations to each operand. 2482 SmallVector<CCValAssign, 16> ArgLocs; 2483 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 2484 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, IsVarArg); 2485 CCInfo.AnalyzeCallOperands(Outs, AssignFn); 2486 2487 // Get a count of how many bytes are to be pushed on the stack. 2488 unsigned NumBytes = CCInfo.getNextStackOffset(); 2489 2490 if (IsSibCall) { 2491 // Since we're not changing the ABI to make this a tail call, the memory 2492 // operands are already available in the caller's incoming argument space. 2493 NumBytes = 0; 2494 } 2495 2496 // FPDiff is the byte offset of the call's argument area from the callee's. 2497 // Stores to callee stack arguments will be placed in FixedStackSlots offset 2498 // by this amount for a tail call. In a sibling call it must be 0 because the 2499 // caller will deallocate the entire stack and the callee still expects its 2500 // arguments to begin at SP+0. Completely unused for non-tail calls. 2501 int32_t FPDiff = 0; 2502 MachineFrameInfo &MFI = MF.getFrameInfo(); 2503 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 2504 2505 SDValue CallerSavedFP; 2506 2507 // Adjust the stack pointer for the new arguments... 2508 // These operations are automatically eliminated by the prolog/epilog pass 2509 if (!IsSibCall) { 2510 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); 2511 2512 unsigned OffsetReg = Info->getScratchWaveOffsetReg(); 2513 2514 // In the HSA case, this should be an identity copy. 2515 SDValue ScratchRSrcReg 2516 = DAG.getCopyFromReg(Chain, DL, Info->getScratchRSrcReg(), MVT::v4i32); 2517 RegsToPass.emplace_back(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); 2518 2519 // TODO: Don't hardcode these registers and get from the callee function. 2520 SDValue ScratchWaveOffsetReg 2521 = DAG.getCopyFromReg(Chain, DL, OffsetReg, MVT::i32); 2522 RegsToPass.emplace_back(AMDGPU::SGPR4, ScratchWaveOffsetReg); 2523 2524 if (!Info->isEntryFunction()) { 2525 // Avoid clobbering this function's FP value. 
In the current convention 2526 // callee will overwrite this, so do save/restore around the call site. 2527 CallerSavedFP = DAG.getCopyFromReg(Chain, DL, 2528 Info->getFrameOffsetReg(), MVT::i32); 2529 } 2530 } 2531 2532 // Stack pointer relative accesses are done by changing the offset SGPR. This 2533 // is just the VGPR offset component. 2534 SDValue StackPtr = DAG.getConstant(CalleeUsableStackOffset, DL, MVT::i32); 2535 2536 SmallVector<SDValue, 8> MemOpChains; 2537 MVT PtrVT = MVT::i32; 2538 2539 // Walk the register/memloc assignments, inserting copies/loads. 2540 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e; 2541 ++i, ++realArgIdx) { 2542 CCValAssign &VA = ArgLocs[i]; 2543 SDValue Arg = OutVals[realArgIdx]; 2544 2545 // Promote the value if needed. 2546 switch (VA.getLocInfo()) { 2547 case CCValAssign::Full: 2548 break; 2549 case CCValAssign::BCvt: 2550 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg); 2551 break; 2552 case CCValAssign::ZExt: 2553 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg); 2554 break; 2555 case CCValAssign::SExt: 2556 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg); 2557 break; 2558 case CCValAssign::AExt: 2559 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg); 2560 break; 2561 case CCValAssign::FPExt: 2562 Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg); 2563 break; 2564 default: 2565 llvm_unreachable("Unknown loc info!"); 2566 } 2567 2568 if (VA.isRegLoc()) { 2569 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 2570 } else { 2571 assert(VA.isMemLoc()); 2572 2573 SDValue DstAddr; 2574 MachinePointerInfo DstInfo; 2575 2576 unsigned LocMemOffset = VA.getLocMemOffset(); 2577 int32_t Offset = LocMemOffset; 2578 2579 SDValue PtrOff = DAG.getObjectPtrOffset(DL, StackPtr, Offset); 2580 2581 if (IsTailCall) { 2582 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 2583 unsigned OpSize = Flags.isByVal() ? 2584 Flags.getByValSize() : VA.getValVT().getStoreSize(); 2585 2586 Offset = Offset + FPDiff; 2587 int FI = MFI.CreateFixedObject(OpSize, Offset, true); 2588 2589 DstAddr = DAG.getObjectPtrOffset(DL, DAG.getFrameIndex(FI, PtrVT), 2590 StackPtr); 2591 DstInfo = MachinePointerInfo::getFixedStack(MF, FI); 2592 2593 // Make sure any stack arguments overlapping with where we're storing 2594 // are loaded before this eventual operation. Otherwise they'll be 2595 // clobbered. 2596 2597 // FIXME: Why is this really necessary? This seems to just result in a 2598 // lot of code to copy the stack and write them back to the same 2599 // locations, which are supposed to be immutable? 2600 Chain = addTokenForArgument(Chain, DAG, MFI, FI); 2601 } else { 2602 DstAddr = PtrOff; 2603 DstInfo = MachinePointerInfo::getStack(MF, LocMemOffset); 2604 } 2605 2606 if (Outs[i].Flags.isByVal()) { 2607 SDValue SizeNode = 2608 DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i32); 2609 SDValue Cpy = DAG.getMemcpy( 2610 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(), 2611 /*isVol = */ false, /*AlwaysInline = */ true, 2612 /*isTailCall = */ false, DstInfo, 2613 MachinePointerInfo(UndefValue::get(Type::getInt8PtrTy( 2614 *DAG.getContext(), AMDGPUASI.PRIVATE_ADDRESS)))); 2615 2616 MemOpChains.push_back(Cpy); 2617 } else { 2618 SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo); 2619 MemOpChains.push_back(Store); 2620 } 2621 } 2622 } 2623 2624 // Copy special input registers after user input arguments. 
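  // (workgroup/workitem IDs, dispatch/queue pointers, etc., based on what the
  // callee's recorded argument usage says it needs).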
2625 passSpecialInputs(CLI, *Info, RegsToPass, MemOpChains, Chain, StackPtr);
2626
2627 if (!MemOpChains.empty())
2628 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2629
2630 // Build a sequence of copy-to-reg nodes chained together with token chain
2631 // and flag operands which copy the outgoing args into the appropriate regs.
2632 SDValue InFlag;
2633 for (auto &RegToPass : RegsToPass) {
2634 Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
2635 RegToPass.second, InFlag);
2636 InFlag = Chain.getValue(1);
2637 }
2638
2639
2640 SDValue PhysReturnAddrReg;
2641 if (IsTailCall) {
2642 // Since the return is being combined with the call, we need to pass on the
2643 // return address.
2644
2645 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
2646 SDValue ReturnAddrReg = CreateLiveInRegister(
2647 DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
2648
2649 PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
2650 MVT::i64);
2651 Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
2652 InFlag = Chain.getValue(1);
2653 }
2654
2655 // We don't usually want to end the call-sequence here because we would tidy
2656 // the frame up *after* the call, however in the ABI-changing tail-call case
2657 // we've carefully laid out the parameters so that when sp is reset they'll be
2658 // in the correct location.
2659 if (IsTailCall && !IsSibCall) {
2660 Chain = DAG.getCALLSEQ_END(Chain,
2661 DAG.getTargetConstant(NumBytes, DL, MVT::i32),
2662 DAG.getTargetConstant(0, DL, MVT::i32),
2663 InFlag, DL);
2664 InFlag = Chain.getValue(1);
2665 }
2666
2667 std::vector<SDValue> Ops;
2668 Ops.push_back(Chain);
2669 Ops.push_back(Callee);
2670
2671 if (IsTailCall) {
2672 // Each tail call may have to adjust the stack by a different amount, so
2673 // this information must travel along with the operation for eventual
2674 // consumption by emitEpilogue.
2675 Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
2676
2677 Ops.push_back(PhysReturnAddrReg);
2678 }
2679
2680 // Add argument registers to the end of the list so that they are known live
2681 // into the call.
2682 for (auto &RegToPass : RegsToPass) {
2683 Ops.push_back(DAG.getRegister(RegToPass.first,
2684 RegToPass.second.getValueType()));
2685 }
2686
2687 // Add a register mask operand representing the call-preserved registers.
2688
2689 auto *TRI = static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
2690 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2691 assert(Mask && "Missing call preserved mask for calling convention");
2692 Ops.push_back(DAG.getRegisterMask(Mask));
2693
2694 if (InFlag.getNode())
2695 Ops.push_back(InFlag);
2696
2697 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2698
2699 // If we're doing a tail call, use a TC_RETURN here rather than an
2700 // actual call instruction.
2701 if (IsTailCall) {
2702 MFI.setHasTailCall();
2703 return DAG.getNode(AMDGPUISD::TC_RETURN, DL, NodeTys, Ops);
2704 }
2705
2706 // Returns a chain and a flag for retval copy to use.
2707 SDValue Call = DAG.getNode(AMDGPUISD::CALL, DL, NodeTys, Ops); 2708 Chain = Call.getValue(0); 2709 InFlag = Call.getValue(1); 2710 2711 if (CallerSavedFP) { 2712 SDValue FPReg = DAG.getRegister(Info->getFrameOffsetReg(), MVT::i32); 2713 Chain = DAG.getCopyToReg(Chain, DL, FPReg, CallerSavedFP, InFlag); 2714 InFlag = Chain.getValue(1); 2715 } 2716 2717 uint64_t CalleePopBytes = NumBytes; 2718 Chain = DAG.getCALLSEQ_END(Chain, DAG.getTargetConstant(0, DL, MVT::i32), 2719 DAG.getTargetConstant(CalleePopBytes, DL, MVT::i32), 2720 InFlag, DL); 2721 if (!Ins.empty()) 2722 InFlag = Chain.getValue(1); 2723 2724 // Handle result values, copying them out of physregs into vregs that we 2725 // return. 2726 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG, 2727 InVals, IsThisReturn, 2728 IsThisReturn ? OutVals[0] : SDValue()); 2729 } 2730 2731 unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT, 2732 SelectionDAG &DAG) const { 2733 unsigned Reg = StringSwitch<unsigned>(RegName) 2734 .Case("m0", AMDGPU::M0) 2735 .Case("exec", AMDGPU::EXEC) 2736 .Case("exec_lo", AMDGPU::EXEC_LO) 2737 .Case("exec_hi", AMDGPU::EXEC_HI) 2738 .Case("flat_scratch", AMDGPU::FLAT_SCR) 2739 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO) 2740 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI) 2741 .Default(AMDGPU::NoRegister); 2742 2743 if (Reg == AMDGPU::NoRegister) { 2744 report_fatal_error(Twine("invalid register name \"" 2745 + StringRef(RegName) + "\".")); 2746 2747 } 2748 2749 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS && 2750 Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) { 2751 report_fatal_error(Twine("invalid register \"" 2752 + StringRef(RegName) + "\" for subtarget.")); 2753 } 2754 2755 switch (Reg) { 2756 case AMDGPU::M0: 2757 case AMDGPU::EXEC_LO: 2758 case AMDGPU::EXEC_HI: 2759 case AMDGPU::FLAT_SCR_LO: 2760 case AMDGPU::FLAT_SCR_HI: 2761 if (VT.getSizeInBits() == 32) 2762 return Reg; 2763 break; 2764 case AMDGPU::EXEC: 2765 case AMDGPU::FLAT_SCR: 2766 if (VT.getSizeInBits() == 64) 2767 return Reg; 2768 break; 2769 default: 2770 llvm_unreachable("missing register type checking"); 2771 } 2772 2773 report_fatal_error(Twine("invalid type for register \"" 2774 + StringRef(RegName) + "\".")); 2775 } 2776 2777 // If kill is not the last instruction, split the block so kill is always a 2778 // proper terminator. 2779 MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI, 2780 MachineBasicBlock *BB) const { 2781 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 2782 2783 MachineBasicBlock::iterator SplitPoint(&MI); 2784 ++SplitPoint; 2785 2786 if (SplitPoint == BB->end()) { 2787 // Don't bother with a new block. 2788 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); 2789 return BB; 2790 } 2791 2792 MachineFunction *MF = BB->getParent(); 2793 MachineBasicBlock *SplitBB 2794 = MF->CreateMachineBasicBlock(BB->getBasicBlock()); 2795 2796 MF->insert(++MachineFunction::iterator(BB), SplitBB); 2797 SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end()); 2798 2799 SplitBB->transferSuccessorsAndUpdatePHIs(BB); 2800 BB->addSuccessor(SplitBB); 2801 2802 MI.setDesc(TII->getKillTerminatorFromPseudo(MI.getOpcode())); 2803 return SplitBB; 2804 } 2805 2806 // Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the 2807 // wavefront. If the value is uniform and just happens to be in a VGPR, this 2808 // will only do one iteration. In the worst case, this will loop 64 times. 
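//
// Roughly, the emitted structure looks like this (a sketch of the
// non-GPR-index-mode path with a zero offset; register names here are just
// placeholders):
//
//     s_mov_b64 s[save], exec
//   loop:
//     v_readfirstlane_b32 s_idx, v_idx
//     v_cmp_eq_u32_e64 s[cond], s_idx, v_idx
//     s_and_saveexec_b64 s[new], s[cond]
//     s_mov_b32 m0, s_idx
//     v_movrels_b32 / v_movreld_b32 ...   ; inserted by the caller
//     s_xor_b64 exec, exec, s[new]
//     s_cbranch_execnz loop
//     s_mov_b64 exec, s[save]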
2809 // 2810 // TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value. 2811 static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop( 2812 const SIInstrInfo *TII, 2813 MachineRegisterInfo &MRI, 2814 MachineBasicBlock &OrigBB, 2815 MachineBasicBlock &LoopBB, 2816 const DebugLoc &DL, 2817 const MachineOperand &IdxReg, 2818 unsigned InitReg, 2819 unsigned ResultReg, 2820 unsigned PhiReg, 2821 unsigned InitSaveExecReg, 2822 int Offset, 2823 bool UseGPRIdxMode, 2824 bool IsIndirectSrc) { 2825 MachineBasicBlock::iterator I = LoopBB.begin(); 2826 2827 unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2828 unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2829 unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2830 unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2831 2832 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg) 2833 .addReg(InitReg) 2834 .addMBB(&OrigBB) 2835 .addReg(ResultReg) 2836 .addMBB(&LoopBB); 2837 2838 BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec) 2839 .addReg(InitSaveExecReg) 2840 .addMBB(&OrigBB) 2841 .addReg(NewExec) 2842 .addMBB(&LoopBB); 2843 2844 // Read the next variant <- also loop target. 2845 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg) 2846 .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef())); 2847 2848 // Compare the just read M0 value to all possible Idx values. 2849 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg) 2850 .addReg(CurrentIdxReg) 2851 .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg()); 2852 2853 // Update EXEC, save the original EXEC value to VCC. 2854 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec) 2855 .addReg(CondReg, RegState::Kill); 2856 2857 MRI.setSimpleHint(NewExec, CondReg); 2858 2859 if (UseGPRIdxMode) { 2860 unsigned IdxReg; 2861 if (Offset == 0) { 2862 IdxReg = CurrentIdxReg; 2863 } else { 2864 IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2865 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg) 2866 .addReg(CurrentIdxReg, RegState::Kill) 2867 .addImm(Offset); 2868 } 2869 unsigned IdxMode = IsIndirectSrc ? 2870 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE; 2871 MachineInstr *SetOn = 2872 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 2873 .addReg(IdxReg, RegState::Kill) 2874 .addImm(IdxMode); 2875 SetOn->getOperand(3).setIsUndef(); 2876 } else { 2877 // Move index from VCC into M0 2878 if (Offset == 0) { 2879 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 2880 .addReg(CurrentIdxReg, RegState::Kill); 2881 } else { 2882 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 2883 .addReg(CurrentIdxReg, RegState::Kill) 2884 .addImm(Offset); 2885 } 2886 } 2887 2888 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 2889 MachineInstr *InsertPt = 2890 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC) 2891 .addReg(AMDGPU::EXEC) 2892 .addReg(NewExec); 2893 2894 // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use 2895 // s_cbranch_scc0? 2896 2897 // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover. 2898 BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ)) 2899 .addMBB(&LoopBB); 2900 2901 return InsertPt->getIterator(); 2902 } 2903 2904 // This has slightly sub-optimal regalloc when the source vector is killed by 2905 // the read. 
The register allocator does not understand that the kill is 2906 // per-workitem, so is kept alive for the whole loop so we end up not re-using a 2907 // subregister from it, using 1 more VGPR than necessary. This was saved when 2908 // this was expanded after register allocation. 2909 static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII, 2910 MachineBasicBlock &MBB, 2911 MachineInstr &MI, 2912 unsigned InitResultReg, 2913 unsigned PhiReg, 2914 int Offset, 2915 bool UseGPRIdxMode, 2916 bool IsIndirectSrc) { 2917 MachineFunction *MF = MBB.getParent(); 2918 MachineRegisterInfo &MRI = MF->getRegInfo(); 2919 const DebugLoc &DL = MI.getDebugLoc(); 2920 MachineBasicBlock::iterator I(&MI); 2921 2922 unsigned DstReg = MI.getOperand(0).getReg(); 2923 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 2924 unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 2925 2926 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec); 2927 2928 // Save the EXEC mask 2929 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec) 2930 .addReg(AMDGPU::EXEC); 2931 2932 // To insert the loop we need to split the block. Move everything after this 2933 // point to a new block, and insert a new empty block between the two. 2934 MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock(); 2935 MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock(); 2936 MachineFunction::iterator MBBI(MBB); 2937 ++MBBI; 2938 2939 MF->insert(MBBI, LoopBB); 2940 MF->insert(MBBI, RemainderBB); 2941 2942 LoopBB->addSuccessor(LoopBB); 2943 LoopBB->addSuccessor(RemainderBB); 2944 2945 // Move the rest of the block into a new block. 2946 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 2947 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 2948 2949 MBB.addSuccessor(LoopBB); 2950 2951 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 2952 2953 auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx, 2954 InitResultReg, DstReg, PhiReg, TmpExec, 2955 Offset, UseGPRIdxMode, IsIndirectSrc); 2956 2957 MachineBasicBlock::iterator First = RemainderBB->begin(); 2958 BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) 2959 .addReg(SaveExec); 2960 2961 return InsPt; 2962 } 2963 2964 // Returns subreg index, offset 2965 static std::pair<unsigned, int> 2966 computeIndirectRegAndOffset(const SIRegisterInfo &TRI, 2967 const TargetRegisterClass *SuperRC, 2968 unsigned VecReg, 2969 int Offset) { 2970 int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32; 2971 2972 // Skip out of bounds offsets, or else we would end up using an undefined 2973 // register. 2974 if (Offset >= NumElts || Offset < 0) 2975 return std::make_pair(AMDGPU::sub0, Offset); 2976 2977 return std::make_pair(AMDGPU::sub0 + Offset, 0); 2978 } 2979 2980 // Return true if the index is an SGPR and was set. 
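// If the index is already uniform (i.e. it lives in an SGPR), no waterfall
// loop is needed: the index plus any constant offset can be written directly
// into M0, or used to turn on the GPR indexing mode.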
2981 static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII, 2982 MachineRegisterInfo &MRI, 2983 MachineInstr &MI, 2984 int Offset, 2985 bool UseGPRIdxMode, 2986 bool IsIndirectSrc) { 2987 MachineBasicBlock *MBB = MI.getParent(); 2988 const DebugLoc &DL = MI.getDebugLoc(); 2989 MachineBasicBlock::iterator I(&MI); 2990 2991 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 2992 const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg()); 2993 2994 assert(Idx->getReg() != AMDGPU::NoRegister); 2995 2996 if (!TII->getRegisterInfo().isSGPRClass(IdxRC)) 2997 return false; 2998 2999 if (UseGPRIdxMode) { 3000 unsigned IdxMode = IsIndirectSrc ? 3001 VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE; 3002 if (Offset == 0) { 3003 MachineInstr *SetOn = 3004 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 3005 .add(*Idx) 3006 .addImm(IdxMode); 3007 3008 SetOn->getOperand(3).setIsUndef(); 3009 } else { 3010 unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3011 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp) 3012 .add(*Idx) 3013 .addImm(Offset); 3014 MachineInstr *SetOn = 3015 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON)) 3016 .addReg(Tmp, RegState::Kill) 3017 .addImm(IdxMode); 3018 3019 SetOn->getOperand(3).setIsUndef(); 3020 } 3021 3022 return true; 3023 } 3024 3025 if (Offset == 0) { 3026 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 3027 .add(*Idx); 3028 } else { 3029 BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0) 3030 .add(*Idx) 3031 .addImm(Offset); 3032 } 3033 3034 return true; 3035 } 3036 3037 // Control flow needs to be inserted if indexing with a VGPR. 3038 static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI, 3039 MachineBasicBlock &MBB, 3040 const GCNSubtarget &ST) { 3041 const SIInstrInfo *TII = ST.getInstrInfo(); 3042 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 3043 MachineFunction *MF = MBB.getParent(); 3044 MachineRegisterInfo &MRI = MF->getRegInfo(); 3045 3046 unsigned Dst = MI.getOperand(0).getReg(); 3047 unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg(); 3048 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 3049 3050 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg); 3051 3052 unsigned SubReg; 3053 std::tie(SubReg, Offset) 3054 = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset); 3055 3056 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode); 3057 3058 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) { 3059 MachineBasicBlock::iterator I(&MI); 3060 const DebugLoc &DL = MI.getDebugLoc(); 3061 3062 if (UseGPRIdxMode) { 3063 // TODO: Look at the uses to avoid the copy. This may require rescheduling 3064 // to avoid interfering with other uses, so probably requires a new 3065 // optimization pass. 
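      // With the GPR index mode enabled above, this otherwise ordinary
      // v_mov_b32 reads from the source subregister selected by the index in M0.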
3066 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 3067 .addReg(SrcReg, RegState::Undef, SubReg) 3068 .addReg(SrcReg, RegState::Implicit) 3069 .addReg(AMDGPU::M0, RegState::Implicit); 3070 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3071 } else { 3072 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 3073 .addReg(SrcReg, RegState::Undef, SubReg) 3074 .addReg(SrcReg, RegState::Implicit); 3075 } 3076 3077 MI.eraseFromParent(); 3078 3079 return &MBB; 3080 } 3081 3082 const DebugLoc &DL = MI.getDebugLoc(); 3083 MachineBasicBlock::iterator I(&MI); 3084 3085 unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3086 unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3087 3088 BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg); 3089 3090 auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, 3091 Offset, UseGPRIdxMode, true); 3092 MachineBasicBlock *LoopBB = InsPt->getParent(); 3093 3094 if (UseGPRIdxMode) { 3095 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst) 3096 .addReg(SrcReg, RegState::Undef, SubReg) 3097 .addReg(SrcReg, RegState::Implicit) 3098 .addReg(AMDGPU::M0, RegState::Implicit); 3099 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF)); 3100 } else { 3101 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst) 3102 .addReg(SrcReg, RegState::Undef, SubReg) 3103 .addReg(SrcReg, RegState::Implicit); 3104 } 3105 3106 MI.eraseFromParent(); 3107 3108 return LoopBB; 3109 } 3110 3111 static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI, 3112 const TargetRegisterClass *VecRC) { 3113 switch (TRI.getRegSizeInBits(*VecRC)) { 3114 case 32: // 4 bytes 3115 return AMDGPU::V_MOVRELD_B32_V1; 3116 case 64: // 8 bytes 3117 return AMDGPU::V_MOVRELD_B32_V2; 3118 case 128: // 16 bytes 3119 return AMDGPU::V_MOVRELD_B32_V4; 3120 case 256: // 32 bytes 3121 return AMDGPU::V_MOVRELD_B32_V8; 3122 case 512: // 64 bytes 3123 return AMDGPU::V_MOVRELD_B32_V16; 3124 default: 3125 llvm_unreachable("unsupported size for MOVRELD pseudos"); 3126 } 3127 } 3128 3129 static MachineBasicBlock *emitIndirectDst(MachineInstr &MI, 3130 MachineBasicBlock &MBB, 3131 const GCNSubtarget &ST) { 3132 const SIInstrInfo *TII = ST.getInstrInfo(); 3133 const SIRegisterInfo &TRI = TII->getRegisterInfo(); 3134 MachineFunction *MF = MBB.getParent(); 3135 MachineRegisterInfo &MRI = MF->getRegInfo(); 3136 3137 unsigned Dst = MI.getOperand(0).getReg(); 3138 const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); 3139 const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); 3140 const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); 3141 int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); 3142 const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg()); 3143 3144 // This can be an immediate, but will be folded later. 
3145 assert(Val->getReg());
3146
3147 unsigned SubReg;
3148 std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
3149 SrcVec->getReg(),
3150 Offset);
3151 bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);
3152
3153 if (Idx->getReg() == AMDGPU::NoRegister) {
3154 MachineBasicBlock::iterator I(&MI);
3155 const DebugLoc &DL = MI.getDebugLoc();
3156
3157 assert(Offset == 0);
3158
3159 BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
3160 .add(*SrcVec)
3161 .add(*Val)
3162 .addImm(SubReg);
3163
3164 MI.eraseFromParent();
3165 return &MBB;
3166 }
3167
3168 if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
3169 MachineBasicBlock::iterator I(&MI);
3170 const DebugLoc &DL = MI.getDebugLoc();
3171
3172 if (UseGPRIdxMode) {
3173 BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3174 .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
3175 .add(*Val)
3176 .addReg(Dst, RegState::ImplicitDefine)
3177 .addReg(SrcVec->getReg(), RegState::Implicit)
3178 .addReg(AMDGPU::M0, RegState::Implicit);
3179
3180 BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3181 } else {
3182 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3183
3184 BuildMI(MBB, I, DL, MovRelDesc)
3185 .addReg(Dst, RegState::Define)
3186 .addReg(SrcVec->getReg())
3187 .add(*Val)
3188 .addImm(SubReg - AMDGPU::sub0);
3189 }
3190
3191 MI.eraseFromParent();
3192 return &MBB;
3193 }
3194
3195 if (Val->isReg())
3196 MRI.clearKillFlags(Val->getReg());
3197
3198 const DebugLoc &DL = MI.getDebugLoc();
3199
3200 unsigned PhiReg = MRI.createVirtualRegister(VecRC);
3201
3202 auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
3203 Offset, UseGPRIdxMode, false);
3204 MachineBasicBlock *LoopBB = InsPt->getParent();
3205
3206 if (UseGPRIdxMode) {
3207 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
3208 .addReg(PhiReg, RegState::Undef, SubReg) // vdst
3209 .add(*Val) // src0
3210 .addReg(Dst, RegState::ImplicitDefine)
3211 .addReg(PhiReg, RegState::Implicit)
3212 .addReg(AMDGPU::M0, RegState::Implicit);
3213 BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
3214 } else {
3215 const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
3216
3217 BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
3218 .addReg(Dst, RegState::Define)
3219 .addReg(PhiReg)
3220 .add(*Val)
3221 .addImm(SubReg - AMDGPU::sub0);
3222 }
3223
3224 MI.eraseFromParent();
3225
3226 return LoopBB;
3227 }
3228
3229 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
3230 MachineInstr &MI, MachineBasicBlock *BB) const {
3231
3232 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3233 MachineFunction *MF = BB->getParent();
3234 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
3235
3236 if (TII->isMIMG(MI)) {
3237 if (MI.memoperands_empty() && MI.mayLoadOrStore()) {
3238 report_fatal_error("missing mem operand from MIMG instruction");
3239 }
3240 // Add a memoperand for mimg instructions so that they aren't assumed to
3241 // be ordered memory instructions.
3242 3243 return BB; 3244 } 3245 3246 switch (MI.getOpcode()) { 3247 case AMDGPU::S_ADD_U64_PSEUDO: 3248 case AMDGPU::S_SUB_U64_PSEUDO: { 3249 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 3250 const DebugLoc &DL = MI.getDebugLoc(); 3251 3252 MachineOperand &Dest = MI.getOperand(0); 3253 MachineOperand &Src0 = MI.getOperand(1); 3254 MachineOperand &Src1 = MI.getOperand(2); 3255 3256 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3257 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3258 3259 MachineOperand Src0Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, 3260 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub0, 3261 &AMDGPU::SReg_32_XM0RegClass); 3262 MachineOperand Src0Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, 3263 Src0, &AMDGPU::SReg_64RegClass, AMDGPU::sub1, 3264 &AMDGPU::SReg_32_XM0RegClass); 3265 3266 MachineOperand Src1Sub0 = TII->buildExtractSubRegOrImm(MI, MRI, 3267 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub0, 3268 &AMDGPU::SReg_32_XM0RegClass); 3269 MachineOperand Src1Sub1 = TII->buildExtractSubRegOrImm(MI, MRI, 3270 Src1, &AMDGPU::SReg_64RegClass, AMDGPU::sub1, 3271 &AMDGPU::SReg_32_XM0RegClass); 3272 3273 bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 3274 3275 unsigned LoOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; 3276 unsigned HiOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; 3277 BuildMI(*BB, MI, DL, TII->get(LoOpc), DestSub0) 3278 .add(Src0Sub0) 3279 .add(Src1Sub0); 3280 BuildMI(*BB, MI, DL, TII->get(HiOpc), DestSub1) 3281 .add(Src0Sub1) 3282 .add(Src1Sub1); 3283 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest.getReg()) 3284 .addReg(DestSub0) 3285 .addImm(AMDGPU::sub0) 3286 .addReg(DestSub1) 3287 .addImm(AMDGPU::sub1); 3288 MI.eraseFromParent(); 3289 return BB; 3290 } 3291 case AMDGPU::SI_INIT_M0: { 3292 BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(), 3293 TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0) 3294 .add(MI.getOperand(0)); 3295 MI.eraseFromParent(); 3296 return BB; 3297 } 3298 case AMDGPU::SI_INIT_EXEC: 3299 // This should be before all vector instructions. 3300 BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64), 3301 AMDGPU::EXEC) 3302 .addImm(MI.getOperand(0).getImm()); 3303 MI.eraseFromParent(); 3304 return BB; 3305 3306 case AMDGPU::SI_INIT_EXEC_FROM_INPUT: { 3307 // Extract the thread count from an SGPR input and set EXEC accordingly. 3308 // Since BFM can't shift by 64, handle that case with CMP + CMOV. 3309 // 3310 // S_BFE_U32 count, input, {shift, 7} 3311 // S_BFM_B64 exec, count, 0 3312 // S_CMP_EQ_U32 count, 64 3313 // S_CMOV_B64 exec, -1 3314 MachineInstr *FirstMI = &*BB->begin(); 3315 MachineRegisterInfo &MRI = MF->getRegInfo(); 3316 unsigned InputReg = MI.getOperand(0).getReg(); 3317 unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3318 bool Found = false; 3319 3320 // Move the COPY of the input reg to the beginning, so that we can use it. 3321 for (auto I = BB->begin(); I != &MI; I++) { 3322 if (I->getOpcode() != TargetOpcode::COPY || 3323 I->getOperand(0).getReg() != InputReg) 3324 continue; 3325 3326 if (I == FirstMI) { 3327 FirstMI = &*++BB->begin(); 3328 } else { 3329 I->removeFromParent(); 3330 BB->insert(FirstMI, &*I); 3331 } 3332 Found = true; 3333 break; 3334 } 3335 assert(Found); 3336 (void)Found; 3337 3338 // This should be before all vector instructions. 
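// Note on the immediate built below: S_BFE_U32 packs its bitfield descriptor
// into one operand, with the field offset in the low bits and the field width
// in bits [22:16] of the source (per the scalar bitfield-extract encoding).
// "(Imm & 0x7f) | 0x70000" therefore requests a 7-bit field at the given
// shift -- 7 bits because the extracted thread count can be as large as 64.
// This is only an explanatory sketch of the encoding; the ISA manual is the
// authoritative reference.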
3339 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
3340 .addReg(InputReg)
3341 .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
3342 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
3343 AMDGPU::EXEC)
3344 .addReg(CountReg)
3345 .addImm(0);
3346 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
3347 .addReg(CountReg, RegState::Kill)
3348 .addImm(64);
3349 BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
3350 AMDGPU::EXEC)
3351 .addImm(-1);
3352 MI.eraseFromParent();
3353 return BB;
3354 }
3355
3356 case AMDGPU::GET_GROUPSTATICSIZE: {
3357 DebugLoc DL = MI.getDebugLoc();
3358 BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
3359 .add(MI.getOperand(0))
3360 .addImm(MFI->getLDSSize());
3361 MI.eraseFromParent();
3362 return BB;
3363 }
3364 case AMDGPU::SI_INDIRECT_SRC_V1:
3365 case AMDGPU::SI_INDIRECT_SRC_V2:
3366 case AMDGPU::SI_INDIRECT_SRC_V4:
3367 case AMDGPU::SI_INDIRECT_SRC_V8:
3368 case AMDGPU::SI_INDIRECT_SRC_V16:
3369 return emitIndirectSrc(MI, *BB, *getSubtarget());
3370 case AMDGPU::SI_INDIRECT_DST_V1:
3371 case AMDGPU::SI_INDIRECT_DST_V2:
3372 case AMDGPU::SI_INDIRECT_DST_V4:
3373 case AMDGPU::SI_INDIRECT_DST_V8:
3374 case AMDGPU::SI_INDIRECT_DST_V16:
3375 return emitIndirectDst(MI, *BB, *getSubtarget());
3376 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
3377 case AMDGPU::SI_KILL_I1_PSEUDO:
3378 return splitKillBlock(MI, BB);
3379 case AMDGPU::V_CNDMASK_B64_PSEUDO: {
3380 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
3381
3382 unsigned Dst = MI.getOperand(0).getReg();
3383 unsigned Src0 = MI.getOperand(1).getReg();
3384 unsigned Src1 = MI.getOperand(2).getReg();
3385 const DebugLoc &DL = MI.getDebugLoc();
3386 unsigned SrcCond = MI.getOperand(3).getReg();
3387
3388 unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3389 unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3390 unsigned SrcCondCopy = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
3391
3392 BuildMI(*BB, MI, DL, TII->get(AMDGPU::COPY), SrcCondCopy)
3393 .addReg(SrcCond);
3394 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
3395 .addReg(Src0, 0, AMDGPU::sub0)
3396 .addReg(Src1, 0, AMDGPU::sub0)
3397 .addReg(SrcCondCopy);
3398 BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
3399 .addReg(Src0, 0, AMDGPU::sub1)
3400 .addReg(Src1, 0, AMDGPU::sub1)
3401 .addReg(SrcCondCopy);
3402
3403 BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
3404 .addReg(DstLo)
3405 .addImm(AMDGPU::sub0)
3406 .addReg(DstHi)
3407 .addImm(AMDGPU::sub1);
3408 MI.eraseFromParent();
3409 return BB;
3410 }
3411 case AMDGPU::SI_BR_UNDEF: {
3412 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3413 const DebugLoc &DL = MI.getDebugLoc();
3414 MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
3415 .add(MI.getOperand(0));
3416 Br->getOperand(1).setIsUndef(true); // read undef SCC
3417 MI.eraseFromParent();
3418 return BB;
3419 }
3420 case AMDGPU::ADJCALLSTACKUP:
3421 case AMDGPU::ADJCALLSTACKDOWN: {
3422 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3423 MachineInstrBuilder MIB(*MF, &MI);
3424
3425 // Add an implicit use of the frame offset reg to prevent the restore copy
3426 // inserted after the call from being reordered after stack operations in
3427 // the caller's frame.
3428 MIB.addReg(Info->getStackPtrOffsetReg(), RegState::ImplicitDefine) 3429 .addReg(Info->getStackPtrOffsetReg(), RegState::Implicit) 3430 .addReg(Info->getFrameOffsetReg(), RegState::Implicit); 3431 return BB; 3432 } 3433 case AMDGPU::SI_CALL_ISEL: 3434 case AMDGPU::SI_TCRETURN_ISEL: { 3435 const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); 3436 const DebugLoc &DL = MI.getDebugLoc(); 3437 unsigned ReturnAddrReg = TII->getRegisterInfo().getReturnAddressReg(*MF); 3438 3439 MachineRegisterInfo &MRI = MF->getRegInfo(); 3440 unsigned GlobalAddrReg = MI.getOperand(0).getReg(); 3441 MachineInstr *PCRel = MRI.getVRegDef(GlobalAddrReg); 3442 assert(PCRel->getOpcode() == AMDGPU::SI_PC_ADD_REL_OFFSET); 3443 3444 const GlobalValue *G = PCRel->getOperand(1).getGlobal(); 3445 3446 MachineInstrBuilder MIB; 3447 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) { 3448 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg) 3449 .add(MI.getOperand(0)) 3450 .addGlobalAddress(G); 3451 } else { 3452 MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_TCRETURN)) 3453 .add(MI.getOperand(0)) 3454 .addGlobalAddress(G); 3455 3456 // There is an additional imm operand for tcreturn, but it should be in the 3457 // right place already. 3458 } 3459 3460 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) 3461 MIB.add(MI.getOperand(I)); 3462 3463 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); 3464 MI.eraseFromParent(); 3465 return BB; 3466 } 3467 default: 3468 return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); 3469 } 3470 } 3471 3472 bool SITargetLowering::hasBitPreservingFPLogic(EVT VT) const { 3473 return isTypeLegal(VT.getScalarType()); 3474 } 3475 3476 bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { 3477 // This currently forces unfolding various combinations of fsub into fma with 3478 // free fneg'd operands. As long as we have fast FMA (controlled by 3479 // isFMAFasterThanFMulAndFAdd), we should perform these. 3480 3481 // When fma is quarter rate, for f64 where add / sub are at best half rate, 3482 // most of these combines appear to be cycle neutral but save on instruction 3483 // count / code size. 3484 return true; 3485 } 3486 3487 EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, 3488 EVT VT) const { 3489 if (!VT.isVector()) { 3490 return MVT::i1; 3491 } 3492 return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements()); 3493 } 3494 3495 MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const { 3496 // TODO: Should i16 be used always if legal? For now it would force VALU 3497 // shifts. 3498 return (VT == MVT::i16) ? MVT::i16 : MVT::i32; 3499 } 3500 3501 // Answering this is somewhat tricky and depends on the specific device which 3502 // have different rates for fma or all f64 operations. 3503 // 3504 // v_fma_f64 and v_mul_f64 always take the same number of cycles as each other 3505 // regardless of which device (although the number of cycles differs between 3506 // devices), so it is always profitable for f64. 3507 // 3508 // v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable 3509 // only on full rate devices. Normally, we should prefer selecting v_mad_f32 3510 // which we can always do even without fused FP ops since it returns the same 3511 // result as the separate operations and since it is always full 3512 // rate. Therefore, we lie and report that it is not faster for f32. 
v_mad_f32 3513 // however does not support denormals, so we do report fma as faster if we have 3514 // a fast fma device and require denormals. 3515 // 3516 bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 3517 VT = VT.getScalarType(); 3518 3519 switch (VT.getSimpleVT().SimpleTy) { 3520 case MVT::f32: { 3521 // This is as fast on some subtargets. However, we always have full rate f32 3522 // mad available which returns the same result as the separate operations 3523 // which we should prefer over fma. We can't use this if we want to support 3524 // denormals, so only report this in these cases. 3525 if (Subtarget->hasFP32Denormals()) 3526 return Subtarget->hasFastFMAF32() || Subtarget->hasDLInsts(); 3527 3528 // If the subtarget has v_fmac_f32, that's just as good as v_mac_f32. 3529 return Subtarget->hasFastFMAF32() && Subtarget->hasDLInsts(); 3530 } 3531 case MVT::f64: 3532 return true; 3533 case MVT::f16: 3534 return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals(); 3535 default: 3536 break; 3537 } 3538 3539 return false; 3540 } 3541 3542 //===----------------------------------------------------------------------===// 3543 // Custom DAG Lowering Operations 3544 //===----------------------------------------------------------------------===// 3545 3546 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the 3547 // wider vector type is legal. 3548 SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op, 3549 SelectionDAG &DAG) const { 3550 unsigned Opc = Op.getOpcode(); 3551 EVT VT = Op.getValueType(); 3552 assert(VT == MVT::v4f16); 3553 3554 SDValue Lo, Hi; 3555 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0); 3556 3557 SDLoc SL(Op); 3558 SDValue OpLo = DAG.getNode(Opc, SL, Lo.getValueType(), Lo, 3559 Op->getFlags()); 3560 SDValue OpHi = DAG.getNode(Opc, SL, Hi.getValueType(), Hi, 3561 Op->getFlags()); 3562 3563 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi); 3564 } 3565 3566 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the 3567 // wider vector type is legal. 
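// As an illustration (operand names made up), a v4f16 add such as
//   fadd v4f16 %a, %b
// is rewritten into operations on the two legal packed halves,
//   concat_vectors (fadd v2f16 %a.lo, %b.lo), (fadd v2f16 %a.hi, %b.hi)
// so LegalizeDAG never sees a v4f16 operation that it would scalarize.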
3568 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
3569 SelectionDAG &DAG) const {
3570 unsigned Opc = Op.getOpcode();
3571 EVT VT = Op.getValueType();
3572 assert(VT == MVT::v4i16 || VT == MVT::v4f16);
3573
3574 SDValue Lo0, Hi0;
3575 std::tie(Lo0, Hi0) = DAG.SplitVectorOperand(Op.getNode(), 0);
3576 SDValue Lo1, Hi1;
3577 std::tie(Lo1, Hi1) = DAG.SplitVectorOperand(Op.getNode(), 1);
3578
3579 SDLoc SL(Op);
3580
3581 SDValue OpLo = DAG.getNode(Opc, SL, Lo0.getValueType(), Lo0, Lo1,
3582 Op->getFlags());
3583 SDValue OpHi = DAG.getNode(Opc, SL, Hi0.getValueType(), Hi0, Hi1,
3584 Op->getFlags());
3585
3586 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
3587 }
3588
3589 SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3590 switch (Op.getOpcode()) {
3591 default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
3592 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
3593 case ISD::LOAD: {
3594 SDValue Result = LowerLOAD(Op, DAG);
3595 assert((!Result.getNode() ||
3596 Result.getNode()->getNumValues() == 2) &&
3597 "Load should return a value and a chain");
3598 return Result;
3599 }
3600
3601 case ISD::FSIN:
3602 case ISD::FCOS:
3603 return LowerTrig(Op, DAG);
3604 case ISD::SELECT: return LowerSELECT(Op, DAG);
3605 case ISD::FDIV: return LowerFDIV(Op, DAG);
3606 case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
3607 case ISD::STORE: return LowerSTORE(Op, DAG);
3608 case ISD::GlobalAddress: {
3609 MachineFunction &MF = DAG.getMachineFunction();
3610 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3611 return LowerGlobalAddress(MFI, Op, DAG);
3612 }
3613 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3614 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
3615 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
3616 case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
3617 case ISD::INSERT_VECTOR_ELT:
3618 return lowerINSERT_VECTOR_ELT(Op, DAG);
3619 case ISD::EXTRACT_VECTOR_ELT:
3620 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3621 case ISD::BUILD_VECTOR:
3622 return lowerBUILD_VECTOR(Op, DAG);
3623 case ISD::FP_ROUND:
3624 return lowerFP_ROUND(Op, DAG);
3625 case ISD::TRAP:
3626 return lowerTRAP(Op, DAG);
3627 case ISD::DEBUGTRAP:
3628 return lowerDEBUGTRAP(Op, DAG);
3629 case ISD::FABS:
3630 case ISD::FNEG:
3631 return splitUnaryVectorOp(Op, DAG);
3632 case ISD::SHL:
3633 case ISD::SRA:
3634 case ISD::SRL:
3635 case ISD::ADD:
3636 case ISD::SUB:
3637 case ISD::MUL:
3638 case ISD::SMIN:
3639 case ISD::SMAX:
3640 case ISD::UMIN:
3641 case ISD::UMAX:
3642 case ISD::FMINNUM:
3643 case ISD::FMAXNUM:
3644 case ISD::FADD:
3645 case ISD::FMUL:
3646 return splitBinaryVectorOp(Op, DAG);
3647 }
3648 return SDValue();
3649 }
3650
3651 static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
3652 const SDLoc &DL,
3653 SelectionDAG &DAG, bool Unpacked) {
3654 if (!LoadVT.isVector())
3655 return Result;
3656
3657 if (Unpacked) { // From v2i32/v4i32 back to v2f16/v4f16.
3658 // Truncate to v2i16/v4i16.
3659 EVT IntLoadVT = LoadVT.changeTypeToInteger();
3660
3661 // Work around the legalizer not scalarizing truncate after vector op
3662 // legalization by not creating an intermediate vector trunc.
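// Roughly, for the v2f16 case (types shown for illustration only): the
// unpacked v2i32 result is pulled apart element by element, each element is
// truncated to i16, the pieces are rebuilt as a v2i16 build_vector, and that
// is bitcast back to v2f16 -- instead of emitting the single v2i32 -> v2i16
// vector truncate that the legalizer mishandles here.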
3663 SmallVector<SDValue, 4> Elts; 3664 DAG.ExtractVectorElements(Result, Elts); 3665 for (SDValue &Elt : Elts) 3666 Elt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Elt); 3667 3668 Result = DAG.getBuildVector(IntLoadVT, DL, Elts); 3669 3670 // Bitcast to original type (v2f16/v4f16). 3671 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result); 3672 } 3673 3674 // Cast back to the original packed type. 3675 return DAG.getNode(ISD::BITCAST, DL, LoadVT, Result); 3676 } 3677 3678 SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode, 3679 MemSDNode *M, 3680 SelectionDAG &DAG, 3681 bool IsIntrinsic) const { 3682 SDLoc DL(M); 3683 SmallVector<SDValue, 10> Ops; 3684 Ops.reserve(M->getNumOperands()); 3685 3686 Ops.push_back(M->getOperand(0)); 3687 if (IsIntrinsic) 3688 Ops.push_back(DAG.getConstant(Opcode, DL, MVT::i32)); 3689 3690 // Skip 1, as it is the intrinsic ID. 3691 for (unsigned I = 2, E = M->getNumOperands(); I != E; ++I) 3692 Ops.push_back(M->getOperand(I)); 3693 3694 bool Unpacked = Subtarget->hasUnpackedD16VMem(); 3695 EVT LoadVT = M->getValueType(0); 3696 3697 EVT EquivLoadVT = LoadVT; 3698 if (Unpacked && LoadVT.isVector()) { 3699 EquivLoadVT = LoadVT.isVector() ? 3700 EVT::getVectorVT(*DAG.getContext(), MVT::i32, 3701 LoadVT.getVectorNumElements()) : LoadVT; 3702 } 3703 3704 // Change from v4f16/v2f16 to EquivLoadVT. 3705 SDVTList VTList = DAG.getVTList(EquivLoadVT, MVT::Other); 3706 3707 SDValue Load 3708 = DAG.getMemIntrinsicNode( 3709 IsIntrinsic ? (unsigned)ISD::INTRINSIC_W_CHAIN : Opcode, DL, 3710 VTList, Ops, M->getMemoryVT(), 3711 M->getMemOperand()); 3712 if (!Unpacked) // Just adjusted the opcode. 3713 return Load; 3714 3715 SDValue Adjusted = adjustLoadValueTypeImpl(Load, LoadVT, DL, DAG, Unpacked); 3716 3717 return DAG.getMergeValues({ Adjusted, Load.getValue(1) }, DL); 3718 } 3719 3720 void SITargetLowering::ReplaceNodeResults(SDNode *N, 3721 SmallVectorImpl<SDValue> &Results, 3722 SelectionDAG &DAG) const { 3723 switch (N->getOpcode()) { 3724 case ISD::INSERT_VECTOR_ELT: { 3725 if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG)) 3726 Results.push_back(Res); 3727 return; 3728 } 3729 case ISD::EXTRACT_VECTOR_ELT: { 3730 if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG)) 3731 Results.push_back(Res); 3732 return; 3733 } 3734 case ISD::INTRINSIC_WO_CHAIN: { 3735 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 3736 switch (IID) { 3737 case Intrinsic::amdgcn_cvt_pkrtz: { 3738 SDValue Src0 = N->getOperand(1); 3739 SDValue Src1 = N->getOperand(2); 3740 SDLoc SL(N); 3741 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32, 3742 Src0, Src1); 3743 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt)); 3744 return; 3745 } 3746 case Intrinsic::amdgcn_cvt_pknorm_i16: 3747 case Intrinsic::amdgcn_cvt_pknorm_u16: 3748 case Intrinsic::amdgcn_cvt_pk_i16: 3749 case Intrinsic::amdgcn_cvt_pk_u16: { 3750 SDValue Src0 = N->getOperand(1); 3751 SDValue Src1 = N->getOperand(2); 3752 SDLoc SL(N); 3753 unsigned Opcode; 3754 3755 if (IID == Intrinsic::amdgcn_cvt_pknorm_i16) 3756 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; 3757 else if (IID == Intrinsic::amdgcn_cvt_pknorm_u16) 3758 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; 3759 else if (IID == Intrinsic::amdgcn_cvt_pk_i16) 3760 Opcode = AMDGPUISD::CVT_PK_I16_I32; 3761 else 3762 Opcode = AMDGPUISD::CVT_PK_U16_U32; 3763 3764 SDValue Cvt = DAG.getNode(Opcode, SL, MVT::i32, Src0, Src1); 3765 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, Cvt)); 3766 return; 3767 } 3768 } 
3769 break; 3770 } 3771 case ISD::INTRINSIC_W_CHAIN: { 3772 if (SDValue Res = LowerINTRINSIC_W_CHAIN(SDValue(N, 0), DAG)) { 3773 Results.push_back(Res); 3774 Results.push_back(Res.getValue(1)); 3775 return; 3776 } 3777 3778 break; 3779 } 3780 case ISD::SELECT: { 3781 SDLoc SL(N); 3782 EVT VT = N->getValueType(0); 3783 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 3784 SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1)); 3785 SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2)); 3786 3787 EVT SelectVT = NewVT; 3788 if (NewVT.bitsLT(MVT::i32)) { 3789 LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS); 3790 RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS); 3791 SelectVT = MVT::i32; 3792 } 3793 3794 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT, 3795 N->getOperand(0), LHS, RHS); 3796 3797 if (NewVT != SelectVT) 3798 NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect); 3799 Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect)); 3800 return; 3801 } 3802 case ISD::FNEG: { 3803 if (N->getValueType(0) != MVT::v2f16) 3804 break; 3805 3806 SDLoc SL(N); 3807 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); 3808 3809 SDValue Op = DAG.getNode(ISD::XOR, SL, MVT::i32, 3810 BC, 3811 DAG.getConstant(0x80008000, SL, MVT::i32)); 3812 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); 3813 return; 3814 } 3815 case ISD::FABS: { 3816 if (N->getValueType(0) != MVT::v2f16) 3817 break; 3818 3819 SDLoc SL(N); 3820 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, N->getOperand(0)); 3821 3822 SDValue Op = DAG.getNode(ISD::AND, SL, MVT::i32, 3823 BC, 3824 DAG.getConstant(0x7fff7fff, SL, MVT::i32)); 3825 Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Op)); 3826 return; 3827 } 3828 default: 3829 break; 3830 } 3831 } 3832 3833 /// Helper function for LowerBRCOND 3834 static SDNode *findUser(SDValue Value, unsigned Opcode) { 3835 3836 SDNode *Parent = Value.getNode(); 3837 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end(); 3838 I != E; ++I) { 3839 3840 if (I.getUse().get() != Value) 3841 continue; 3842 3843 if (I->getOpcode() == Opcode) 3844 return *I; 3845 } 3846 return nullptr; 3847 } 3848 3849 unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { 3850 if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) { 3851 switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) { 3852 case Intrinsic::amdgcn_if: 3853 return AMDGPUISD::IF; 3854 case Intrinsic::amdgcn_else: 3855 return AMDGPUISD::ELSE; 3856 case Intrinsic::amdgcn_loop: 3857 return AMDGPUISD::LOOP; 3858 case Intrinsic::amdgcn_end_cf: 3859 llvm_unreachable("should not occur"); 3860 default: 3861 return 0; 3862 } 3863 } 3864 3865 // break, if_break, else_break are all only used as inputs to loop, not 3866 // directly as branch conditions. 3867 return 0; 3868 } 3869 3870 void SITargetLowering::createDebuggerPrologueStackObjects( 3871 MachineFunction &MF) const { 3872 // Create stack objects that are used for emitting debugger prologue. 
3873 //
3874 // Debugger prologue writes work group IDs and work item IDs to scratch memory
3875 // at a fixed location in the following format:
3876 // offset 0: work group ID x
3877 // offset 4: work group ID y
3878 // offset 8: work group ID z
3879 // offset 16: work item ID x
3880 // offset 20: work item ID y
3881 // offset 24: work item ID z
3882 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
3883 int ObjectIdx = 0;
3884
3885 // For each dimension:
3886 for (unsigned i = 0; i < 3; ++i) {
3887 // Create fixed stack object for work group ID.
3888 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
3889 Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
3890 // Create fixed stack object for work item ID.
3891 ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
3892 Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
3893 }
3894 }
3895
3896 bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
3897 const Triple &TT = getTargetMachine().getTargetTriple();
3898 return (GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
3899 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
3900 AMDGPU::shouldEmitConstantsToTextSection(TT);
3901 }
3902
3903 bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
3904 return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
3905 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
3906 GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
3907 !shouldEmitFixup(GV) &&
3908 !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3909 }
3910
3911 bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
3912 return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
3913 }
3914
3915 /// This transforms the control flow intrinsics to get the branch destination as
3916 /// the last parameter; it also switches the branch target with BR if the need arises.
3917 SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
3918 SelectionDAG &DAG) const {
3919 SDLoc DL(BRCOND);
3920
3921 SDNode *Intr = BRCOND.getOperand(1).getNode();
3922 SDValue Target = BRCOND.getOperand(2);
3923 SDNode *BR = nullptr;
3924 SDNode *SetCC = nullptr;
3925
3926 if (Intr->getOpcode() == ISD::SETCC) {
3927 // As long as we negate the condition everything is fine
3928 SetCC = Intr;
3929 Intr = SetCC->getOperand(0).getNode();
3930
3931 } else {
3932 // Get the target from BR if we don't negate the condition
3933 BR = findUser(BRCOND, ISD::BR);
3934 Target = BR->getOperand(1);
3935 }
3936
3937 // FIXME: This changes the types of the intrinsics instead of introducing new
3938 // nodes with the correct types.
3939 // e.g. llvm.amdgcn.loop
3940
3941 // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
3942 // => t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>
3943
3944 unsigned CFNode = isCFIntrinsic(Intr);
3945 if (CFNode == 0) {
3946 // This is a uniform branch so we don't need to legalize.
3947 return BRCOND; 3948 } 3949 3950 bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID || 3951 Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN; 3952 3953 assert(!SetCC || 3954 (SetCC->getConstantOperandVal(1) == 1 && 3955 cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() == 3956 ISD::SETNE)); 3957 3958 // operands of the new intrinsic call 3959 SmallVector<SDValue, 4> Ops; 3960 if (HaveChain) 3961 Ops.push_back(BRCOND.getOperand(0)); 3962 3963 Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end()); 3964 Ops.push_back(Target); 3965 3966 ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end()); 3967 3968 // build the new intrinsic call 3969 SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode(); 3970 3971 if (!HaveChain) { 3972 SDValue Ops[] = { 3973 SDValue(Result, 0), 3974 BRCOND.getOperand(0) 3975 }; 3976 3977 Result = DAG.getMergeValues(Ops, DL).getNode(); 3978 } 3979 3980 if (BR) { 3981 // Give the branch instruction our target 3982 SDValue Ops[] = { 3983 BR->getOperand(0), 3984 BRCOND.getOperand(2) 3985 }; 3986 SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops); 3987 DAG.ReplaceAllUsesWith(BR, NewBR.getNode()); 3988 BR = NewBR.getNode(); 3989 } 3990 3991 SDValue Chain = SDValue(Result, Result->getNumValues() - 1); 3992 3993 // Copy the intrinsic results to registers 3994 for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) { 3995 SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg); 3996 if (!CopyToReg) 3997 continue; 3998 3999 Chain = DAG.getCopyToReg( 4000 Chain, DL, 4001 CopyToReg->getOperand(1), 4002 SDValue(Result, i - 1), 4003 SDValue()); 4004 4005 DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0)); 4006 } 4007 4008 // Remove the old intrinsic from the chain 4009 DAG.ReplaceAllUsesOfValueWith( 4010 SDValue(Intr, Intr->getNumValues() - 1), 4011 Intr->getOperand(0)); 4012 4013 return Chain; 4014 } 4015 4016 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG, 4017 SDValue Op, 4018 const SDLoc &DL, 4019 EVT VT) const { 4020 return Op.getValueType().bitsLE(VT) ? 
4021 DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) : 4022 DAG.getNode(ISD::FTRUNC, DL, VT, Op); 4023 } 4024 4025 SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 4026 assert(Op.getValueType() == MVT::f16 && 4027 "Do not know how to custom lower FP_ROUND for non-f16 type"); 4028 4029 SDValue Src = Op.getOperand(0); 4030 EVT SrcVT = Src.getValueType(); 4031 if (SrcVT != MVT::f64) 4032 return Op; 4033 4034 SDLoc DL(Op); 4035 4036 SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src); 4037 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16); 4038 return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc); 4039 } 4040 4041 SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const { 4042 SDLoc SL(Op); 4043 SDValue Chain = Op.getOperand(0); 4044 4045 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa || 4046 !Subtarget->isTrapHandlerEnabled()) 4047 return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain); 4048 4049 MachineFunction &MF = DAG.getMachineFunction(); 4050 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 4051 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 4052 assert(UserSGPR != AMDGPU::NoRegister); 4053 SDValue QueuePtr = CreateLiveInRegister( 4054 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); 4055 SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64); 4056 SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01, 4057 QueuePtr, SDValue()); 4058 SDValue Ops[] = { 4059 ToReg, 4060 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMTrap, SL, MVT::i16), 4061 SGPR01, 4062 ToReg.getValue(1) 4063 }; 4064 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); 4065 } 4066 4067 SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const { 4068 SDLoc SL(Op); 4069 SDValue Chain = Op.getOperand(0); 4070 MachineFunction &MF = DAG.getMachineFunction(); 4071 4072 if (Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa || 4073 !Subtarget->isTrapHandlerEnabled()) { 4074 DiagnosticInfoUnsupported NoTrap(MF.getFunction(), 4075 "debugtrap handler not supported", 4076 Op.getDebugLoc(), 4077 DS_Warning); 4078 LLVMContext &Ctx = MF.getFunction().getContext(); 4079 Ctx.diagnose(NoTrap); 4080 return Chain; 4081 } 4082 4083 SDValue Ops[] = { 4084 Chain, 4085 DAG.getTargetConstant(GCNSubtarget::TrapIDLLVMDebugTrap, SL, MVT::i16) 4086 }; 4087 return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops); 4088 } 4089 4090 SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL, 4091 SelectionDAG &DAG) const { 4092 // FIXME: Use inline constants (src_{shared, private}_base) instead. 4093 if (Subtarget->hasApertureRegs()) { 4094 unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ? 4095 AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE : 4096 AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE; 4097 unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ? 
4098 AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE : 4099 AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE; 4100 unsigned Encoding = 4101 AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ | 4102 Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ | 4103 WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_; 4104 4105 SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16); 4106 SDValue ApertureReg = SDValue( 4107 DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0); 4108 SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32); 4109 return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount); 4110 } 4111 4112 MachineFunction &MF = DAG.getMachineFunction(); 4113 SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); 4114 unsigned UserSGPR = Info->getQueuePtrUserSGPR(); 4115 assert(UserSGPR != AMDGPU::NoRegister); 4116 4117 SDValue QueuePtr = CreateLiveInRegister( 4118 DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64); 4119 4120 // Offset into amd_queue_t for group_segment_aperture_base_hi / 4121 // private_segment_aperture_base_hi. 4122 uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44; 4123 4124 SDValue Ptr = DAG.getObjectPtrOffset(DL, QueuePtr, StructOffset); 4125 4126 // TODO: Use custom target PseudoSourceValue. 4127 // TODO: We should use the value from the IR intrinsic call, but it might not 4128 // be available and how do we get it? 4129 Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()), 4130 AMDGPUASI.CONSTANT_ADDRESS)); 4131 4132 MachinePointerInfo PtrInfo(V, StructOffset); 4133 return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo, 4134 MinAlign(64, StructOffset), 4135 MachineMemOperand::MODereferenceable | 4136 MachineMemOperand::MOInvariant); 4137 } 4138 4139 SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, 4140 SelectionDAG &DAG) const { 4141 SDLoc SL(Op); 4142 const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op); 4143 4144 SDValue Src = ASC->getOperand(0); 4145 SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64); 4146 4147 const AMDGPUTargetMachine &TM = 4148 static_cast<const AMDGPUTargetMachine &>(getTargetMachine()); 4149 4150 // flat -> local/private 4151 if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) { 4152 unsigned DestAS = ASC->getDestAddressSpace(); 4153 4154 if (DestAS == AMDGPUASI.LOCAL_ADDRESS || 4155 DestAS == AMDGPUASI.PRIVATE_ADDRESS) { 4156 unsigned NullVal = TM.getNullPointerValue(DestAS); 4157 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); 4158 SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE); 4159 SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src); 4160 4161 return DAG.getNode(ISD::SELECT, SL, MVT::i32, 4162 NonNull, Ptr, SegmentNullPtr); 4163 } 4164 } 4165 4166 // local/private -> flat 4167 if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) { 4168 unsigned SrcAS = ASC->getSrcAddressSpace(); 4169 4170 if (SrcAS == AMDGPUASI.LOCAL_ADDRESS || 4171 SrcAS == AMDGPUASI.PRIVATE_ADDRESS) { 4172 unsigned NullVal = TM.getNullPointerValue(SrcAS); 4173 SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32); 4174 4175 SDValue NonNull 4176 = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE); 4177 4178 SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG); 4179 SDValue CvtPtr 4180 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture); 4181 4182 return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull, 4183 DAG.getNode(ISD::BITCAST, SL, MVT::i64, 
CvtPtr), 4184 FlatNullPtr); 4185 } 4186 } 4187 4188 // global <-> flat are no-ops and never emitted. 4189 4190 const MachineFunction &MF = DAG.getMachineFunction(); 4191 DiagnosticInfoUnsupported InvalidAddrSpaceCast( 4192 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc()); 4193 DAG.getContext()->diagnose(InvalidAddrSpaceCast); 4194 4195 return DAG.getUNDEF(ASC->getValueType(0)); 4196 } 4197 4198 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 4199 SelectionDAG &DAG) const { 4200 SDValue Vec = Op.getOperand(0); 4201 SDValue InsVal = Op.getOperand(1); 4202 SDValue Idx = Op.getOperand(2); 4203 EVT VecVT = Vec.getValueType(); 4204 EVT EltVT = VecVT.getVectorElementType(); 4205 unsigned VecSize = VecVT.getSizeInBits(); 4206 unsigned EltSize = EltVT.getSizeInBits(); 4207 4208 4209 assert(VecSize <= 64); 4210 4211 unsigned NumElts = VecVT.getVectorNumElements(); 4212 SDLoc SL(Op); 4213 auto KIdx = dyn_cast<ConstantSDNode>(Idx); 4214 4215 if (NumElts == 4 && EltSize == 16 && KIdx) { 4216 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Vec); 4217 4218 SDValue LoHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, 4219 DAG.getConstant(0, SL, MVT::i32)); 4220 SDValue HiHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BCVec, 4221 DAG.getConstant(1, SL, MVT::i32)); 4222 4223 SDValue LoVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, LoHalf); 4224 SDValue HiVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i16, HiHalf); 4225 4226 unsigned Idx = KIdx->getZExtValue(); 4227 bool InsertLo = Idx < 2; 4228 SDValue InsHalf = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, MVT::v2i16, 4229 InsertLo ? LoVec : HiVec, 4230 DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal), 4231 DAG.getConstant(InsertLo ? Idx : (Idx - 2), SL, MVT::i32)); 4232 4233 InsHalf = DAG.getNode(ISD::BITCAST, SL, MVT::i32, InsHalf); 4234 4235 SDValue Concat = InsertLo ? 4236 DAG.getBuildVector(MVT::v2i32, SL, { InsHalf, HiHalf }) : 4237 DAG.getBuildVector(MVT::v2i32, SL, { LoHalf, InsHalf }); 4238 4239 return DAG.getNode(ISD::BITCAST, SL, VecVT, Concat); 4240 } 4241 4242 if (isa<ConstantSDNode>(Idx)) 4243 return SDValue(); 4244 4245 MVT IntVT = MVT::getIntegerVT(VecSize); 4246 4247 // Avoid stack access for dynamic indexing. 4248 SDValue Val = InsVal; 4249 if (InsVal.getValueType() == MVT::f16) 4250 Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, InsVal); 4251 4252 // v_bfi_b32 (v_bfm_b32 16, (shl idx, 16)), val, vec 4253 SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Val); 4254 4255 assert(isPowerOf2_32(EltSize)); 4256 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); 4257 4258 // Convert vector index to bit-index. 
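// (The element index times the element size gives the bit position of the
// element within the scalar.)  The expansion as a whole is a mask-and-merge
// rather than a stack round trip; sketched for a 16-bit element, with names
// illustrative:
//   mask   = 0xffff << bit_index                ; v_bfm_b32-style mask
//   result = (mask & new_value_in_place) | (~mask & old_vector_bits)
// which is the operation v_bfi_b32 performs, as the comment above notes.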
4259 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); 4260 4261 SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); 4262 SDValue BFM = DAG.getNode(ISD::SHL, SL, IntVT, 4263 DAG.getConstant(0xffff, SL, IntVT), 4264 ScaledIdx); 4265 4266 SDValue LHS = DAG.getNode(ISD::AND, SL, IntVT, BFM, ExtVal); 4267 SDValue RHS = DAG.getNode(ISD::AND, SL, IntVT, 4268 DAG.getNOT(SL, BFM, IntVT), BCVec); 4269 4270 SDValue BFI = DAG.getNode(ISD::OR, SL, IntVT, LHS, RHS); 4271 return DAG.getNode(ISD::BITCAST, SL, VecVT, BFI); 4272 } 4273 4274 SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 4275 SelectionDAG &DAG) const { 4276 SDLoc SL(Op); 4277 4278 EVT ResultVT = Op.getValueType(); 4279 SDValue Vec = Op.getOperand(0); 4280 SDValue Idx = Op.getOperand(1); 4281 EVT VecVT = Vec.getValueType(); 4282 unsigned VecSize = VecVT.getSizeInBits(); 4283 EVT EltVT = VecVT.getVectorElementType(); 4284 assert(VecSize <= 64); 4285 4286 DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr); 4287 4288 // Make sure we do any optimizations that will make it easier to fold 4289 // source modifiers before obscuring it with bit operations. 4290 4291 // XXX - Why doesn't this get called when vector_shuffle is expanded? 4292 if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI)) 4293 return Combined; 4294 4295 unsigned EltSize = EltVT.getSizeInBits(); 4296 assert(isPowerOf2_32(EltSize)); 4297 4298 MVT IntVT = MVT::getIntegerVT(VecSize); 4299 SDValue ScaleFactor = DAG.getConstant(Log2_32(EltSize), SL, MVT::i32); 4300 4301 // Convert vector index to bit-index (* EltSize) 4302 SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, ScaleFactor); 4303 4304 SDValue BC = DAG.getNode(ISD::BITCAST, SL, IntVT, Vec); 4305 SDValue Elt = DAG.getNode(ISD::SRL, SL, IntVT, BC, ScaledIdx); 4306 4307 if (ResultVT == MVT::f16) { 4308 SDValue Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Elt); 4309 return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result); 4310 } 4311 4312 return DAG.getAnyExtOrTrunc(Elt, SL, ResultVT); 4313 } 4314 4315 SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op, 4316 SelectionDAG &DAG) const { 4317 SDLoc SL(Op); 4318 EVT VT = Op.getValueType(); 4319 4320 if (VT == MVT::v4i16 || VT == MVT::v4f16) { 4321 EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(), 2); 4322 4323 // Turn into pair of packed build_vectors. 4324 // TODO: Special case for constants that can be materialized with s_mov_b64. 
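// Sketch of the rewrite for a v4f16 build_vector (a, b, c, d) -- element
// names are illustrative:
//   Lo = build_vector v2f16 (a, b)   -> bitcast to i32
//   Hi = build_vector v2f16 (c, d)   -> bitcast to i32
//   result = bitcast (build_vector v2i32 (Lo, Hi)) to v4f16
// Each packed half is legal on its own and can be selected independently.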
4325 SDValue Lo = DAG.getBuildVector(HalfVT, SL, 4326 { Op.getOperand(0), Op.getOperand(1) }); 4327 SDValue Hi = DAG.getBuildVector(HalfVT, SL, 4328 { Op.getOperand(2), Op.getOperand(3) }); 4329 4330 SDValue CastLo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Lo); 4331 SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Hi); 4332 4333 SDValue Blend = DAG.getBuildVector(MVT::v2i32, SL, { CastLo, CastHi }); 4334 return DAG.getNode(ISD::BITCAST, SL, VT, Blend); 4335 } 4336 4337 assert(VT == MVT::v2f16 || VT == MVT::v2i16); 4338 4339 SDValue Lo = Op.getOperand(0); 4340 SDValue Hi = Op.getOperand(1); 4341 4342 Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Lo); 4343 Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Hi); 4344 4345 Lo = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Lo); 4346 Hi = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Hi); 4347 4348 SDValue ShlHi = DAG.getNode(ISD::SHL, SL, MVT::i32, Hi, 4349 DAG.getConstant(16, SL, MVT::i32)); 4350 4351 SDValue Or = DAG.getNode(ISD::OR, SL, MVT::i32, Lo, ShlHi); 4352 4353 return DAG.getNode(ISD::BITCAST, SL, VT, Or); 4354 } 4355 4356 bool 4357 SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 4358 // We can fold offsets for anything that doesn't require a GOT relocation. 4359 return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS || 4360 GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS || 4361 GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) && 4362 !shouldEmitGOTReloc(GA->getGlobal()); 4363 } 4364 4365 static SDValue 4366 buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV, 4367 const SDLoc &DL, unsigned Offset, EVT PtrVT, 4368 unsigned GAFlags = SIInstrInfo::MO_NONE) { 4369 // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is 4370 // lowered to the following code sequence: 4371 // 4372 // For constant address space: 4373 // s_getpc_b64 s[0:1] 4374 // s_add_u32 s0, s0, $symbol 4375 // s_addc_u32 s1, s1, 0 4376 // 4377 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 4378 // a fixup or relocation is emitted to replace $symbol with a literal 4379 // constant, which is a pc-relative offset from the encoding of the $symbol 4380 // operand to the global variable. 4381 // 4382 // For global address space: 4383 // s_getpc_b64 s[0:1] 4384 // s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo 4385 // s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi 4386 // 4387 // s_getpc_b64 returns the address of the s_add_u32 instruction and then 4388 // fixups or relocations are emitted to replace $symbol@*@lo and 4389 // $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant, 4390 // which is a 64-bit pc-relative offset from the encoding of the $symbol 4391 // operand to the global variable. 4392 // 4393 // What we want here is an offset from the value returned by s_getpc 4394 // (which is the address of the s_add_u32 instruction) to the global 4395 // variable, but since the encoding of $symbol starts 4 bytes after the start 4396 // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too 4397 // small. This requires us to add 4 to the global variable offset in order to 4398 // compute the correct address. 4399 SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 4400 GAFlags); 4401 SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4, 4402 GAFlags == SIInstrInfo::MO_NONE ? 
4403 GAFlags : GAFlags + 1); 4404 return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi); 4405 } 4406 4407 SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, 4408 SDValue Op, 4409 SelectionDAG &DAG) const { 4410 GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op); 4411 const GlobalValue *GV = GSD->getGlobal(); 4412 4413 if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS && 4414 GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS_32BIT && 4415 GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS && 4416 // FIXME: It isn't correct to rely on the type of the pointer. This should 4417 // be removed when address space 0 is 64-bit. 4418 !GV->getType()->getElementType()->isFunctionTy()) 4419 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG); 4420 4421 SDLoc DL(GSD); 4422 EVT PtrVT = Op.getValueType(); 4423 4424 if (shouldEmitFixup(GV)) 4425 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT); 4426 else if (shouldEmitPCReloc(GV)) 4427 return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT, 4428 SIInstrInfo::MO_REL32); 4429 4430 SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT, 4431 SIInstrInfo::MO_GOTPCREL32); 4432 4433 Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext()); 4434 PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS); 4435 const DataLayout &DataLayout = DAG.getDataLayout(); 4436 unsigned Align = DataLayout.getABITypeAlignment(PtrTy); 4437 // FIXME: Use a PseudoSourceValue once those can be assigned an address space. 4438 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); 4439 4440 return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align, 4441 MachineMemOperand::MODereferenceable | 4442 MachineMemOperand::MOInvariant); 4443 } 4444 4445 SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, 4446 const SDLoc &DL, SDValue V) const { 4447 // We can't use S_MOV_B32 directly, because there is no way to specify m0 as 4448 // the destination register. 4449 // 4450 // We can't use CopyToReg, because MachineCSE won't combine COPY instructions, 4451 // so we will end up with redundant moves to m0. 4452 // 4453 // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result. 4454 4455 // A Null SDValue creates a glue result. 4456 SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue, 4457 V, Chain); 4458 return SDValue(M0, 0); 4459 } 4460 4461 SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG, 4462 SDValue Op, 4463 MVT VT, 4464 unsigned Offset) const { 4465 SDLoc SL(Op); 4466 SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL, 4467 DAG.getEntryNode(), Offset, 4, false); 4468 // The local size values will have the hi 16-bits as zero. 
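// The AssertZext emitted below records that guarantee in the DAG: asserting
// the narrower type VT (e.g. i16) on the loaded i32 tells later combines that
// the bits above VT are already zero, so a zero-extend of the truncated
// parameter can fold away. (Explanatory note only; no new operation is
// introduced here.)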
4469 return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param, 4470 DAG.getValueType(VT)); 4471 } 4472 4473 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 4474 EVT VT) { 4475 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), 4476 "non-hsa intrinsic with hsa target", 4477 DL.getDebugLoc()); 4478 DAG.getContext()->diagnose(BadIntrin); 4479 return DAG.getUNDEF(VT); 4480 } 4481 4482 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL, 4483 EVT VT) { 4484 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(), 4485 "intrinsic not supported on subtarget", 4486 DL.getDebugLoc()); 4487 DAG.getContext()->diagnose(BadIntrin); 4488 return DAG.getUNDEF(VT); 4489 } 4490 4491 static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL, 4492 ArrayRef<SDValue> Elts) { 4493 assert(!Elts.empty()); 4494 MVT Type; 4495 unsigned NumElts; 4496 4497 if (Elts.size() == 1) { 4498 Type = MVT::f32; 4499 NumElts = 1; 4500 } else if (Elts.size() == 2) { 4501 Type = MVT::v2f32; 4502 NumElts = 2; 4503 } else if (Elts.size() <= 4) { 4504 Type = MVT::v4f32; 4505 NumElts = 4; 4506 } else if (Elts.size() <= 8) { 4507 Type = MVT::v8f32; 4508 NumElts = 8; 4509 } else { 4510 assert(Elts.size() <= 16); 4511 Type = MVT::v16f32; 4512 NumElts = 16; 4513 } 4514 4515 SmallVector<SDValue, 16> VecElts(NumElts); 4516 for (unsigned i = 0; i < Elts.size(); ++i) { 4517 SDValue Elt = Elts[i]; 4518 if (Elt.getValueType() != MVT::f32) 4519 Elt = DAG.getBitcast(MVT::f32, Elt); 4520 VecElts[i] = Elt; 4521 } 4522 for (unsigned i = Elts.size(); i < NumElts; ++i) 4523 VecElts[i] = DAG.getUNDEF(MVT::f32); 4524 4525 if (NumElts == 1) 4526 return VecElts[0]; 4527 return DAG.getBuildVector(Type, DL, VecElts); 4528 } 4529 4530 static bool parseCachePolicy(SDValue CachePolicy, SelectionDAG &DAG, 4531 SDValue *GLC, SDValue *SLC) { 4532 auto CachePolicyConst = dyn_cast<ConstantSDNode>(CachePolicy.getNode()); 4533 if (!CachePolicyConst) 4534 return false; 4535 4536 uint64_t Value = CachePolicyConst->getZExtValue(); 4537 SDLoc DL(CachePolicy); 4538 if (GLC) { 4539 *GLC = DAG.getTargetConstant((Value & 0x1) ? 1 : 0, DL, MVT::i32); 4540 Value &= ~(uint64_t)0x1; 4541 } 4542 if (SLC) { 4543 *SLC = DAG.getTargetConstant((Value & 0x2) ? 1 : 0, DL, MVT::i32); 4544 Value &= ~(uint64_t)0x2; 4545 } 4546 4547 return Value == 0; 4548 } 4549 4550 SDValue SITargetLowering::lowerImage(SDValue Op, 4551 const AMDGPU::ImageDimIntrinsicInfo *Intr, 4552 SelectionDAG &DAG) const { 4553 SDLoc DL(Op); 4554 MachineFunction &MF = DAG.getMachineFunction(); 4555 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 4556 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); 4557 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); 4558 const AMDGPU::MIMGLZMappingInfo *LZMappingInfo = 4559 AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode); 4560 unsigned IntrOpcode = Intr->BaseOpcode; 4561 4562 SmallVector<EVT, 2> ResultTypes(Op->value_begin(), Op->value_end()); 4563 bool IsD16 = false; 4564 SDValue VData; 4565 int NumVDataDwords; 4566 unsigned AddrIdx; // Index of first address argument 4567 unsigned DMask; 4568 4569 if (BaseOpcode->Atomic) { 4570 VData = Op.getOperand(2); 4571 4572 bool Is64Bit = VData.getValueType() == MVT::i64; 4573 if (BaseOpcode->AtomicX2) { 4574 SDValue VData2 = Op.getOperand(3); 4575 VData = DAG.getBuildVector(Is64Bit ? 
MVT::v2i64 : MVT::v2i32, DL, 4576 {VData, VData2}); 4577 if (Is64Bit) 4578 VData = DAG.getBitcast(MVT::v4i32, VData); 4579 4580 ResultTypes[0] = Is64Bit ? MVT::v2i64 : MVT::v2i32; 4581 DMask = Is64Bit ? 0xf : 0x3; 4582 NumVDataDwords = Is64Bit ? 4 : 2; 4583 AddrIdx = 4; 4584 } else { 4585 DMask = Is64Bit ? 0x3 : 0x1; 4586 NumVDataDwords = Is64Bit ? 2 : 1; 4587 AddrIdx = 3; 4588 } 4589 } else { 4590 unsigned DMaskIdx; 4591 4592 if (BaseOpcode->Store) { 4593 VData = Op.getOperand(2); 4594 4595 MVT StoreVT = VData.getSimpleValueType(); 4596 if (StoreVT.getScalarType() == MVT::f16) { 4597 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS || 4598 !BaseOpcode->HasD16) 4599 return Op; // D16 is unsupported for this instruction 4600 4601 IsD16 = true; 4602 VData = handleD16VData(VData, DAG); 4603 } 4604 4605 NumVDataDwords = (VData.getValueType().getSizeInBits() + 31) / 32; 4606 DMaskIdx = 3; 4607 } else { 4608 MVT LoadVT = Op.getSimpleValueType(); 4609 if (LoadVT.getScalarType() == MVT::f16) { 4610 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS || 4611 !BaseOpcode->HasD16) 4612 return Op; // D16 is unsupported for this instruction 4613 4614 IsD16 = true; 4615 if (LoadVT.isVector() && Subtarget->hasUnpackedD16VMem()) 4616 ResultTypes[0] = (LoadVT == MVT::v2f16) ? MVT::v2i32 : MVT::v4i32; 4617 } 4618 4619 NumVDataDwords = (ResultTypes[0].getSizeInBits() + 31) / 32; 4620 DMaskIdx = isa<MemSDNode>(Op) ? 2 : 1; 4621 } 4622 4623 auto DMaskConst = dyn_cast<ConstantSDNode>(Op.getOperand(DMaskIdx)); 4624 if (!DMaskConst) 4625 return Op; 4626 4627 AddrIdx = DMaskIdx + 1; 4628 DMask = DMaskConst->getZExtValue(); 4629 if (!DMask && !BaseOpcode->Store) { 4630 // Eliminate no-op loads. Stores with dmask == 0 are *not* no-op: they 4631 // store the channels' default values. 4632 SDValue Undef = DAG.getUNDEF(Op.getValueType()); 4633 if (isa<MemSDNode>(Op)) 4634 return DAG.getMergeValues({Undef, Op.getOperand(0)}, DL); 4635 return Undef; 4636 } 4637 } 4638 4639 unsigned NumVAddrs = BaseOpcode->NumExtraArgs + 4640 (BaseOpcode->Gradients ? DimInfo->NumGradients : 0) + 4641 (BaseOpcode->Coordinates ? DimInfo->NumCoords : 0) + 4642 (BaseOpcode->LodOrClampOrMip ? 1 : 0); 4643 SmallVector<SDValue, 4> VAddrs; 4644 for (unsigned i = 0; i < NumVAddrs; ++i) 4645 VAddrs.push_back(Op.getOperand(AddrIdx + i)); 4646 4647 // Optimize _L to _LZ when _L is zero 4648 if (LZMappingInfo) { 4649 if (auto ConstantLod = 4650 dyn_cast<ConstantFPSDNode>(VAddrs[NumVAddrs-1].getNode())) { 4651 if (ConstantLod->isZero() || ConstantLod->isNegative()) { 4652 IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l 4653 VAddrs.pop_back(); // remove 'lod' 4654 } 4655 } 4656 } 4657 4658 SDValue VAddr = getBuildDwordsVector(DAG, DL, VAddrs); 4659 4660 SDValue True = DAG.getTargetConstant(1, DL, MVT::i1); 4661 SDValue False = DAG.getTargetConstant(0, DL, MVT::i1); 4662 unsigned CtrlIdx; // Index of texfailctrl argument 4663 SDValue Unorm; 4664 if (!BaseOpcode->Sampler) { 4665 Unorm = True; 4666 CtrlIdx = AddrIdx + NumVAddrs + 1; 4667 } else { 4668 auto UnormConst = 4669 dyn_cast<ConstantSDNode>(Op.getOperand(AddrIdx + NumVAddrs + 2)); 4670 if (!UnormConst) 4671 return Op; 4672 4673 Unorm = UnormConst->getZExtValue() ? 
True : False; 4674 CtrlIdx = AddrIdx + NumVAddrs + 3; 4675 } 4676 4677 SDValue TexFail = Op.getOperand(CtrlIdx); 4678 auto TexFailConst = dyn_cast<ConstantSDNode>(TexFail.getNode()); 4679 if (!TexFailConst || TexFailConst->getZExtValue() != 0) 4680 return Op; 4681 4682 SDValue GLC; 4683 SDValue SLC; 4684 if (BaseOpcode->Atomic) { 4685 GLC = True; // TODO no-return optimization 4686 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, nullptr, &SLC)) 4687 return Op; 4688 } else { 4689 if (!parseCachePolicy(Op.getOperand(CtrlIdx + 1), DAG, &GLC, &SLC)) 4690 return Op; 4691 } 4692 4693 SmallVector<SDValue, 14> Ops; 4694 if (BaseOpcode->Store || BaseOpcode->Atomic) 4695 Ops.push_back(VData); // vdata 4696 Ops.push_back(VAddr); 4697 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs)); // rsrc 4698 if (BaseOpcode->Sampler) 4699 Ops.push_back(Op.getOperand(AddrIdx + NumVAddrs + 1)); // sampler 4700 Ops.push_back(DAG.getTargetConstant(DMask, DL, MVT::i32)); 4701 Ops.push_back(Unorm); 4702 Ops.push_back(GLC); 4703 Ops.push_back(SLC); 4704 Ops.push_back(False); // r128 4705 Ops.push_back(False); // tfe 4706 Ops.push_back(False); // lwe 4707 Ops.push_back(DimInfo->DA ? True : False); 4708 if (BaseOpcode->HasD16) 4709 Ops.push_back(IsD16 ? True : False); 4710 if (isa<MemSDNode>(Op)) 4711 Ops.push_back(Op.getOperand(0)); // chain 4712 4713 int NumVAddrDwords = VAddr.getValueType().getSizeInBits() / 32; 4714 int Opcode = -1; 4715 4716 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 4717 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8, 4718 NumVDataDwords, NumVAddrDwords); 4719 if (Opcode == -1) 4720 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6, 4721 NumVDataDwords, NumVAddrDwords); 4722 assert(Opcode != -1); 4723 4724 MachineSDNode *NewNode = DAG.getMachineNode(Opcode, DL, ResultTypes, Ops); 4725 if (auto MemOp = dyn_cast<MemSDNode>(Op)) { 4726 MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1); 4727 *MemRefs = MemOp->getMemOperand(); 4728 NewNode->setMemRefs(MemRefs, MemRefs + 1); 4729 } 4730 4731 if (BaseOpcode->AtomicX2) { 4732 SmallVector<SDValue, 1> Elt; 4733 DAG.ExtractVectorElements(SDValue(NewNode, 0), Elt, 0, 1); 4734 return DAG.getMergeValues({Elt[0], SDValue(NewNode, 1)}, DL); 4735 } else if (IsD16 && !BaseOpcode->Store) { 4736 MVT LoadVT = Op.getSimpleValueType(); 4737 SDValue Adjusted = adjustLoadValueTypeImpl( 4738 SDValue(NewNode, 0), LoadVT, DL, DAG, Subtarget->hasUnpackedD16VMem()); 4739 return DAG.getMergeValues({Adjusted, SDValue(NewNode, 1)}, DL); 4740 } 4741 4742 return SDValue(NewNode, 0); 4743 } 4744 4745 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 4746 SelectionDAG &DAG) const { 4747 MachineFunction &MF = DAG.getMachineFunction(); 4748 auto MFI = MF.getInfo<SIMachineFunctionInfo>(); 4749 4750 EVT VT = Op.getValueType(); 4751 SDLoc DL(Op); 4752 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 4753 4754 // TODO: Should this propagate fast-math-flags? 
4755 4756 switch (IntrinsicID) { 4757 case Intrinsic::amdgcn_implicit_buffer_ptr: { 4758 if (getSubtarget()->isAmdCodeObjectV2(MF.getFunction())) 4759 return emitNonHSAIntrinsicError(DAG, DL, VT); 4760 return getPreloadedValue(DAG, *MFI, VT, 4761 AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR); 4762 } 4763 case Intrinsic::amdgcn_dispatch_ptr: 4764 case Intrinsic::amdgcn_queue_ptr: { 4765 if (!Subtarget->isAmdCodeObjectV2(MF.getFunction())) { 4766 DiagnosticInfoUnsupported BadIntrin( 4767 MF.getFunction(), "unsupported hsa intrinsic without hsa target", 4768 DL.getDebugLoc()); 4769 DAG.getContext()->diagnose(BadIntrin); 4770 return DAG.getUNDEF(VT); 4771 } 4772 4773 auto RegID = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ? 4774 AMDGPUFunctionArgInfo::DISPATCH_PTR : AMDGPUFunctionArgInfo::QUEUE_PTR; 4775 return getPreloadedValue(DAG, *MFI, VT, RegID); 4776 } 4777 case Intrinsic::amdgcn_implicitarg_ptr: { 4778 if (MFI->isEntryFunction()) 4779 return getImplicitArgPtr(DAG, DL); 4780 return getPreloadedValue(DAG, *MFI, VT, 4781 AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR); 4782 } 4783 case Intrinsic::amdgcn_kernarg_segment_ptr: { 4784 return getPreloadedValue(DAG, *MFI, VT, 4785 AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); 4786 } 4787 case Intrinsic::amdgcn_dispatch_id: { 4788 return getPreloadedValue(DAG, *MFI, VT, AMDGPUFunctionArgInfo::DISPATCH_ID); 4789 } 4790 case Intrinsic::amdgcn_rcp: 4791 return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1)); 4792 case Intrinsic::amdgcn_rsq: 4793 return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 4794 case Intrinsic::amdgcn_rsq_legacy: 4795 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 4796 return emitRemovedIntrinsicError(DAG, DL, VT); 4797 4798 return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1)); 4799 case Intrinsic::amdgcn_rcp_legacy: 4800 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 4801 return emitRemovedIntrinsicError(DAG, DL, VT); 4802 return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1)); 4803 case Intrinsic::amdgcn_rsq_clamp: { 4804 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) 4805 return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1)); 4806 4807 Type *Type = VT.getTypeForEVT(*DAG.getContext()); 4808 APFloat Max = APFloat::getLargest(Type->getFltSemantics()); 4809 APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true); 4810 4811 SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1)); 4812 SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq, 4813 DAG.getConstantFP(Max, DL, VT)); 4814 return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp, 4815 DAG.getConstantFP(Min, DL, VT)); 4816 } 4817 case Intrinsic::r600_read_ngroups_x: 4818 if (Subtarget->isAmdHsaOS()) 4819 return emitNonHSAIntrinsicError(DAG, DL, VT); 4820 4821 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4822 SI::KernelInputOffsets::NGROUPS_X, 4, false); 4823 case Intrinsic::r600_read_ngroups_y: 4824 if (Subtarget->isAmdHsaOS()) 4825 return emitNonHSAIntrinsicError(DAG, DL, VT); 4826 4827 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4828 SI::KernelInputOffsets::NGROUPS_Y, 4, false); 4829 case Intrinsic::r600_read_ngroups_z: 4830 if (Subtarget->isAmdHsaOS()) 4831 return emitNonHSAIntrinsicError(DAG, DL, VT); 4832 4833 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4834 SI::KernelInputOffsets::NGROUPS_Z, 4, false); 4835 case Intrinsic::r600_read_global_size_x: 4836 if 
(Subtarget->isAmdHsaOS()) 4837 return emitNonHSAIntrinsicError(DAG, DL, VT); 4838 4839 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4840 SI::KernelInputOffsets::GLOBAL_SIZE_X, 4, false); 4841 case Intrinsic::r600_read_global_size_y: 4842 if (Subtarget->isAmdHsaOS()) 4843 return emitNonHSAIntrinsicError(DAG, DL, VT); 4844 4845 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4846 SI::KernelInputOffsets::GLOBAL_SIZE_Y, 4, false); 4847 case Intrinsic::r600_read_global_size_z: 4848 if (Subtarget->isAmdHsaOS()) 4849 return emitNonHSAIntrinsicError(DAG, DL, VT); 4850 4851 return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4852 SI::KernelInputOffsets::GLOBAL_SIZE_Z, 4, false); 4853 case Intrinsic::r600_read_local_size_x: 4854 if (Subtarget->isAmdHsaOS()) 4855 return emitNonHSAIntrinsicError(DAG, DL, VT); 4856 4857 return lowerImplicitZextParam(DAG, Op, MVT::i16, 4858 SI::KernelInputOffsets::LOCAL_SIZE_X); 4859 case Intrinsic::r600_read_local_size_y: 4860 if (Subtarget->isAmdHsaOS()) 4861 return emitNonHSAIntrinsicError(DAG, DL, VT); 4862 4863 return lowerImplicitZextParam(DAG, Op, MVT::i16, 4864 SI::KernelInputOffsets::LOCAL_SIZE_Y); 4865 case Intrinsic::r600_read_local_size_z: 4866 if (Subtarget->isAmdHsaOS()) 4867 return emitNonHSAIntrinsicError(DAG, DL, VT); 4868 4869 return lowerImplicitZextParam(DAG, Op, MVT::i16, 4870 SI::KernelInputOffsets::LOCAL_SIZE_Z); 4871 case Intrinsic::amdgcn_workgroup_id_x: 4872 case Intrinsic::r600_read_tgid_x: 4873 return getPreloadedValue(DAG, *MFI, VT, 4874 AMDGPUFunctionArgInfo::WORKGROUP_ID_X); 4875 case Intrinsic::amdgcn_workgroup_id_y: 4876 case Intrinsic::r600_read_tgid_y: 4877 return getPreloadedValue(DAG, *MFI, VT, 4878 AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); 4879 case Intrinsic::amdgcn_workgroup_id_z: 4880 case Intrinsic::r600_read_tgid_z: 4881 return getPreloadedValue(DAG, *MFI, VT, 4882 AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); 4883 case Intrinsic::amdgcn_workitem_id_x: 4884 case Intrinsic::r600_read_tidig_x: 4885 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, 4886 SDLoc(DAG.getEntryNode()), 4887 MFI->getArgInfo().WorkItemIDX); 4888 case Intrinsic::amdgcn_workitem_id_y: 4889 case Intrinsic::r600_read_tidig_y: 4890 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, 4891 SDLoc(DAG.getEntryNode()), 4892 MFI->getArgInfo().WorkItemIDY); 4893 case Intrinsic::amdgcn_workitem_id_z: 4894 case Intrinsic::r600_read_tidig_z: 4895 return loadInputValue(DAG, &AMDGPU::VGPR_32RegClass, MVT::i32, 4896 SDLoc(DAG.getEntryNode()), 4897 MFI->getArgInfo().WorkItemIDZ); 4898 case AMDGPUIntrinsic::SI_load_const: { 4899 SDValue Ops[] = { 4900 Op.getOperand(1), 4901 Op.getOperand(2) 4902 }; 4903 4904 MachineMemOperand *MMO = MF.getMachineMemOperand( 4905 MachinePointerInfo(), 4906 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable | 4907 MachineMemOperand::MOInvariant, 4908 VT.getStoreSize(), 4); 4909 return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL, 4910 Op->getVTList(), Ops, VT, MMO); 4911 } 4912 case Intrinsic::amdgcn_fdiv_fast: 4913 return lowerFDIV_FAST(Op, DAG); 4914 case Intrinsic::amdgcn_interp_mov: { 4915 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 4916 SDValue Glue = M0.getValue(1); 4917 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1), 4918 Op.getOperand(2), Op.getOperand(3), Glue); 4919 } 4920 case Intrinsic::amdgcn_interp_p1: { 4921 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4)); 
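    // Descriptive note: the interp operations depend on M0, so the last
    // intrinsic operand is copied into M0 first; the glue value taken below
    // keeps that M0 copy tied to the interp node during scheduling.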
4922 SDValue Glue = M0.getValue(1); 4923 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1), 4924 Op.getOperand(2), Op.getOperand(3), Glue); 4925 } 4926 case Intrinsic::amdgcn_interp_p2: { 4927 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5)); 4928 SDValue Glue = SDValue(M0.getNode(), 1); 4929 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1), 4930 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4), 4931 Glue); 4932 } 4933 case Intrinsic::amdgcn_sin: 4934 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1)); 4935 4936 case Intrinsic::amdgcn_cos: 4937 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1)); 4938 4939 case Intrinsic::amdgcn_log_clamp: { 4940 if (Subtarget->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) 4941 return SDValue(); 4942 4943 DiagnosticInfoUnsupported BadIntrin( 4944 MF.getFunction(), "intrinsic not supported on subtarget", 4945 DL.getDebugLoc()); 4946 DAG.getContext()->diagnose(BadIntrin); 4947 return DAG.getUNDEF(VT); 4948 } 4949 case Intrinsic::amdgcn_ldexp: 4950 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, 4951 Op.getOperand(1), Op.getOperand(2)); 4952 4953 case Intrinsic::amdgcn_fract: 4954 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); 4955 4956 case Intrinsic::amdgcn_class: 4957 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT, 4958 Op.getOperand(1), Op.getOperand(2)); 4959 case Intrinsic::amdgcn_div_fmas: 4960 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT, 4961 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 4962 Op.getOperand(4)); 4963 4964 case Intrinsic::amdgcn_div_fixup: 4965 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT, 4966 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 4967 4968 case Intrinsic::amdgcn_trig_preop: 4969 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT, 4970 Op.getOperand(1), Op.getOperand(2)); 4971 case Intrinsic::amdgcn_div_scale: { 4972 // 3rd parameter required to be a constant. 4973 const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4974 if (!Param) 4975 return DAG.getMergeValues({ DAG.getUNDEF(VT), DAG.getUNDEF(MVT::i1) }, DL); 4976 4977 // Translate to the operands expected by the machine instruction. The 4978 // first parameter must be the same as the first instruction. 4979 SDValue Numerator = Op.getOperand(1); 4980 SDValue Denominator = Op.getOperand(2); 4981 4982 // Note this order is opposite of the machine instruction's operations, 4983 // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The 4984 // intrinsic has the numerator as the first operand to match a normal 4985 // division operation. 4986 4987 SDValue Src0 = Param->isAllOnesValue() ? 
Numerator : Denominator; 4988 4989 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0, 4990 Denominator, Numerator); 4991 } 4992 case Intrinsic::amdgcn_icmp: { 4993 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 4994 if (!CD) 4995 return DAG.getUNDEF(VT); 4996 4997 int CondCode = CD->getSExtValue(); 4998 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE || 4999 CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE) 5000 return DAG.getUNDEF(VT); 5001 5002 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode); 5003 ISD::CondCode CCOpcode = getICmpCondCode(IcInput); 5004 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), 5005 Op.getOperand(2), DAG.getCondCode(CCOpcode)); 5006 } 5007 case Intrinsic::amdgcn_fcmp: { 5008 const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3)); 5009 if (!CD) 5010 return DAG.getUNDEF(VT); 5011 5012 int CondCode = CD->getSExtValue(); 5013 if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE || 5014 CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE) 5015 return DAG.getUNDEF(VT); 5016 5017 FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode); 5018 ISD::CondCode CCOpcode = getFCmpCondCode(IcInput); 5019 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1), 5020 Op.getOperand(2), DAG.getCondCode(CCOpcode)); 5021 } 5022 case Intrinsic::amdgcn_fmed3: 5023 return DAG.getNode(AMDGPUISD::FMED3, DL, VT, 5024 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 5025 case Intrinsic::amdgcn_fdot2: 5026 return DAG.getNode(AMDGPUISD::FDOT2, DL, VT, 5027 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3), 5028 Op.getOperand(4)); 5029 case Intrinsic::amdgcn_fmul_legacy: 5030 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT, 5031 Op.getOperand(1), Op.getOperand(2)); 5032 case Intrinsic::amdgcn_sffbh: 5033 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1)); 5034 case Intrinsic::amdgcn_sbfe: 5035 return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT, 5036 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 5037 case Intrinsic::amdgcn_ubfe: 5038 return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT, 5039 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 5040 case Intrinsic::amdgcn_cvt_pkrtz: 5041 case Intrinsic::amdgcn_cvt_pknorm_i16: 5042 case Intrinsic::amdgcn_cvt_pknorm_u16: 5043 case Intrinsic::amdgcn_cvt_pk_i16: 5044 case Intrinsic::amdgcn_cvt_pk_u16: { 5045 // FIXME: Stop adding cast if v2f16/v2i16 are legal. 
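    // These packed-conversion intrinsics all produce two 16-bit lanes packed
    // into a 32-bit value, so the node below is created with an i32 result and
    // then bitcast to the intrinsic's v2f16/v2i16 return type.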
5046 EVT VT = Op.getValueType(); 5047 unsigned Opcode; 5048 5049 if (IntrinsicID == Intrinsic::amdgcn_cvt_pkrtz) 5050 Opcode = AMDGPUISD::CVT_PKRTZ_F16_F32; 5051 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_i16) 5052 Opcode = AMDGPUISD::CVT_PKNORM_I16_F32; 5053 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pknorm_u16) 5054 Opcode = AMDGPUISD::CVT_PKNORM_U16_F32; 5055 else if (IntrinsicID == Intrinsic::amdgcn_cvt_pk_i16) 5056 Opcode = AMDGPUISD::CVT_PK_I16_I32; 5057 else 5058 Opcode = AMDGPUISD::CVT_PK_U16_U32; 5059 5060 SDValue Node = DAG.getNode(Opcode, DL, MVT::i32, 5061 Op.getOperand(1), Op.getOperand(2)); 5062 return DAG.getNode(ISD::BITCAST, DL, VT, Node); 5063 } 5064 case Intrinsic::amdgcn_wqm: { 5065 SDValue Src = Op.getOperand(1); 5066 return SDValue(DAG.getMachineNode(AMDGPU::WQM, DL, Src.getValueType(), Src), 5067 0); 5068 } 5069 case Intrinsic::amdgcn_wwm: { 5070 SDValue Src = Op.getOperand(1); 5071 return SDValue(DAG.getMachineNode(AMDGPU::WWM, DL, Src.getValueType(), Src), 5072 0); 5073 } 5074 case Intrinsic::amdgcn_fmad_ftz: 5075 return DAG.getNode(AMDGPUISD::FMAD_FTZ, DL, VT, Op.getOperand(1), 5076 Op.getOperand(2), Op.getOperand(3)); 5077 default: 5078 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = 5079 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) 5080 return lowerImage(Op, ImageDimIntr, DAG); 5081 5082 return Op; 5083 } 5084 } 5085 5086 SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 5087 SelectionDAG &DAG) const { 5088 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 5089 SDLoc DL(Op); 5090 5091 switch (IntrID) { 5092 case Intrinsic::amdgcn_atomic_inc: 5093 case Intrinsic::amdgcn_atomic_dec: 5094 case Intrinsic::amdgcn_ds_fadd: 5095 case Intrinsic::amdgcn_ds_fmin: 5096 case Intrinsic::amdgcn_ds_fmax: { 5097 MemSDNode *M = cast<MemSDNode>(Op); 5098 unsigned Opc; 5099 switch (IntrID) { 5100 case Intrinsic::amdgcn_atomic_inc: 5101 Opc = AMDGPUISD::ATOMIC_INC; 5102 break; 5103 case Intrinsic::amdgcn_atomic_dec: 5104 Opc = AMDGPUISD::ATOMIC_DEC; 5105 break; 5106 case Intrinsic::amdgcn_ds_fadd: 5107 Opc = AMDGPUISD::ATOMIC_LOAD_FADD; 5108 break; 5109 case Intrinsic::amdgcn_ds_fmin: 5110 Opc = AMDGPUISD::ATOMIC_LOAD_FMIN; 5111 break; 5112 case Intrinsic::amdgcn_ds_fmax: 5113 Opc = AMDGPUISD::ATOMIC_LOAD_FMAX; 5114 break; 5115 default: 5116 llvm_unreachable("Unknown intrinsic!"); 5117 } 5118 SDValue Ops[] = { 5119 M->getOperand(0), // Chain 5120 M->getOperand(2), // Ptr 5121 M->getOperand(3) // Value 5122 }; 5123 5124 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops, 5125 M->getMemoryVT(), M->getMemOperand()); 5126 } 5127 case Intrinsic::amdgcn_buffer_load: 5128 case Intrinsic::amdgcn_buffer_load_format: { 5129 SDValue Ops[] = { 5130 Op.getOperand(0), // Chain 5131 Op.getOperand(2), // rsrc 5132 Op.getOperand(3), // vindex 5133 Op.getOperand(4), // offset 5134 Op.getOperand(5), // glc 5135 Op.getOperand(6) // slc 5136 }; 5137 5138 unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ? 
5139 AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT; 5140 EVT VT = Op.getValueType(); 5141 EVT IntVT = VT.changeTypeToInteger(); 5142 auto *M = cast<MemSDNode>(Op); 5143 EVT LoadVT = Op.getValueType(); 5144 bool IsD16 = LoadVT.getScalarType() == MVT::f16; 5145 if (IsD16) 5146 return adjustLoadValueType(AMDGPUISD::BUFFER_LOAD_FORMAT_D16, M, DAG); 5147 5148 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, 5149 M->getMemOperand()); 5150 } 5151 case Intrinsic::amdgcn_tbuffer_load: { 5152 MemSDNode *M = cast<MemSDNode>(Op); 5153 EVT LoadVT = Op.getValueType(); 5154 bool IsD16 = LoadVT.getScalarType() == MVT::f16; 5155 if (IsD16) { 5156 return adjustLoadValueType(AMDGPUISD::TBUFFER_LOAD_FORMAT_D16, M, DAG); 5157 } 5158 5159 SDValue Ops[] = { 5160 Op.getOperand(0), // Chain 5161 Op.getOperand(2), // rsrc 5162 Op.getOperand(3), // vindex 5163 Op.getOperand(4), // voffset 5164 Op.getOperand(5), // soffset 5165 Op.getOperand(6), // offset 5166 Op.getOperand(7), // dfmt 5167 Op.getOperand(8), // nfmt 5168 Op.getOperand(9), // glc 5169 Op.getOperand(10) // slc 5170 }; 5171 5172 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL, 5173 Op->getVTList(), Ops, LoadVT, 5174 M->getMemOperand()); 5175 } 5176 case Intrinsic::amdgcn_buffer_atomic_swap: 5177 case Intrinsic::amdgcn_buffer_atomic_add: 5178 case Intrinsic::amdgcn_buffer_atomic_sub: 5179 case Intrinsic::amdgcn_buffer_atomic_smin: 5180 case Intrinsic::amdgcn_buffer_atomic_umin: 5181 case Intrinsic::amdgcn_buffer_atomic_smax: 5182 case Intrinsic::amdgcn_buffer_atomic_umax: 5183 case Intrinsic::amdgcn_buffer_atomic_and: 5184 case Intrinsic::amdgcn_buffer_atomic_or: 5185 case Intrinsic::amdgcn_buffer_atomic_xor: { 5186 SDValue Ops[] = { 5187 Op.getOperand(0), // Chain 5188 Op.getOperand(2), // vdata 5189 Op.getOperand(3), // rsrc 5190 Op.getOperand(4), // vindex 5191 Op.getOperand(5), // offset 5192 Op.getOperand(6) // slc 5193 }; 5194 EVT VT = Op.getValueType(); 5195 5196 auto *M = cast<MemSDNode>(Op); 5197 unsigned Opcode = 0; 5198 5199 switch (IntrID) { 5200 case Intrinsic::amdgcn_buffer_atomic_swap: 5201 Opcode = AMDGPUISD::BUFFER_ATOMIC_SWAP; 5202 break; 5203 case Intrinsic::amdgcn_buffer_atomic_add: 5204 Opcode = AMDGPUISD::BUFFER_ATOMIC_ADD; 5205 break; 5206 case Intrinsic::amdgcn_buffer_atomic_sub: 5207 Opcode = AMDGPUISD::BUFFER_ATOMIC_SUB; 5208 break; 5209 case Intrinsic::amdgcn_buffer_atomic_smin: 5210 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMIN; 5211 break; 5212 case Intrinsic::amdgcn_buffer_atomic_umin: 5213 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMIN; 5214 break; 5215 case Intrinsic::amdgcn_buffer_atomic_smax: 5216 Opcode = AMDGPUISD::BUFFER_ATOMIC_SMAX; 5217 break; 5218 case Intrinsic::amdgcn_buffer_atomic_umax: 5219 Opcode = AMDGPUISD::BUFFER_ATOMIC_UMAX; 5220 break; 5221 case Intrinsic::amdgcn_buffer_atomic_and: 5222 Opcode = AMDGPUISD::BUFFER_ATOMIC_AND; 5223 break; 5224 case Intrinsic::amdgcn_buffer_atomic_or: 5225 Opcode = AMDGPUISD::BUFFER_ATOMIC_OR; 5226 break; 5227 case Intrinsic::amdgcn_buffer_atomic_xor: 5228 Opcode = AMDGPUISD::BUFFER_ATOMIC_XOR; 5229 break; 5230 default: 5231 llvm_unreachable("unhandled atomic opcode"); 5232 } 5233 5234 return DAG.getMemIntrinsicNode(Opcode, DL, Op->getVTList(), Ops, VT, 5235 M->getMemOperand()); 5236 } 5237 5238 case Intrinsic::amdgcn_buffer_atomic_cmpswap: { 5239 SDValue Ops[] = { 5240 Op.getOperand(0), // Chain 5241 Op.getOperand(2), // src 5242 Op.getOperand(3), // cmp 5243 Op.getOperand(4), // rsrc 5244 Op.getOperand(5), // vindex 5245 Op.getOperand(6), // 
offset 5246 Op.getOperand(7) // slc 5247 }; 5248 EVT VT = Op.getValueType(); 5249 auto *M = cast<MemSDNode>(Op); 5250 5251 return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL, 5252 Op->getVTList(), Ops, VT, M->getMemOperand()); 5253 } 5254 5255 default: 5256 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = 5257 AMDGPU::getImageDimIntrinsicInfo(IntrID)) 5258 return lowerImage(Op, ImageDimIntr, DAG); 5259 5260 return SDValue(); 5261 } 5262 } 5263 5264 SDValue SITargetLowering::handleD16VData(SDValue VData, 5265 SelectionDAG &DAG) const { 5266 EVT StoreVT = VData.getValueType(); 5267 5268 // No change for f16 and legal vector D16 types. 5269 if (!StoreVT.isVector()) 5270 return VData; 5271 5272 SDLoc DL(VData); 5273 assert((StoreVT.getVectorNumElements() != 3) && "Handle v3f16"); 5274 5275 if (Subtarget->hasUnpackedD16VMem()) { 5276 // We need to unpack the packed data to store. 5277 EVT IntStoreVT = StoreVT.changeTypeToInteger(); 5278 SDValue IntVData = DAG.getNode(ISD::BITCAST, DL, IntStoreVT, VData); 5279 5280 EVT EquivStoreVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, 5281 StoreVT.getVectorNumElements()); 5282 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, EquivStoreVT, IntVData); 5283 return DAG.UnrollVectorOp(ZExt.getNode()); 5284 } 5285 5286 assert(isTypeLegal(StoreVT)); 5287 return VData; 5288 } 5289 5290 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, 5291 SelectionDAG &DAG) const { 5292 SDLoc DL(Op); 5293 SDValue Chain = Op.getOperand(0); 5294 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 5295 MachineFunction &MF = DAG.getMachineFunction(); 5296 5297 switch (IntrinsicID) { 5298 case Intrinsic::amdgcn_exp: { 5299 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 5300 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 5301 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8)); 5302 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9)); 5303 5304 const SDValue Ops[] = { 5305 Chain, 5306 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 5307 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 5308 Op.getOperand(4), // src0 5309 Op.getOperand(5), // src1 5310 Op.getOperand(6), // src2 5311 Op.getOperand(7), // src3 5312 DAG.getTargetConstant(0, DL, MVT::i1), // compr 5313 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 5314 }; 5315 5316 unsigned Opc = Done->isNullValue() ? 5317 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 5318 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 5319 } 5320 case Intrinsic::amdgcn_exp_compr: { 5321 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2)); 5322 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3)); 5323 SDValue Src0 = Op.getOperand(4); 5324 SDValue Src1 = Op.getOperand(5); 5325 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6)); 5326 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7)); 5327 5328 SDValue Undef = DAG.getUNDEF(MVT::f32); 5329 const SDValue Ops[] = { 5330 Chain, 5331 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt 5332 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en 5333 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0), 5334 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1), 5335 Undef, // src2 5336 Undef, // src3 5337 DAG.getTargetConstant(1, DL, MVT::i1), // compr 5338 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1) 5339 }; 5340 5341 unsigned Opc = Done->isNullValue() ? 
5342 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE; 5343 return DAG.getNode(Opc, DL, Op->getVTList(), Ops); 5344 } 5345 case Intrinsic::amdgcn_s_sendmsg: 5346 case Intrinsic::amdgcn_s_sendmsghalt: { 5347 unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ? 5348 AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT; 5349 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3)); 5350 SDValue Glue = Chain.getValue(1); 5351 return DAG.getNode(NodeOp, DL, MVT::Other, Chain, 5352 Op.getOperand(2), Glue); 5353 } 5354 case Intrinsic::amdgcn_init_exec: { 5355 return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain, 5356 Op.getOperand(2)); 5357 } 5358 case Intrinsic::amdgcn_init_exec_from_input: { 5359 return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain, 5360 Op.getOperand(2), Op.getOperand(3)); 5361 } 5362 case AMDGPUIntrinsic::AMDGPU_kill: { 5363 SDValue Src = Op.getOperand(2); 5364 if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) { 5365 if (!K->isNegative()) 5366 return Chain; 5367 5368 SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32); 5369 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne); 5370 } 5371 5372 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src); 5373 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast); 5374 } 5375 case Intrinsic::amdgcn_s_barrier: { 5376 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) { 5377 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 5378 unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second; 5379 if (WGSize <= ST.getWavefrontSize()) 5380 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other, 5381 Op.getOperand(0)), 0); 5382 } 5383 return SDValue(); 5384 }; 5385 case AMDGPUIntrinsic::SI_tbuffer_store: { 5386 5387 // Extract vindex and voffset from vaddr as appropriate 5388 const ConstantSDNode *OffEn = cast<ConstantSDNode>(Op.getOperand(10)); 5389 const ConstantSDNode *IdxEn = cast<ConstantSDNode>(Op.getOperand(11)); 5390 SDValue VAddr = Op.getOperand(5); 5391 5392 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32); 5393 5394 assert(!(OffEn->isOne() && IdxEn->isOne()) && 5395 "Legacy intrinsic doesn't support both offset and index - use new version"); 5396 5397 SDValue VIndex = IdxEn->isOne() ? VAddr : Zero; 5398 SDValue VOffset = OffEn->isOne() ? VAddr : Zero; 5399 5400 // Deal with the vec-3 case 5401 const ConstantSDNode *NumChannels = cast<ConstantSDNode>(Op.getOperand(4)); 5402 auto Opcode = NumChannels->getZExtValue() == 3 ? 
5403 AMDGPUISD::TBUFFER_STORE_FORMAT_X3 : AMDGPUISD::TBUFFER_STORE_FORMAT; 5404 5405 SDValue Ops[] = { 5406 Chain, 5407 Op.getOperand(3), // vdata 5408 Op.getOperand(2), // rsrc 5409 VIndex, 5410 VOffset, 5411 Op.getOperand(6), // soffset 5412 Op.getOperand(7), // inst_offset 5413 Op.getOperand(8), // dfmt 5414 Op.getOperand(9), // nfmt 5415 Op.getOperand(12), // glc 5416 Op.getOperand(13), // slc 5417 }; 5418 5419 assert((cast<ConstantSDNode>(Op.getOperand(14)))->getZExtValue() == 0 && 5420 "Value of tfe other than zero is unsupported"); 5421 5422 EVT VT = Op.getOperand(3).getValueType(); 5423 MachineMemOperand *MMO = MF.getMachineMemOperand( 5424 MachinePointerInfo(), 5425 MachineMemOperand::MOStore, 5426 VT.getStoreSize(), 4); 5427 return DAG.getMemIntrinsicNode(Opcode, DL, 5428 Op->getVTList(), Ops, VT, MMO); 5429 } 5430 5431 case Intrinsic::amdgcn_tbuffer_store: { 5432 SDValue VData = Op.getOperand(2); 5433 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 5434 if (IsD16) 5435 VData = handleD16VData(VData, DAG); 5436 SDValue Ops[] = { 5437 Chain, 5438 VData, // vdata 5439 Op.getOperand(3), // rsrc 5440 Op.getOperand(4), // vindex 5441 Op.getOperand(5), // voffset 5442 Op.getOperand(6), // soffset 5443 Op.getOperand(7), // offset 5444 Op.getOperand(8), // dfmt 5445 Op.getOperand(9), // nfmt 5446 Op.getOperand(10), // glc 5447 Op.getOperand(11) // slc 5448 }; 5449 unsigned Opc = IsD16 ? AMDGPUISD::TBUFFER_STORE_FORMAT_D16 : 5450 AMDGPUISD::TBUFFER_STORE_FORMAT; 5451 MemSDNode *M = cast<MemSDNode>(Op); 5452 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 5453 M->getMemoryVT(), M->getMemOperand()); 5454 } 5455 5456 case Intrinsic::amdgcn_buffer_store: 5457 case Intrinsic::amdgcn_buffer_store_format: { 5458 SDValue VData = Op.getOperand(2); 5459 bool IsD16 = (VData.getValueType().getScalarType() == MVT::f16); 5460 if (IsD16) 5461 VData = handleD16VData(VData, DAG); 5462 SDValue Ops[] = { 5463 Chain, 5464 VData, // vdata 5465 Op.getOperand(3), // rsrc 5466 Op.getOperand(4), // vindex 5467 Op.getOperand(5), // offset 5468 Op.getOperand(6), // glc 5469 Op.getOperand(7) // slc 5470 }; 5471 unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ? 5472 AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT; 5473 Opc = IsD16 ? 
AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc; 5474 MemSDNode *M = cast<MemSDNode>(Op); 5475 return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, 5476 M->getMemoryVT(), M->getMemOperand()); 5477 } 5478 default: { 5479 if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr = 5480 AMDGPU::getImageDimIntrinsicInfo(IntrinsicID)) 5481 return lowerImage(Op, ImageDimIntr, DAG); 5482 5483 return Op; 5484 } 5485 } 5486 } 5487 5488 static SDValue getLoadExtOrTrunc(SelectionDAG &DAG, 5489 ISD::LoadExtType ExtType, SDValue Op, 5490 const SDLoc &SL, EVT VT) { 5491 if (VT.bitsLT(Op.getValueType())) 5492 return DAG.getNode(ISD::TRUNCATE, SL, VT, Op); 5493 5494 switch (ExtType) { 5495 case ISD::SEXTLOAD: 5496 return DAG.getNode(ISD::SIGN_EXTEND, SL, VT, Op); 5497 case ISD::ZEXTLOAD: 5498 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, Op); 5499 case ISD::EXTLOAD: 5500 return DAG.getNode(ISD::ANY_EXTEND, SL, VT, Op); 5501 case ISD::NON_EXTLOAD: 5502 return Op; 5503 } 5504 5505 llvm_unreachable("invalid ext type"); 5506 } 5507 5508 SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const { 5509 SelectionDAG &DAG = DCI.DAG; 5510 if (Ld->getAlignment() < 4 || Ld->isDivergent()) 5511 return SDValue(); 5512 5513 // FIXME: Constant loads should all be marked invariant. 5514 unsigned AS = Ld->getAddressSpace(); 5515 if (AS != AMDGPUASI.CONSTANT_ADDRESS && 5516 AS != AMDGPUASI.CONSTANT_ADDRESS_32BIT && 5517 (AS != AMDGPUAS::GLOBAL_ADDRESS || !Ld->isInvariant())) 5518 return SDValue(); 5519 5520 // Don't do this early, since it may interfere with adjacent load merging for 5521 // illegal types. We can avoid losing alignment information for exotic types 5522 // pre-legalize. 5523 EVT MemVT = Ld->getMemoryVT(); 5524 if ((MemVT.isSimple() && !DCI.isAfterLegalizeDAG()) || 5525 MemVT.getSizeInBits() >= 32) 5526 return SDValue(); 5527 5528 SDLoc SL(Ld); 5529 5530 assert((!MemVT.isVector() || Ld->getExtensionType() == ISD::NON_EXTLOAD) && 5531 "unexpected vector extload"); 5532 5533 // TODO: Drop only high part of range. 5534 SDValue Ptr = Ld->getBasePtr(); 5535 SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, 5536 MVT::i32, SL, Ld->getChain(), Ptr, 5537 Ld->getOffset(), 5538 Ld->getPointerInfo(), MVT::i32, 5539 Ld->getAlignment(), 5540 Ld->getMemOperand()->getFlags(), 5541 Ld->getAAInfo(), 5542 nullptr); // Drop ranges 5543 5544 EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()); 5545 if (MemVT.isFloatingPoint()) { 5546 assert(Ld->getExtensionType() == ISD::NON_EXTLOAD && 5547 "unexpected fp extload"); 5548 TruncVT = MemVT.changeTypeToInteger(); 5549 } 5550 5551 SDValue Cvt = NewLoad; 5552 if (Ld->getExtensionType() == ISD::SEXTLOAD) { 5553 Cvt = DAG.getNode(ISD::SIGN_EXTEND_INREG, SL, MVT::i32, NewLoad, 5554 DAG.getValueType(TruncVT)); 5555 } else if (Ld->getExtensionType() == ISD::ZEXTLOAD || 5556 Ld->getExtensionType() == ISD::NON_EXTLOAD) { 5557 Cvt = DAG.getZeroExtendInReg(NewLoad, SL, TruncVT); 5558 } else { 5559 assert(Ld->getExtensionType() == ISD::EXTLOAD); 5560 } 5561 5562 EVT VT = Ld->getValueType(0); 5563 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 5564 5565 DCI.AddToWorklist(Cvt.getNode()); 5566 5567 // We may need to handle exotic cases, such as i16->i64 extloads, so insert 5568 // the appropriate extension from the 32-bit load. 5569 Cvt = getLoadExtOrTrunc(DAG, Ld->getExtensionType(), Cvt, SL, IntVT); 5570 DCI.AddToWorklist(Cvt.getNode()); 5571 5572 // Handle conversion back to floating point if necessary. 
5573 Cvt = DAG.getNode(ISD::BITCAST, SL, VT, Cvt); 5574 5575 return DAG.getMergeValues({ Cvt, NewLoad.getValue(1) }, SL); 5576 } 5577 5578 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 5579 SDLoc DL(Op); 5580 LoadSDNode *Load = cast<LoadSDNode>(Op); 5581 ISD::LoadExtType ExtType = Load->getExtensionType(); 5582 EVT MemVT = Load->getMemoryVT(); 5583 5584 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { 5585 if (MemVT == MVT::i16 && isTypeLegal(MVT::i16)) 5586 return SDValue(); 5587 5588 // FIXME: Copied from PPC 5589 // First, load into 32 bits, then truncate to 1 bit. 5590 5591 SDValue Chain = Load->getChain(); 5592 SDValue BasePtr = Load->getBasePtr(); 5593 MachineMemOperand *MMO = Load->getMemOperand(); 5594 5595 EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16; 5596 5597 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain, 5598 BasePtr, RealMemVT, MMO); 5599 5600 SDValue Ops[] = { 5601 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD), 5602 NewLD.getValue(1) 5603 }; 5604 5605 return DAG.getMergeValues(Ops, DL); 5606 } 5607 5608 if (!MemVT.isVector()) 5609 return SDValue(); 5610 5611 assert(Op.getValueType().getVectorElementType() == MVT::i32 && 5612 "Custom lowering for non-i32 vectors hasn't been implemented."); 5613 5614 unsigned Alignment = Load->getAlignment(); 5615 unsigned AS = Load->getAddressSpace(); 5616 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT, 5617 AS, Alignment)) { 5618 SDValue Ops[2]; 5619 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG); 5620 return DAG.getMergeValues(Ops, DL); 5621 } 5622 5623 MachineFunction &MF = DAG.getMachineFunction(); 5624 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 5625 // If there is a possibilty that flat instruction access scratch memory 5626 // then we need to use the same legalization rules we use for private. 5627 if (AS == AMDGPUASI.FLAT_ADDRESS) 5628 AS = MFI->hasFlatScratchInit() ? 5629 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS; 5630 5631 unsigned NumElements = MemVT.getVectorNumElements(); 5632 5633 if (AS == AMDGPUASI.CONSTANT_ADDRESS || 5634 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT) { 5635 if (!Op->isDivergent() && Alignment >= 4) 5636 return SDValue(); 5637 // Non-uniform loads will be selected to MUBUF instructions, so they 5638 // have the same legalization requirements as global and private 5639 // loads. 5640 // 5641 } 5642 5643 if (AS == AMDGPUASI.CONSTANT_ADDRESS || 5644 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT || 5645 AS == AMDGPUASI.GLOBAL_ADDRESS) { 5646 if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() && 5647 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load) && 5648 Alignment >= 4) 5649 return SDValue(); 5650 // Non-uniform loads will be selected to MUBUF instructions, so they 5651 // have the same legalization requirements as global and private 5652 // loads. 5653 // 5654 } 5655 if (AS == AMDGPUASI.CONSTANT_ADDRESS || 5656 AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT || 5657 AS == AMDGPUASI.GLOBAL_ADDRESS || 5658 AS == AMDGPUASI.FLAT_ADDRESS) { 5659 if (NumElements > 4) 5660 return SplitVectorLoad(Op, DAG); 5661 // v4 loads are supported for private and global memory. 5662 return SDValue(); 5663 } 5664 if (AS == AMDGPUASI.PRIVATE_ADDRESS) { 5665 // Depending on the setting of the private_element_size field in the 5666 // resource descriptor, we can only make private accesses up to a certain 5667 // size. 
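    // Concretely: a 4-byte private element size forces full scalarization, an
    // 8-byte size splits anything wider than two elements, and a 16-byte size
    // splits anything wider than four elements (matching the global/flat path).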
5668 switch (Subtarget->getMaxPrivateElementSize()) { 5669 case 4: 5670 return scalarizeVectorLoad(Load, DAG); 5671 case 8: 5672 if (NumElements > 2) 5673 return SplitVectorLoad(Op, DAG); 5674 return SDValue(); 5675 case 16: 5676 // Same as global/flat 5677 if (NumElements > 4) 5678 return SplitVectorLoad(Op, DAG); 5679 return SDValue(); 5680 default: 5681 llvm_unreachable("unsupported private_element_size"); 5682 } 5683 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) { 5684 // Use ds_read_b128 if possible. 5685 if (Subtarget->useDS128() && Load->getAlignment() >= 16 && 5686 MemVT.getStoreSize() == 16) 5687 return SDValue(); 5688 5689 if (NumElements > 2) 5690 return SplitVectorLoad(Op, DAG); 5691 } 5692 return SDValue(); 5693 } 5694 5695 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 5696 EVT VT = Op.getValueType(); 5697 assert(VT.getSizeInBits() == 64); 5698 5699 SDLoc DL(Op); 5700 SDValue Cond = Op.getOperand(0); 5701 5702 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 5703 SDValue One = DAG.getConstant(1, DL, MVT::i32); 5704 5705 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1)); 5706 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2)); 5707 5708 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero); 5709 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero); 5710 5711 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1); 5712 5713 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One); 5714 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One); 5715 5716 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1); 5717 5718 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi}); 5719 return DAG.getNode(ISD::BITCAST, DL, VT, Res); 5720 } 5721 5722 // Catch division cases where we can use shortcuts with rcp and rsq 5723 // instructions. 5724 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op, 5725 SelectionDAG &DAG) const { 5726 SDLoc SL(Op); 5727 SDValue LHS = Op.getOperand(0); 5728 SDValue RHS = Op.getOperand(1); 5729 EVT VT = Op.getValueType(); 5730 const SDNodeFlags Flags = Op->getFlags(); 5731 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath || Flags.hasAllowReciprocal(); 5732 5733 if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals()) 5734 return SDValue(); 5735 5736 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) { 5737 if (Unsafe || VT == MVT::f32 || VT == MVT::f16) { 5738 if (CLHS->isExactlyValue(1.0)) { 5739 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to 5740 // the CI documentation has a worst case error of 1 ulp. 5741 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to 5742 // use it as long as we aren't trying to use denormals. 5743 // 5744 // v_rcp_f16 and v_rsq_f16 DO support denormals. 5745 5746 // 1.0 / sqrt(x) -> rsq(x) 5747 5748 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP 5749 // error seems really high at 2^29 ULP. 5750 if (RHS.getOpcode() == ISD::FSQRT) 5751 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0)); 5752 5753 // 1.0 / x -> rcp(x) 5754 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 5755 } 5756 5757 // Same as for 1.0, but expand the sign out of the constant. 
5758 if (CLHS->isExactlyValue(-1.0)) { 5759 // -1.0 / x -> rcp (fneg x) 5760 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 5761 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS); 5762 } 5763 } 5764 } 5765 5766 if (Unsafe) { 5767 // Turn into multiply by the reciprocal. 5768 // x / y -> x * (1.0 / y) 5769 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS); 5770 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags); 5771 } 5772 5773 return SDValue(); 5774 } 5775 5776 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 5777 EVT VT, SDValue A, SDValue B, SDValue GlueChain) { 5778 if (GlueChain->getNumValues() <= 1) { 5779 return DAG.getNode(Opcode, SL, VT, A, B); 5780 } 5781 5782 assert(GlueChain->getNumValues() == 3); 5783 5784 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 5785 switch (Opcode) { 5786 default: llvm_unreachable("no chain equivalent for opcode"); 5787 case ISD::FMUL: 5788 Opcode = AMDGPUISD::FMUL_W_CHAIN; 5789 break; 5790 } 5791 5792 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, 5793 GlueChain.getValue(2)); 5794 } 5795 5796 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL, 5797 EVT VT, SDValue A, SDValue B, SDValue C, 5798 SDValue GlueChain) { 5799 if (GlueChain->getNumValues() <= 1) { 5800 return DAG.getNode(Opcode, SL, VT, A, B, C); 5801 } 5802 5803 assert(GlueChain->getNumValues() == 3); 5804 5805 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue); 5806 switch (Opcode) { 5807 default: llvm_unreachable("no chain equivalent for opcode"); 5808 case ISD::FMA: 5809 Opcode = AMDGPUISD::FMA_W_CHAIN; 5810 break; 5811 } 5812 5813 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C, 5814 GlueChain.getValue(2)); 5815 } 5816 5817 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const { 5818 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 5819 return FastLowered; 5820 5821 SDLoc SL(Op); 5822 SDValue Src0 = Op.getOperand(0); 5823 SDValue Src1 = Op.getOperand(1); 5824 5825 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0); 5826 SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1); 5827 5828 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1); 5829 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1); 5830 5831 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32); 5832 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag); 5833 5834 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0); 5835 } 5836 5837 // Faster 2.5 ULP division that does not support denormals. 5838 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { 5839 SDLoc SL(Op); 5840 SDValue LHS = Op.getOperand(1); 5841 SDValue RHS = Op.getOperand(2); 5842 5843 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS); 5844 5845 const APFloat K0Val(BitsToFloat(0x6f800000)); 5846 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32); 5847 5848 const APFloat K1Val(BitsToFloat(0x2f800000)); 5849 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32); 5850 5851 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 5852 5853 EVT SetCCVT = 5854 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); 5855 5856 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT); 5857 5858 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One); 5859 5860 // TODO: Should this propagate fast-math-flags? 
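  // K0 and K1 above are 2^96 (0x6f800000) and 2^-32 (0x2f800000): when |RHS|
  // exceeds 2^96 the denominator is scaled down by 2^-32 before taking the
  // reciprocal, and the same factor r3 multiplies the final product so the
  // quotient comes out unchanged.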
5861 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3); 5862 5863 // rcp does not support denormals. 5864 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1); 5865 5866 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0); 5867 5868 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul); 5869 } 5870 5871 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { 5872 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG)) 5873 return FastLowered; 5874 5875 SDLoc SL(Op); 5876 SDValue LHS = Op.getOperand(0); 5877 SDValue RHS = Op.getOperand(1); 5878 5879 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 5880 5881 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1); 5882 5883 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 5884 RHS, RHS, LHS); 5885 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, 5886 LHS, RHS, LHS); 5887 5888 // Denominator is scaled to not be denormal, so using rcp is ok. 5889 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, 5890 DenominatorScaled); 5891 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, 5892 DenominatorScaled); 5893 5894 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE | 5895 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) | 5896 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_); 5897 5898 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16); 5899 5900 if (!Subtarget->hasFP32Denormals()) { 5901 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue); 5902 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE, 5903 SL, MVT::i32); 5904 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs, 5905 DAG.getEntryNode(), 5906 EnableDenormValue, BitField); 5907 SDValue Ops[3] = { 5908 NegDivScale0, 5909 EnableDenorm.getValue(0), 5910 EnableDenorm.getValue(1) 5911 }; 5912 5913 NegDivScale0 = DAG.getMergeValues(Ops, SL); 5914 } 5915 5916 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, 5917 ApproxRcp, One, NegDivScale0); 5918 5919 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, 5920 ApproxRcp, Fma0); 5921 5922 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled, 5923 Fma1, Fma1); 5924 5925 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, 5926 NumeratorScaled, Mul); 5927 5928 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA,SL, MVT::f32, Fma2, Fma1, Mul, Fma2); 5929 5930 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, 5931 NumeratorScaled, Fma3); 5932 5933 if (!Subtarget->hasFP32Denormals()) { 5934 const SDValue DisableDenormValue = 5935 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32); 5936 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other, 5937 Fma4.getValue(1), 5938 DisableDenormValue, 5939 BitField, 5940 Fma4.getValue(2)); 5941 5942 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, 5943 DisableDenorm, DAG.getRoot()); 5944 DAG.setRoot(OutputChain); 5945 } 5946 5947 SDValue Scale = NumeratorScaled.getValue(1); 5948 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, 5949 Fma4, Fma1, Fma3, Scale); 5950 5951 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS); 5952 } 5953 5954 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { 5955 if (DAG.getTarget().Options.UnsafeFPMath) 5956 return lowerFastUnsafeFDIV(Op, DAG); 5957 5958 SDLoc SL(Op); 5959 SDValue X = Op.getOperand(0); 5960 SDValue Y = Op.getOperand(1); 
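  // The expansion below is the usual div_scale / div_fmas / div_fixup recipe:
  // scale the operands, start from a rcp approximation of the scaled
  // denominator, refine it with a chain of FMAs, and let div_fixup handle the
  // special-case inputs.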
5961 5962 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); 5963 5964 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1); 5965 5966 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X); 5967 5968 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0); 5969 5970 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0); 5971 5972 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One); 5973 5974 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp); 5975 5976 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One); 5977 5978 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X); 5979 5980 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1); 5981 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3); 5982 5983 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64, 5984 NegDivScale0, Mul, DivScale1); 5985 5986 SDValue Scale; 5987 5988 if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS) { 5989 // Workaround a hardware bug on SI where the condition output from div_scale 5990 // is not usable. 5991 5992 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32); 5993 5994 // Figure out if the scale to use for div_fmas. 5995 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 5996 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y); 5997 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0); 5998 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1); 5999 6000 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi); 6001 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi); 6002 6003 SDValue Scale0Hi 6004 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi); 6005 SDValue Scale1Hi 6006 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi); 6007 6008 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ); 6009 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ); 6010 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen); 6011 } else { 6012 Scale = DivScale1.getValue(1); 6013 } 6014 6015 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64, 6016 Fma4, Fma3, Mul, Scale); 6017 6018 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X); 6019 } 6020 6021 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { 6022 EVT VT = Op.getValueType(); 6023 6024 if (VT == MVT::f32) 6025 return LowerFDIV32(Op, DAG); 6026 6027 if (VT == MVT::f64) 6028 return LowerFDIV64(Op, DAG); 6029 6030 if (VT == MVT::f16) 6031 return LowerFDIV16(Op, DAG); 6032 6033 llvm_unreachable("Unexpected type for fdiv"); 6034 } 6035 6036 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 6037 SDLoc DL(Op); 6038 StoreSDNode *Store = cast<StoreSDNode>(Op); 6039 EVT VT = Store->getMemoryVT(); 6040 6041 if (VT == MVT::i1) { 6042 return DAG.getTruncStore(Store->getChain(), DL, 6043 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32), 6044 Store->getBasePtr(), MVT::i1, Store->getMemOperand()); 6045 } 6046 6047 assert(VT.isVector() && 6048 Store->getValue().getValueType().getScalarType() == MVT::i32); 6049 6050 unsigned AS = Store->getAddressSpace(); 6051 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT, 6052 AS, Store->getAlignment())) { 6053 return expandUnalignedStore(Store, DAG); 6054 } 6055 6056 MachineFunction &MF = 
DAG.getMachineFunction(); 6057 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 6058 // If there is a possibilty that flat instruction access scratch memory 6059 // then we need to use the same legalization rules we use for private. 6060 if (AS == AMDGPUASI.FLAT_ADDRESS) 6061 AS = MFI->hasFlatScratchInit() ? 6062 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS; 6063 6064 unsigned NumElements = VT.getVectorNumElements(); 6065 if (AS == AMDGPUASI.GLOBAL_ADDRESS || 6066 AS == AMDGPUASI.FLAT_ADDRESS) { 6067 if (NumElements > 4) 6068 return SplitVectorStore(Op, DAG); 6069 return SDValue(); 6070 } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) { 6071 switch (Subtarget->getMaxPrivateElementSize()) { 6072 case 4: 6073 return scalarizeVectorStore(Store, DAG); 6074 case 8: 6075 if (NumElements > 2) 6076 return SplitVectorStore(Op, DAG); 6077 return SDValue(); 6078 case 16: 6079 if (NumElements > 4) 6080 return SplitVectorStore(Op, DAG); 6081 return SDValue(); 6082 default: 6083 llvm_unreachable("unsupported private_element_size"); 6084 } 6085 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) { 6086 // Use ds_write_b128 if possible. 6087 if (Subtarget->useDS128() && Store->getAlignment() >= 16 && 6088 VT.getStoreSize() == 16) 6089 return SDValue(); 6090 6091 if (NumElements > 2) 6092 return SplitVectorStore(Op, DAG); 6093 return SDValue(); 6094 } else { 6095 llvm_unreachable("unhandled address space"); 6096 } 6097 } 6098 6099 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { 6100 SDLoc DL(Op); 6101 EVT VT = Op.getValueType(); 6102 SDValue Arg = Op.getOperand(0); 6103 // TODO: Should this propagate fast-math-flags? 6104 SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT, 6105 DAG.getNode(ISD::FMUL, DL, VT, Arg, 6106 DAG.getConstantFP(0.5/M_PI, DL, 6107 VT))); 6108 6109 switch (Op.getOpcode()) { 6110 case ISD::FCOS: 6111 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart); 6112 case ISD::FSIN: 6113 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart); 6114 default: 6115 llvm_unreachable("Wrong trig opcode"); 6116 } 6117 } 6118 6119 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { 6120 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op); 6121 assert(AtomicNode->isCompareAndSwap()); 6122 unsigned AS = AtomicNode->getAddressSpace(); 6123 6124 // No custom lowering required for local address space 6125 if (!isFlatGlobalAddrSpace(AS, AMDGPUASI)) 6126 return Op; 6127 6128 // Non-local address space requires custom lowering for atomic compare 6129 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2 6130 SDLoc DL(Op); 6131 SDValue ChainIn = Op.getOperand(0); 6132 SDValue Addr = Op.getOperand(1); 6133 SDValue Old = Op.getOperand(2); 6134 SDValue New = Op.getOperand(3); 6135 EVT VT = Op.getValueType(); 6136 MVT SimpleVT = VT.getSimpleVT(); 6137 MVT VecType = MVT::getVectorVT(SimpleVT, 2); 6138 6139 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old}); 6140 SDValue Ops[] = { ChainIn, Addr, NewOld }; 6141 6142 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(), 6143 Ops, VT, AtomicNode->getMemOperand()); 6144 } 6145 6146 //===----------------------------------------------------------------------===// 6147 // Custom DAG optimizations 6148 //===----------------------------------------------------------------------===// 6149 6150 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N, 6151 DAGCombinerInfo &DCI) const { 6152 EVT VT = N->getValueType(0); 6153 
EVT ScalarVT = VT.getScalarType(); 6154 if (ScalarVT != MVT::f32) 6155 return SDValue(); 6156 6157 SelectionDAG &DAG = DCI.DAG; 6158 SDLoc DL(N); 6159 6160 SDValue Src = N->getOperand(0); 6161 EVT SrcVT = Src.getValueType(); 6162 6163 // TODO: We could try to match extracting the higher bytes, which would be 6164 // easier if i8 vectors weren't promoted to i32 vectors, particularly after 6165 // types are legalized. v4i8 -> v4f32 is probably the only case to worry 6166 // about in practice. 6167 if (DCI.isAfterLegalizeDAG() && SrcVT == MVT::i32) { 6168 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) { 6169 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src); 6170 DCI.AddToWorklist(Cvt.getNode()); 6171 return Cvt; 6172 } 6173 } 6174 6175 return SDValue(); 6176 } 6177 6178 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2) 6179 6180 // This is a variant of 6181 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2), 6182 // 6183 // The normal DAG combiner will do this, but only if the add has one use since 6184 // that would increase the number of instructions. 6185 // 6186 // This prevents us from seeing a constant offset that can be folded into a 6187 // memory instruction's addressing mode. If we know the resulting add offset of 6188 // a pointer can be folded into an addressing offset, we can replace the pointer 6189 // operand with the add of new constant offset. This eliminates one of the uses, 6190 // and may allow the remaining use to also be simplified. 6191 // 6192 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N, 6193 unsigned AddrSpace, 6194 EVT MemVT, 6195 DAGCombinerInfo &DCI) const { 6196 SDValue N0 = N->getOperand(0); 6197 SDValue N1 = N->getOperand(1); 6198 6199 // We only do this to handle cases where it's profitable when there are 6200 // multiple uses of the add, so defer to the standard combine. 6201 if ((N0.getOpcode() != ISD::ADD && N0.getOpcode() != ISD::OR) || 6202 N0->hasOneUse()) 6203 return SDValue(); 6204 6205 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1); 6206 if (!CN1) 6207 return SDValue(); 6208 6209 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 6210 if (!CAdd) 6211 return SDValue(); 6212 6213 // If the resulting offset is too large, we can't fold it into the addressing 6214 // mode offset. 6215 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue(); 6216 Type *Ty = MemVT.getTypeForEVT(*DCI.DAG.getContext()); 6217 6218 AddrMode AM; 6219 AM.HasBaseReg = true; 6220 AM.BaseOffs = Offset.getSExtValue(); 6221 if (!isLegalAddressingMode(DCI.DAG.getDataLayout(), AM, Ty, AddrSpace)) 6222 return SDValue(); 6223 6224 SelectionDAG &DAG = DCI.DAG; 6225 SDLoc SL(N); 6226 EVT VT = N->getValueType(0); 6227 6228 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1); 6229 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32); 6230 6231 SDNodeFlags Flags; 6232 Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() && 6233 (N0.getOpcode() == ISD::OR || 6234 N0->getFlags().hasNoUnsignedWrap())); 6235 6236 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset, Flags); 6237 } 6238 6239 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N, 6240 DAGCombinerInfo &DCI) const { 6241 SDValue Ptr = N->getBasePtr(); 6242 SelectionDAG &DAG = DCI.DAG; 6243 SDLoc SL(N); 6244 6245 // TODO: We could also do this for multiplies. 
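  // performSHLPtrCombine rewrites (shl (add x, c1), c2) as
  // add (shl x, c2), (c1 << c2); for example (shl (add x, 4), 2) becomes
  // add (shl x, 2), 16. This lets the shifted constant later be folded into
  // the memory instruction's addressing-mode offset. The pointer operand lives
  // at index 2 for stores and index 1 for loads, which is what the operand
  // update below relies on.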
  if (Ptr.getOpcode() == ISD::SHL) {
    SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), N->getAddressSpace(),
                                          N->getMemoryVT(), DCI);
    if (NewPtr) {
      SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());

      NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
      return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
    }
  }

  return SDValue();
}

static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
  return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
         (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
         (Opc == ISD::XOR && Val == 0);
}

// Break up a 64-bit bit operation on a constant into two 32-bit and/or/xor
// ops. This will typically happen anyway for a VALU 64-bit and. This exposes
// other 32-bit integer combine opportunities since most 64-bit operations are
// decomposed this way.
// TODO: We won't want this for SALU especially if it is an inline immediate.
SDValue SITargetLowering::splitBinaryBitConstantOp(
  DAGCombinerInfo &DCI,
  const SDLoc &SL,
  unsigned Opc, SDValue LHS,
  const ConstantSDNode *CRHS) const {
  uint64_t Val = CRHS->getZExtValue();
  uint32_t ValLo = Lo_32(Val);
  uint32_t ValHi = Hi_32(Val);
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
       bitOpWithConstantIsReducible(Opc, ValHi)) ||
      (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
    // If we need to materialize a 64-bit immediate, it will be split up later
    // anyway. Avoid creating the harder to understand 64-bit immediate
    // materialization.
    return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
  }

  return SDValue();
}

// Returns true if the argument is a boolean value which is not serialized
// into memory or an argument and does not require v_cndmask_b32 to be
// deserialized.
static bool isBoolSGPR(SDValue V) {
  if (V.getValueType() != MVT::i1)
    return false;
  switch (V.getOpcode()) {
  default: break;
  case ISD::SETCC:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case AMDGPUISD::FP_CLASS:
    return true;
  }
  return false;
}

// If a constant has all zeroes or all ones within each byte return it.
// Otherwise return 0.
static uint32_t getConstantPermuteMask(uint32_t C) {
  // 0xff for any zero byte in the mask
  uint32_t ZeroByteMask = 0;
  if (!(C & 0x000000ff)) ZeroByteMask |= 0x000000ff;
  if (!(C & 0x0000ff00)) ZeroByteMask |= 0x0000ff00;
  if (!(C & 0x00ff0000)) ZeroByteMask |= 0x00ff0000;
  if (!(C & 0xff000000)) ZeroByteMask |= 0xff000000;
  uint32_t NonZeroByteMask = ~ZeroByteMask; // 0xff for any non-zero byte
  if ((NonZeroByteMask & C) != NonZeroByteMask)
    return 0; // Partial bytes selected.
  return C;
}

// Check if a node selects whole bytes from its operand 0 starting at a byte
// boundary while masking the rest. Returns the select mask as used by
// v_perm_b32, or ~0 if the match fails.
// Note byte select encoding:
// value 0-3 selects corresponding source byte;
// value 0xc selects zero;
// value 0xff selects 0xff.
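// For example (illustrative constant): (and x, 0x0000ffff) keeps bytes 1:0
// and zeroes bytes 3:2, which encodes as the select mask 0x0c0c0100.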
static uint32_t getPermuteMask(SelectionDAG &DAG, SDValue V) {
  assert(V.getValueSizeInBits() == 32);

  if (V.getNumOperands() != 2)
    return ~0;

  ConstantSDNode *N1 = dyn_cast<ConstantSDNode>(V.getOperand(1));
  if (!N1)
    return ~0;

  uint32_t C = N1->getZExtValue();

  switch (V.getOpcode()) {
  default:
    break;
  case ISD::AND:
    if (uint32_t ConstMask = getConstantPermuteMask(C)) {
      return (0x03020100 & ConstMask) | (0x0c0c0c0c & ~ConstMask);
    }
    break;

  case ISD::OR:
    if (uint32_t ConstMask = getConstantPermuteMask(C)) {
      return (0x03020100 & ~ConstMask) | ConstMask;
    }
    break;

  case ISD::SHL:
    if (C % 8)
      return ~0;

    return uint32_t((0x030201000c0c0c0cull << C) >> 32);

  case ISD::SRL:
    if (C % 8)
      return ~0;

    return uint32_t(0x0c0c0c0c03020100ull >> C);
  }

  return ~0;
}

SDValue SITargetLowering::performAndCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  if (DCI.isBeforeLegalize())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (VT == MVT::i64 && CRHS) {
    if (SDValue Split
          = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
      return Split;
  }

  if (CRHS && VT == MVT::i32) {
    // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
    // nb = number of trailing zeroes in mask
    // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
    // given that we are selecting 8 or 16 bit fields starting at a byte
    // boundary.
    uint64_t Mask = CRHS->getZExtValue();
    unsigned Bits = countPopulation(Mask);
    if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
        (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
      if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
        unsigned Shift = CShift->getZExtValue();
        unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
        unsigned Offset = NB + Shift;
        if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
          SDLoc SL(N);
          SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
                                    LHS->getOperand(0),
                                    DAG.getConstant(Offset, SL, MVT::i32),
                                    DAG.getConstant(Bits, SL, MVT::i32));
          EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
          SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
                                    DAG.getValueType(NarrowVT));
          SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
                                    DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
          return Shl;
        }
      }
    }

    // and (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
    if (LHS.hasOneUse() && LHS.getOpcode() == AMDGPUISD::PERM &&
        isa<ConstantSDNode>(LHS.getOperand(2))) {
      uint32_t Sel = getConstantPermuteMask(Mask);
      if (!Sel)
        return SDValue();

      // Select 0xc for all zero bytes.
      Sel = (LHS.getConstantOperandVal(2) & Sel) | (~Sel & 0x0c0c0c0c);
      SDLoc DL(N);
      return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
                         LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
    }
  }

  // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
  // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
  if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
    ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
    ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();

    SDValue X = LHS.getOperand(0);
    SDValue Y = RHS.getOperand(0);
    if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
      return SDValue();

    if (LCC == ISD::SETO) {
      if (X != LHS.getOperand(1))
        return SDValue();

      if (RCC == ISD::SETUNE) {
        const ConstantFPSDNode *C1
          = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
        if (!C1 || !C1->isInfinity() || C1->isNegative())
          return SDValue();

        const uint32_t Mask = SIInstrFlags::N_NORMAL |
                              SIInstrFlags::N_SUBNORMAL |
                              SIInstrFlags::N_ZERO |
                              SIInstrFlags::P_ZERO |
                              SIInstrFlags::P_SUBNORMAL |
                              SIInstrFlags::P_NORMAL;

        static_assert(((~(SIInstrFlags::S_NAN |
                          SIInstrFlags::Q_NAN |
                          SIInstrFlags::N_INFINITY |
                          SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
                      "mask not equal");

        SDLoc DL(N);
        return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
                           X, DAG.getConstant(Mask, DL, MVT::i32));
      }
    }
  }

  if (VT == MVT::i32 &&
      (RHS.getOpcode() == ISD::SIGN_EXTEND ||
       LHS.getOpcode() == ISD::SIGN_EXTEND)) {
    // and x, (sext cc from i1) => select cc, x, 0
    if (RHS.getOpcode() != ISD::SIGN_EXTEND)
      std::swap(LHS, RHS);
    if (isBoolSGPR(RHS.getOperand(0)))
      return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
                           LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
  }

  // and (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
      N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
    uint32_t LHSMask = getPermuteMask(DAG, LHS);
    uint32_t RHSMask = getPermuteMask(DAG, RHS);
    if (LHSMask != ~0u && RHSMask != ~0u) {
      // Canonicalize the expression in an attempt to have fewer unique masks
      // and therefore fewer registers used to hold the masks.
      if (LHSMask > RHSMask) {
        std::swap(LHSMask, RHSMask);
        std::swap(LHS, RHS);
      }

      // Select 0xc for each lane used from the source operand. Zero has the
      // 0xc mask set, 0xff has 0xff in the mask, and actual lanes are in the
      // 0-3 range.
      uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
      uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;

      // Check if we need to combine values from two sources within a byte.
      if (!(LHSUsedLanes & RHSUsedLanes) &&
          // If we select the high and the low word, keep it for SDWA.
          // TODO: teach SDWA to work with v_perm_b32 and remove the check.
          !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
        // Each byte in each mask is either a selector mask 0-3, or has higher
        // bits set in either of the masks, which can be 0xff for 0xff or 0x0c
        // for zero. If 0x0c is in either mask it shall always be 0x0c.
        // Otherwise the mask which is not 0xff wins. By anding both masks we
        // have a correct result except that 0x0c shall be corrected to give
        // 0x0c only.
        uint32_t Mask = LHSMask & RHSMask;
        for (unsigned I = 0; I < 32; I += 8) {
          uint32_t ByteSel = 0xff << I;
          if ((LHSMask & ByteSel) == 0x0c || (RHSMask & ByteSel) == 0x0c)
            Mask &= (0x0c << I) & 0xffffffff;
        }

        // Add 4 to each active LHS lane. It will not affect any existing 0xff
        // or 0x0c.
        uint32_t Sel = Mask | (LHSUsedLanes & 0x04040404);
        SDLoc DL(N);

        return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
                           LHS.getOperand(0), RHS.getOperand(0),
                           DAG.getConstant(Sel, DL, MVT::i32));
      }
    }
  }

  return SDValue();
}

SDValue SITargetLowering::performOrCombine(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  EVT VT = N->getValueType(0);
  if (VT == MVT::i1) {
    // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
    if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
        RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
      SDValue Src = LHS.getOperand(0);
      if (Src != RHS.getOperand(0))
        return SDValue();

      const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
      const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
      if (!CLHS || !CRHS)
        return SDValue();

      // Only 10 bits are used.
      static const uint32_t MaxMask = 0x3ff;

      uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
      SDLoc DL(N);
      return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
                         Src, DAG.getConstant(NewMask, DL, MVT::i32));
    }

    return SDValue();
  }

  // or (perm x, y, c1), c2 -> perm x, y, permute_mask(c1, c2)
  if (isa<ConstantSDNode>(RHS) && LHS.hasOneUse() &&
      LHS.getOpcode() == AMDGPUISD::PERM &&
      isa<ConstantSDNode>(LHS.getOperand(2))) {
    uint32_t Sel = getConstantPermuteMask(N->getConstantOperandVal(1));
    if (!Sel)
      return SDValue();

    Sel |= LHS.getConstantOperandVal(2);
    SDLoc DL(N);
    return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32, LHS.getOperand(0),
                       LHS.getOperand(1), DAG.getConstant(Sel, DL, MVT::i32));
  }

  // or (op x, c1), (op y, c2) -> perm x, y, permute_mask(c1, c2)
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  if (VT == MVT::i32 && LHS.hasOneUse() && RHS.hasOneUse() &&
      N->isDivergent() && TII->pseudoToMCOpcode(AMDGPU::V_PERM_B32) != -1) {
    uint32_t LHSMask = getPermuteMask(DAG, LHS);
    uint32_t RHSMask = getPermuteMask(DAG, RHS);
    if (LHSMask != ~0u && RHSMask != ~0u) {
      // Canonicalize the expression in an attempt to have fewer unique masks
      // and therefore fewer registers used to hold the masks.
      if (LHSMask > RHSMask) {
        std::swap(LHSMask, RHSMask);
        std::swap(LHS, RHS);
      }

      // Select 0xc for each lane used from the source operand. Zero has the
      // 0xc mask set, 0xff has 0xff in the mask, and actual lanes are in the
      // 0-3 range.
      uint32_t LHSUsedLanes = ~(LHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;
      uint32_t RHSUsedLanes = ~(RHSMask & 0x0c0c0c0c) & 0x0c0c0c0c;

      // Check if we need to combine values from two sources within a byte.
      if (!(LHSUsedLanes & RHSUsedLanes) &&
          // If we select the high and the low word, keep it for SDWA.
          // TODO: teach SDWA to work with v_perm_b32 and remove the check.
          !(LHSUsedLanes == 0x0c0c0000 && RHSUsedLanes == 0x00000c0c)) {
        // Kill zero bytes selected by the other mask. Zero value is 0xc.
        LHSMask &= ~RHSUsedLanes;
        RHSMask &= ~LHSUsedLanes;
        // Add 4 to each active LHS lane.
        LHSMask |= LHSUsedLanes & 0x04040404;
        // Combine masks.
        uint32_t Sel = LHSMask | RHSMask;
        SDLoc DL(N);

        return DAG.getNode(AMDGPUISD::PERM, DL, MVT::i32,
                           LHS.getOperand(0), RHS.getOperand(0),
                           DAG.getConstant(Sel, DL, MVT::i32));
      }
    }
  }

  if (VT != MVT::i64)
    return SDValue();

  // TODO: This could be a generic combine with a predicate for extracting the
  // high half of an integer being free.

  // (or i64:x, (zero_extend i32:y)) ->
  //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
  if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
      RHS.getOpcode() != ISD::ZERO_EXTEND)
    std::swap(LHS, RHS);

  if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
    SDValue ExtSrc = RHS.getOperand(0);
    EVT SrcVT = ExtSrc.getValueType();
    if (SrcVT == MVT::i32) {
      SDLoc SL(N);
      SDValue LowLHS, HiBits;
      std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
      SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);

      DCI.AddToWorklist(LowOr.getNode());
      DCI.AddToWorklist(HiBits.getNode());

      SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                LowOr, HiBits);
      return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
    }
  }

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (CRHS) {
    if (SDValue Split
          = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
      return Split;
  }

  return SDValue();
}

SDValue SITargetLowering::performXorCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  if (VT != MVT::i64)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (CRHS) {
    if (SDValue Split
          = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
      return Split;
  }

  return SDValue();
}

// Instructions that will be lowered with a final instruction that zeros the
// high result bits.
// XXX - probably only need to list legal operations.
static bool fp16SrcZerosHighBits(unsigned Opc) {
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FCANONICALIZE:
  case ISD::FP_ROUND:
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
  case ISD::FABS:
    // Fabs is lowered to a bit operation, but it's an and which will clear the
    // high bits anyway.
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case AMDGPUISD::FRACT:
  case AMDGPUISD::CLAMP:
  case AMDGPUISD::COS_HW:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMIN3:
  case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMED3:
  case AMDGPUISD::FMAD_FTZ:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::LDEXP:
    return true;
  default:
    // fcopysign, select and others may be lowered to 32-bit bit operations
    // which don't zero the high bits.
    return false;
  }
}

SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  if (!Subtarget->has16BitInsts() ||
      DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32)
    return SDValue();

  SDValue Src = N->getOperand(0);
  if (Src.getValueType() != MVT::i16)
    return SDValue();

  // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
  // FIXME: It is not universally true that the high bits are zeroed on gfx9.
  if (Src.getOpcode() == ISD::BITCAST) {
    SDValue BCSrc = Src.getOperand(0);
    if (BCSrc.getValueType() == MVT::f16 &&
        fp16SrcZerosHighBits(BCSrc.getOpcode()))
      return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
  }

  return SDValue();
}

SDValue SITargetLowering::performClassCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Mask = N->getOperand(1);

  // fp_class x, 0 -> false
  if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
    if (CMask->isNullValue())
      return DAG.getConstant(0, SDLoc(N), MVT::i1);
  }

  if (N->getOperand(0).isUndef())
    return DAG.getUNDEF(MVT::i1);

  return SDValue();
}

SDValue SITargetLowering::performRcpCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);

  if (N0.isUndef())
    return N0;

  if (VT == MVT::f32 && (N0.getOpcode() == ISD::UINT_TO_FP ||
                         N0.getOpcode() == ISD::SINT_TO_FP)) {
    return DCI.DAG.getNode(AMDGPUISD::RCP_IFLAG, SDLoc(N), VT, N0,
                           N->getFlags());
  }

  return AMDGPUTargetLowering::performRcpCombine(N, DCI);
}

static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
  if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
    return true;

  return DAG.isKnownNeverNaN(Op);
}

static bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
                            const GCNSubtarget *ST, unsigned MaxDepth = 5) {
  // If the source is the result of another standard FP operation it is
  // already in canonical form.

  switch (Op.getOpcode()) {
  default:
    break;

  // These will flush denorms if required.
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FSQRT:
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FMA:
  case ISD::FMAD:

  case ISD::FCANONICALIZE:
    return true;

  case ISD::FP_ROUND:
    return Op.getValueType().getScalarType() != MVT::f16 ||
           ST->hasFP16Denormals();

  case ISD::FP_EXTEND:
    return Op.getOperand(0).getValueType().getScalarType() != MVT::f16 ||
           ST->hasFP16Denormals();

  // These can/will be lowered or combined to a bit operation, so we need to
  // check the input recursively to handle them.
  case ISD::FNEG:
  case ISD::FABS:
    return (MaxDepth > 0) &&
           isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1);

  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FSINCOS:
    return Op.getValueType().getScalarType() != MVT::f16;

  // On pre-GFX9 targets V_MIN_F32 and others do not flush denorms.
  // For such targets we need to check the inputs recursively.
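  // (e.g. on such a target v_min_f32 can pass through an unflushed denormal
  // operand, so the result is only known canonical if both inputs are.)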
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNAN:
  case ISD::FMAXNAN:

    if (ST->supportsMinMaxDenormModes() &&
        DAG.isKnownNeverNaN(Op.getOperand(0)) &&
        DAG.isKnownNeverNaN(Op.getOperand(1)))
      return true;

    return (MaxDepth > 0) &&
           isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1) &&
           isCanonicalized(DAG, Op.getOperand(1), ST, MaxDepth - 1);

  case ISD::ConstantFP: {
    auto F = cast<ConstantFPSDNode>(Op)->getValueAPF();
    return !F.isDenormal() && !(F.isNaN() && F.isSignaling());
  }
  }
  return false;
}

// Constant fold canonicalize.
SDValue SITargetLowering::performFCanonicalizeCombine(
  SDNode *N,
  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);

  // fcanonicalize undef -> qnan
  if (N0.isUndef()) {
    EVT VT = N->getValueType(0);
    APFloat QNaN = APFloat::getQNaN(SelectionDAG::EVTToAPFloatSemantics(VT));
    return DAG.getConstantFP(QNaN, SDLoc(N), VT);
  }

  ConstantFPSDNode *CFP = isConstOrConstSplatFP(N0);
  if (!CFP) {
    SDValue N0 = N->getOperand(0);
    EVT VT = N0.getValueType().getScalarType();
    auto ST = getSubtarget();

    if (((VT == MVT::f32 && ST->hasFP32Denormals()) ||
         (VT == MVT::f64 && ST->hasFP64Denormals()) ||
         (VT == MVT::f16 && ST->hasFP16Denormals())) &&
        DAG.isKnownNeverNaN(N0))
      return N0;

    bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction());

    if ((IsIEEEMode || isKnownNeverSNan(DAG, N0)) &&
        isCanonicalized(DAG, N0, ST))
      return N0;

    return SDValue();
  }

  const APFloat &C = CFP->getValueAPF();

  // Flush denormals to 0 if not enabled.
  if (C.isDenormal()) {
    EVT VT = N->getValueType(0);
    EVT SVT = VT.getScalarType();
    if (SVT == MVT::f32 && !Subtarget->hasFP32Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);

    if (SVT == MVT::f64 && !Subtarget->hasFP64Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);

    if (SVT == MVT::f16 && !Subtarget->hasFP16Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);
  }

  if (C.isNaN()) {
    EVT VT = N->getValueType(0);
    APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
    if (C.isSignaling()) {
      // Quiet a signaling NaN.
      return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
    }

    // Make sure it is the canonical NaN bitpattern.
    //
    // TODO: Can we use -1 as the canonical NaN value since it's an inline
    // immediate?
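    // (APFloat::getQNaN yields the positive quiet NaN with a zero payload,
    // so any other NaN encoding is rewritten to it here.)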
    if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
      return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
  }

  return N0;
}

static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
  switch (Opc) {
  case ISD::FMAXNUM:
    return AMDGPUISD::FMAX3;
  case ISD::SMAX:
    return AMDGPUISD::SMAX3;
  case ISD::UMAX:
    return AMDGPUISD::UMAX3;
  case ISD::FMINNUM:
    return AMDGPUISD::FMIN3;
  case ISD::SMIN:
    return AMDGPUISD::SMIN3;
  case ISD::UMIN:
    return AMDGPUISD::UMIN3;
  default:
    llvm_unreachable("Not a min/max opcode");
  }
}

SDValue SITargetLowering::performIntMed3ImmCombine(
  SelectionDAG &DAG, const SDLoc &SL,
  SDValue Op0, SDValue Op1, bool Signed) const {
  ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
  if (!K1)
    return SDValue();

  ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
  if (!K0)
    return SDValue();

  if (Signed) {
    if (K0->getAPIntValue().sge(K1->getAPIntValue()))
      return SDValue();
  } else {
    if (K0->getAPIntValue().uge(K1->getAPIntValue()))
      return SDValue();
  }

  EVT VT = K0->getValueType(0);
  unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
  if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
    return DAG.getNode(Med3Opc, SL, VT,
                       Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
  }

  // If there isn't a 16-bit med3 operation, convert to 32-bit.
  MVT NVT = MVT::i32;
  unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;

  SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
  SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
  SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);

  SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
  return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
}

static ConstantFPSDNode *getSplatConstantFP(SDValue Op) {
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return C;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op)) {
    if (ConstantFPSDNode *C = BV->getConstantFPSplatNode())
      return C;
  }

  return nullptr;
}

SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
                                                  const SDLoc &SL,
                                                  SDValue Op0,
                                                  SDValue Op1) const {
  ConstantFPSDNode *K1 = getSplatConstantFP(Op1);
  if (!K1)
    return SDValue();

  ConstantFPSDNode *K0 = getSplatConstantFP(Op0.getOperand(1));
  if (!K0)
    return SDValue();

  // Ordered >= (although NaN inputs should have folded away by now).
  APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
  if (Cmp == APFloat::cmpGreaterThan)
    return SDValue();

  // TODO: Check IEEE bit enabled?
  EVT VT = Op0.getValueType();
  if (Subtarget->enableDX10Clamp()) {
    // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
    // hardware fmed3 behavior converting to a min.
    // FIXME: Should this be allowing -0.0?
    if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
      return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
  }

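  // e.g. (illustrative constants) fminnum(fmaxnum(x, 2.0), 4.0) becomes
  // fmed3(x, 2.0, 4.0) below, provided x is known not to be a signaling NaN.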
  // med3 for f16 is only available on gfx9+, and not available for v2f16.
  if (VT == MVT::f32 || (VT == MVT::f16 && Subtarget->hasMed3_16())) {
    // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
    // signaling NaN gives a quiet NaN. The quiet NaN input to the min would
    // then give the other result, which is different from med3 with a NaN
    // input.
    SDValue Var = Op0.getOperand(0);
    if (!isKnownNeverSNan(DAG, Var))
      return SDValue();

    return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
                       Var, SDValue(K0, 0), SDValue(K1, 0));
  }

  return SDValue();
}

SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  EVT VT = N->getValueType(0);
  unsigned Opc = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // Only do this if the inner op has one use since this will just increase
  // register pressure for no benefit.

  if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
      !VT.isVector() && VT != MVT::f64 &&
      ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
    // max(max(a, b), c) -> max3(a, b, c)
    // min(min(a, b), c) -> min3(a, b, c)
    if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
      SDLoc DL(N);
      return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                         DL,
                         N->getValueType(0),
                         Op0.getOperand(0),
                         Op0.getOperand(1),
                         Op1);
    }

    // Try commuted.
    // max(a, max(b, c)) -> max3(a, b, c)
    // min(a, min(b, c)) -> min3(a, b, c)
    if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
      SDLoc DL(N);
      return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                         DL,
                         N->getValueType(0),
                         Op0,
                         Op1.getOperand(0),
                         Op1.getOperand(1));
    }
  }

  // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
  if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
    if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
      return Med3;
  }

  if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
    if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
      return Med3;
  }

  // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
  if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
       (Opc == AMDGPUISD::FMIN_LEGACY &&
        Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
      (VT == MVT::f32 || VT == MVT::f64 ||
       (VT == MVT::f16 && Subtarget->has16BitInsts()) ||
       (VT == MVT::v2f16 && Subtarget->hasVOP3PInsts())) &&
      Op0.hasOneUse()) {
    if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
      return Res;
  }

  return SDValue();
}

static bool isClampZeroToOne(SDValue A, SDValue B) {
  if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
    if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
      // FIXME: Should this be allowing -0.0?
      return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
             (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
    }
  }

  return false;
}

// FIXME: Should only worry about snans for version with chain.
SDValue SITargetLowering::performFMed3Combine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
  // NaNs. With a NaN input, the order of the operands may change the result.

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  SDValue Src2 = N->getOperand(2);

  if (isClampZeroToOne(Src0, Src1)) {
    // const_a, const_b, x -> clamp is safe in all cases including signaling
    // nans.
    // FIXME: Should this be allowing -0.0?
    return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
  }

  // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
  // handling no dx10-clamp?
  if (Subtarget->enableDX10Clamp()) {
    // If NaNs are clamped to 0, we are free to reorder the inputs.

    if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
      std::swap(Src0, Src1);

    if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
      std::swap(Src1, Src2);

    if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
      std::swap(Src0, Src1);

    if (isClampZeroToOne(Src1, Src2))
      return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
  }

  return SDValue();
}

SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  if (Src0.isUndef() && Src1.isUndef())
    return DCI.DAG.getUNDEF(N->getValueType(0));
  return SDValue();
}

SDValue SITargetLowering::performExtractVectorEltCombine(
  SDNode *N, DAGCombinerInfo &DCI) const {
  SDValue Vec = N->getOperand(0);
  SelectionDAG &DAG = DCI.DAG;

  EVT VecVT = Vec.getValueType();
  EVT EltVT = VecVT.getVectorElementType();

  if ((Vec.getOpcode() == ISD::FNEG ||
       Vec.getOpcode() == ISD::FABS) && allUsesHaveSourceMods(N)) {
    SDLoc SL(N);
    EVT EltVT = N->getValueType(0);
    SDValue Idx = N->getOperand(1);
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Vec.getOperand(0), Idx);
    return DAG.getNode(Vec.getOpcode(), SL, EltVT, Elt);
  }

  // ScalarRes = EXTRACT_VECTOR_ELT ((vector-BINOP Vec1, Vec2), Idx)
  //    =>
  // Vec1Elt = EXTRACT_VECTOR_ELT(Vec1, Idx)
  // Vec2Elt = EXTRACT_VECTOR_ELT(Vec2, Idx)
  // ScalarRes = scalar-BINOP Vec1Elt, Vec2Elt
  if (Vec.hasOneUse() && DCI.isBeforeLegalize()) {
    SDLoc SL(N);
    EVT EltVT = N->getValueType(0);
    SDValue Idx = N->getOperand(1);
    unsigned Opc = Vec.getOpcode();

    switch (Opc) {
    default:
      return SDValue();
      // TODO: Support other binary operations.
    case ISD::FADD:
    case ISD::ADD:
    case ISD::UMIN:
    case ISD::UMAX:
    case ISD::SMIN:
    case ISD::SMAX:
    case ISD::FMAXNUM:
    case ISD::FMINNUM:
      return DAG.getNode(Opc, SL, EltVT,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                                     Vec.getOperand(0), Idx),
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                                     Vec.getOperand(1), Idx));
    }
  }

  if (!DCI.isBeforeLegalize())
    return SDValue();

  unsigned VecSize = VecVT.getSizeInBits();
  unsigned EltSize = EltVT.getSizeInBits();

  // Try to turn sub-dword accesses of vectors into accesses of the same
  // 32-bit elements. This exposes more load reduction opportunities by
  // replacing multiple small extract_vector_elements with a single 32-bit
  // extract.
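  // For example (illustrative types): (extract_vector_elt v8i8:x, 2) becomes
  // a bitcast of x to v2i32, an extract of 32-bit element 0, a right shift
  // by 16, and a truncate back to i8.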
  auto *Idx = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (EltSize <= 16 &&
      EltVT.isByteSized() &&
      VecSize > 32 &&
      VecSize % 32 == 0 &&
      Idx) {
    EVT NewVT = getEquivalentMemType(*DAG.getContext(), VecVT);

    unsigned BitIndex = Idx->getZExtValue() * EltSize;
    unsigned EltIdx = BitIndex / 32;
    unsigned LeftoverBitIdx = BitIndex % 32;
    SDLoc SL(N);

    SDValue Cast = DAG.getNode(ISD::BITCAST, SL, NewVT, Vec);
    DCI.AddToWorklist(Cast.getNode());

    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Cast,
                              DAG.getConstant(EltIdx, SL, MVT::i32));
    DCI.AddToWorklist(Elt.getNode());
    SDValue Srl = DAG.getNode(ISD::SRL, SL, MVT::i32, Elt,
                              DAG.getConstant(LeftoverBitIdx, SL, MVT::i32));
    DCI.AddToWorklist(Srl.getNode());

    SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL,
                                EltVT.changeTypeToInteger(), Srl);
    DCI.AddToWorklist(Trunc.getNode());
    return DAG.getNode(ISD::BITCAST, SL, EltVT, Trunc);
  }

  return SDValue();
}

static bool convertBuildVectorCastElt(SelectionDAG &DAG,
                                      SDValue &Lo, SDValue &Hi) {
  if (Hi.getOpcode() == ISD::BITCAST &&
      Hi.getOperand(0).getValueType() == MVT::f16 &&
      (isa<ConstantSDNode>(Lo) || Lo.isUndef())) {
    Lo = DAG.getNode(ISD::BITCAST, SDLoc(Lo), MVT::f16, Lo);
    Hi = Hi.getOperand(0);
    return true;
  }

  return false;
}

SDValue SITargetLowering::performBuildVectorCombine(
  SDNode *N, DAGCombinerInfo &DCI) const {
  SDLoc SL(N);

  if (!isTypeLegal(MVT::v2i16))
    return SDValue();
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  if (VT == MVT::v2i16) {
    SDValue Lo = N->getOperand(0);
    SDValue Hi = N->getOperand(1);

    // v2i16 build_vector (const|undef), (bitcast f16:$x)
    //   -> bitcast (v2f16 build_vector const|undef, $x)
    if (convertBuildVectorCastElt(DAG, Lo, Hi)) {
      SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Lo, Hi });
      return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
    }

    if (convertBuildVectorCastElt(DAG, Hi, Lo)) {
      SDValue NewVec = DAG.getBuildVector(MVT::v2f16, SL, { Hi, Lo });
      return DAG.getNode(ISD::BITCAST, SL, VT, NewVec);
    }
  }

  return SDValue();
}

unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
                                          const SDNode *N0,
                                          const SDNode *N1) const {
  EVT VT = N0->getValueType(0);

  // Only do this if we are not trying to support denormals. v_mad_f32 does not
  // support denormals ever.
  if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
      (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
    return ISD::FMAD;

  const TargetOptions &Options = DAG.getTarget().Options;
  if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
       (N0->getFlags().hasAllowContract() &&
        N1->getFlags().hasAllowContract())) &&
      isFMAFasterThanFMulAndFAdd(VT)) {
    return ISD::FMA;
  }

  return 0;
}

static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
                           EVT VT,
                           SDValue N0, SDValue N1, SDValue N2,
                           bool Signed) {
  unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
  SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
  return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
}

SDValue SITargetLowering::performAddCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
      && Subtarget->hasMad64_32() &&
      !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
      VT.getScalarSizeInBits() <= 64) {
    if (LHS.getOpcode() != ISD::MUL)
      std::swap(LHS, RHS);

    SDValue MulLHS = LHS.getOperand(0);
    SDValue MulRHS = LHS.getOperand(1);
    SDValue AddRHS = RHS;

    // TODO: Maybe restrict if SGPR inputs.
    if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
        numBitsUnsigned(MulRHS, DAG) <= 32) {
      MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
      MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
      AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
      return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
    }

    if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
      MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
      MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
      AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
      return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
    }

    return SDValue();
  }

  if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
    return SDValue();

  // add x, zext (setcc) => addcarry x, 0, setcc
  // add x, sext (setcc) => subcarry x, 0, setcc
  unsigned Opc = LHS.getOpcode();
  if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
      Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
    std::swap(RHS, LHS);

  Opc = RHS.getOpcode();
  switch (Opc) {
  default: break;
  case ISD::ZERO_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ANY_EXTEND: {
    auto Cond = RHS.getOperand(0);
    if (!isBoolSGPR(Cond))
      break;
    SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
    SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
    Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
    return DAG.getNode(Opc, SL, VTList, Args);
  }
  case ISD::ADDCARRY: {
    // add x, (addcarry y, 0, cc) => addcarry x, y, cc
    auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
    if (!C || C->getZExtValue() != 0) break;
    SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
    return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
  }
  }
  return SDValue();
}

SDValue SITargetLowering::performSubCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  if (VT != MVT::i32)
    return SDValue();

  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  unsigned Opc = LHS.getOpcode();
  if (Opc != ISD::SUBCARRY)
    std::swap(RHS, LHS);

  if (LHS.getOpcode() == ISD::SUBCARRY) {
    // sub (subcarry x, 0, cc), y => subcarry x, y, cc
    auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
    if (!C || C->getZExtValue() != 0)
      return SDValue();
    SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
    return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
  }
  return SDValue();
}

SDValue SITargetLowering::performAddCarrySubCarryCombine(
  SDNode *N, DAGCombinerInfo &DCI) const {
  if (N->getValueType(0) != MVT::i32)
    return SDValue();

  auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C || C->getZExtValue() != 0)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDValue LHS = N->getOperand(0);

  // addcarry (add x, y), 0, cc => addcarry x, y, cc
  // subcarry (sub x, y), 0, cc => subcarry x, y, cc
  unsigned LHSOpc = LHS.getOpcode();
  unsigned Opc = N->getOpcode();
  if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
      (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
    SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
    return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
  }
  return SDValue();
}

SDValue SITargetLowering::performFAddCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // These should really be instruction patterns, but writing patterns with
  // source modifiers is a pain.
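  // Note: getFusedOpcode above returns FMAD only when denormal support is not
  // requested, and FMA only when contraction is allowed, so the folds below
  // are gated like the generic fmul/fadd fusion.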

  // fadd (fadd (a, a), b) -> mad 2.0, a, b
  if (LHS.getOpcode() == ISD::FADD) {
    SDValue A = LHS.getOperand(0);
    if (A == LHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
      }
    }
  }

  // fadd (b, fadd (a, a)) -> mad 2.0, a, b
  if (RHS.getOpcode() == ISD::FADD) {
    SDValue A = RHS.getOperand(0);
    if (A == RHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
      }
    }
  }

  return SDValue();
}

SDValue SITargetLowering::performFSubCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  EVT VT = N->getValueType(0);
  assert(!VT.isVector());

  // Try to get the fneg to fold into the source modifier. This undoes generic
  // DAG combines and folds them into the mad.
  //
  // Only do this if we are not trying to support denormals. v_mad_f32 does
  // not support denormals ever.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  if (LHS.getOpcode() == ISD::FADD) {
    // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
    SDValue A = LHS.getOperand(0);
    if (A == LHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);

        return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
      }
    }
  }

  if (RHS.getOpcode() == ISD::FADD) {
    // (fsub c, (fadd a, a)) -> mad -2.0, a, c

    SDValue A = RHS.getOperand(0);
    if (A == RHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
        const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
      }
    }
  }

  return SDValue();
}

SDValue SITargetLowering::performFMACombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDLoc SL(N);

  if (!Subtarget->hasDLInsts() || VT != MVT::f32)
    return SDValue();

  // FMA((F32)S0.x, (F32)S1.x, FMA((F32)S0.y, (F32)S1.y, (F32)z)) ->
  //   FDOT2((V2F16)S0, (V2F16)S1, (F32)z)
  SDValue Op1 = N->getOperand(0);
  SDValue Op2 = N->getOperand(1);
  SDValue FMA = N->getOperand(2);

  if (FMA.getOpcode() != ISD::FMA ||
      Op1.getOpcode() != ISD::FP_EXTEND ||
      Op2.getOpcode() != ISD::FP_EXTEND)
    return SDValue();

  // fdot2_f32_f16 always flushes fp32 denormal operand and output to zero,
  // regardless of the denorm mode setting. Therefore,
  // unsafe-fp-math/fp-contract is sufficient to allow generating fdot2.
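  // Concretely, the match below looks for
  //   fma(fpext(extract(S0, i)), fpext(extract(S1, i)),
  //       fma(fpext(extract(S0, j)), fpext(extract(S1, j)), z))
  // with i != j and S0, S1 both v2f16 (S0/S1 may appear swapped in the inner
  // fma).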
  const TargetOptions &Options = DAG.getTarget().Options;
  if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
      (N->getFlags().hasAllowContract() &&
       FMA->getFlags().hasAllowContract())) {
    Op1 = Op1.getOperand(0);
    Op2 = Op2.getOperand(0);
    if (Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Op2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    SDValue Vec1 = Op1.getOperand(0);
    SDValue Idx1 = Op1.getOperand(1);
    SDValue Vec2 = Op2.getOperand(0);

    SDValue FMAOp1 = FMA.getOperand(0);
    SDValue FMAOp2 = FMA.getOperand(1);
    SDValue FMAAcc = FMA.getOperand(2);

    if (FMAOp1.getOpcode() != ISD::FP_EXTEND ||
        FMAOp2.getOpcode() != ISD::FP_EXTEND)
      return SDValue();

    FMAOp1 = FMAOp1.getOperand(0);
    FMAOp2 = FMAOp2.getOperand(0);
    if (FMAOp1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        FMAOp2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    SDValue Vec3 = FMAOp1.getOperand(0);
    SDValue Vec4 = FMAOp2.getOperand(0);
    SDValue Idx2 = FMAOp1.getOperand(1);

    if (Idx1 != Op2.getOperand(1) || Idx2 != FMAOp2.getOperand(1) ||
        // Idx1 and Idx2 cannot be the same.
        Idx1 == Idx2)
      return SDValue();

    if (Vec1 == Vec2 || Vec3 == Vec4)
      return SDValue();

    if (Vec1.getValueType() != MVT::v2f16 || Vec2.getValueType() != MVT::v2f16)
      return SDValue();

    if ((Vec1 == Vec3 && Vec2 == Vec4) ||
        (Vec1 == Vec4 && Vec2 == Vec3)) {
      return DAG.getNode(AMDGPUISD::FDOT2, SL, MVT::f32, Vec1, Vec2, FMAAcc,
                         DAG.getTargetConstant(0, SL, MVT::i1));
    }
  }
  return SDValue();
}

SDValue SITargetLowering::performSetCCCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  EVT VT = LHS.getValueType();
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();

  auto CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (!CRHS) {
    CRHS = dyn_cast<ConstantSDNode>(LHS);
    if (CRHS) {
      std::swap(LHS, RHS);
      CC = getSetCCSwappedOperands(CC);
    }
  }

  if (CRHS) {
    if (VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
        isBoolSGPR(LHS.getOperand(0))) {
      // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
      // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
      // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1
      // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc
      if ((CRHS->isAllOnesValue() &&
           (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
          (CRHS->isNullValue() &&
           (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
        return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
                           DAG.getConstant(-1, SL, MVT::i1));
      if ((CRHS->isAllOnesValue() &&
           (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
          (CRHS->isNullValue() &&
           (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
        return LHS.getOperand(0);
    }

    uint64_t CRHSVal = CRHS->getZExtValue();
    if ((CC == ISD::SETEQ || CC == ISD::SETNE) &&
        LHS.getOpcode() == ISD::SELECT &&
        isa<ConstantSDNode>(LHS.getOperand(1)) &&
        isa<ConstantSDNode>(LHS.getOperand(2)) &&
        LHS.getConstantOperandVal(1) != LHS.getConstantOperandVal(2) &&
        isBoolSGPR(LHS.getOperand(0))) {
      // Given CT != CF:
      // setcc (select cc, CT, CF), CF, eq => xor cc, -1
      // setcc (select cc, CT, CF), CF, ne => cc
      // setcc (select cc, CT, CF), CT, ne => xor cc, -1
      // setcc (select cc, CT, CF), CT, eq => cc
      uint64_t CT = LHS.getConstantOperandVal(1);
      uint64_t CF = LHS.getConstantOperandVal(2);

      if ((CF == CRHSVal && CC == ISD::SETEQ) ||
          (CT == CRHSVal && CC == ISD::SETNE))
        return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
                           DAG.getConstant(-1, SL, MVT::i1));
      if ((CF == CRHSVal && CC == ISD::SETNE) ||
          (CT == CRHSVal && CC == ISD::SETEQ))
        return LHS.getOperand(0);
    }
  }

  if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
                                           VT != MVT::f16))
    return SDValue();

  // Match isinf pattern
  // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
  if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
    const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
    if (!CRHS)
      return SDValue();

    const APFloat &APF = CRHS->getValueAPF();
    if (APF.isInfinity() && !APF.isNegative()) {
      unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
      return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
                         DAG.getConstant(Mask, SL, MVT::i32));
    }
  }

  return SDValue();
}

SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;

  SDValue Src = N->getOperand(0);
  SDValue Srl = N->getOperand(0);
  if (Srl.getOpcode() == ISD::ZERO_EXTEND)
    Srl = Srl.getOperand(0);

  // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
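  // A zero_extend is looked through above because the high bytes of the
  // extended value are known zero, so the byte being selected from the
  // narrow source is unchanged.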
  if (Srl.getOpcode() == ISD::SRL) {
    // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
    // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
    // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x

    if (const ConstantSDNode *C =
          dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
      Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
                               EVT(MVT::i32));

      unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
      if (SrcOffset < 32 && SrcOffset % 8 == 0) {
        return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
                           MVT::f32, Srl);
      }
    }
  }

  APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);

  KnownBits Known;
  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                        !DCI.isBeforeLegalizeOps());
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) ||
      TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
    DCI.CommitTargetLoweringOpt(TLO);
  }

  return SDValue();
}

SDValue SITargetLowering::performClampCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
  if (!CSrc)
    return SDValue();

  const APFloat &F = CSrc->getValueAPF();
  APFloat Zero = APFloat::getZero(F.getSemantics());
  APFloat::cmpResult Cmp0 = F.compare(Zero);
  if (Cmp0 == APFloat::cmpLessThan ||
      (Cmp0 == APFloat::cmpUnordered && Subtarget->enableDX10Clamp())) {
    return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
  }

  APFloat One(F.getSemantics(), "1.0");
  APFloat::cmpResult Cmp1 = F.compare(One);
  if (Cmp1 == APFloat::cmpGreaterThan)
    return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));

  return SDValue(CSrc, 0);
}

SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
  case ISD::ADD:
    return performAddCombine(N, DCI);
  case ISD::SUB:
    return performSubCombine(N, DCI);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    return performAddCarrySubCarryCombine(N, DCI);
  case ISD::FADD:
    return performFAddCombine(N, DCI);
  case ISD::FSUB:
    return performFSubCombine(N, DCI);
  case ISD::SETCC:
    return performSetCCCombine(N, DCI);
  case ISD::FMAXNUM:
  case ISD::FMINNUM:
  case ISD::SMAX:
  case ISD::SMIN:
  case ISD::UMAX:
  case ISD::UMIN:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY: {
    if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
        getTargetMachine().getOptLevel() > CodeGenOpt::None)
      return performMinMaxCombine(N, DCI);
    break;
  }
  case ISD::FMA:
    return performFMACombine(N, DCI);
  case ISD::LOAD: {
    if (SDValue Widened = widenLoad(cast<LoadSDNode>(N), DCI))
      return Widened;
    LLVM_FALLTHROUGH;
  }
  case ISD::STORE:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
ISD::ATOMIC_LOAD_UMIN: 7845 case ISD::ATOMIC_LOAD_UMAX: 7846 case AMDGPUISD::ATOMIC_INC: 7847 case AMDGPUISD::ATOMIC_DEC: 7848 case AMDGPUISD::ATOMIC_LOAD_FADD: 7849 case AMDGPUISD::ATOMIC_LOAD_FMIN: 7850 case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics. 7851 if (DCI.isBeforeLegalize()) 7852 break; 7853 return performMemSDNodeCombine(cast<MemSDNode>(N), DCI); 7854 case ISD::AND: 7855 return performAndCombine(N, DCI); 7856 case ISD::OR: 7857 return performOrCombine(N, DCI); 7858 case ISD::XOR: 7859 return performXorCombine(N, DCI); 7860 case ISD::ZERO_EXTEND: 7861 return performZeroExtendCombine(N, DCI); 7862 case AMDGPUISD::FP_CLASS: 7863 return performClassCombine(N, DCI); 7864 case ISD::FCANONICALIZE: 7865 return performFCanonicalizeCombine(N, DCI); 7866 case AMDGPUISD::RCP: 7867 return performRcpCombine(N, DCI); 7868 case AMDGPUISD::FRACT: 7869 case AMDGPUISD::RSQ: 7870 case AMDGPUISD::RCP_LEGACY: 7871 case AMDGPUISD::RSQ_LEGACY: 7872 case AMDGPUISD::RCP_IFLAG: 7873 case AMDGPUISD::RSQ_CLAMP: 7874 case AMDGPUISD::LDEXP: { 7875 SDValue Src = N->getOperand(0); 7876 if (Src.isUndef()) 7877 return Src; 7878 break; 7879 } 7880 case ISD::SINT_TO_FP: 7881 case ISD::UINT_TO_FP: 7882 return performUCharToFloatCombine(N, DCI); 7883 case AMDGPUISD::CVT_F32_UBYTE0: 7884 case AMDGPUISD::CVT_F32_UBYTE1: 7885 case AMDGPUISD::CVT_F32_UBYTE2: 7886 case AMDGPUISD::CVT_F32_UBYTE3: 7887 return performCvtF32UByteNCombine(N, DCI); 7888 case AMDGPUISD::FMED3: 7889 return performFMed3Combine(N, DCI); 7890 case AMDGPUISD::CVT_PKRTZ_F16_F32: 7891 return performCvtPkRTZCombine(N, DCI); 7892 case AMDGPUISD::CLAMP: 7893 return performClampCombine(N, DCI); 7894 case ISD::SCALAR_TO_VECTOR: { 7895 SelectionDAG &DAG = DCI.DAG; 7896 EVT VT = N->getValueType(0); 7897 7898 // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x)) 7899 if (VT == MVT::v2i16 || VT == MVT::v2f16) { 7900 SDLoc SL(N); 7901 SDValue Src = N->getOperand(0); 7902 EVT EltVT = Src.getValueType(); 7903 if (EltVT == MVT::f16) 7904 Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src); 7905 7906 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src); 7907 return DAG.getNode(ISD::BITCAST, SL, VT, Ext); 7908 } 7909 7910 break; 7911 } 7912 case ISD::EXTRACT_VECTOR_ELT: 7913 return performExtractVectorEltCombine(N, DCI); 7914 case ISD::BUILD_VECTOR: 7915 return performBuildVectorCombine(N, DCI); 7916 } 7917 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI); 7918 } 7919 7920 /// Helper function for adjustWritemask 7921 static unsigned SubIdx2Lane(unsigned Idx) { 7922 switch (Idx) { 7923 default: return 0; 7924 case AMDGPU::sub0: return 0; 7925 case AMDGPU::sub1: return 1; 7926 case AMDGPU::sub2: return 2; 7927 case AMDGPU::sub3: return 3; 7928 } 7929 } 7930 7931 /// Adjust the writemask of MIMG instructions 7932 SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node, 7933 SelectionDAG &DAG) const { 7934 unsigned Opcode = Node->getMachineOpcode(); 7935 7936 // Subtract 1 because the vdata output is not a MachineSDNode operand. 
SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
                                          SelectionDAG &DAG) const {
  unsigned Opcode = Node->getMachineOpcode();

  // Subtract 1 because the vdata output is not a MachineSDNode operand.
  int D16Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::d16) - 1;
  if (D16Idx >= 0 && Node->getConstantOperandVal(D16Idx))
    return Node; // not implemented for D16

  SDNode *Users[4] = { nullptr };
  unsigned Lane = 0;
  unsigned DmaskIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::dmask) - 1;
  unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
  unsigned NewDmask = 0;
  bool HasChain = Node->getNumValues() > 1;

  if (OldDmask == 0) {
    // These are folded out, but on the chance it happens don't assert.
    return Node;
  }

  // Try to figure out the used register components.
  for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
       I != E; ++I) {

    // Don't look at users of the chain.
    if (I.getUse().getResNo() != 0)
      continue;

    // Abort if we can't understand the usage.
    if (!I->isMachineOpcode() ||
        I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
      return Node;

    // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
    // Note that subregs are packed, i.e. Lane==0 is the first bit set
    // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
    // set, etc.
    Lane = SubIdx2Lane(I->getConstantOperandVal(1));

    // Set which texture component corresponds to the lane.
    unsigned Comp;
    for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
      Comp = countTrailingZeros(Dmask);
      Dmask &= ~(1 << Comp);
    }

    // Abort if we have more than one user per component.
    if (Users[Lane])
      return Node;

    Users[Lane] = *I;
    NewDmask |= 1 << Comp;
  }

  // Abort if there's no change.
  if (NewDmask == OldDmask)
    return Node;

  unsigned BitsSet = countPopulation(NewDmask);

  int NewOpcode = AMDGPU::getMaskedMIMGOp(Node->getMachineOpcode(), BitsSet);
  assert(NewOpcode != -1 &&
         NewOpcode != static_cast<int>(Node->getMachineOpcode()) &&
         "failed to find equivalent MIMG op");

  // Adjust the writemask in the node.
  SmallVector<SDValue, 12> Ops;
  Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
  Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
  Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());

  MVT SVT = Node->getValueType(0).getVectorElementType().getSimpleVT();

  MVT ResultVT = BitsSet == 1 ?
    SVT : MVT::getVectorVT(SVT, BitsSet == 3 ? 4 : BitsSet);
  SDVTList NewVTList = HasChain ?
    DAG.getVTList(ResultVT, MVT::Other) : DAG.getVTList(ResultVT);

  MachineSDNode *NewNode = DAG.getMachineNode(NewOpcode, SDLoc(Node),
                                              NewVTList, Ops);

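  // Hand the memory operands and the chain result of the old node over to the
  // new one before updating any of its users.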
  if (HasChain) {
    // Update chain.
    NewNode->setMemRefs(Node->memoperands_begin(), Node->memoperands_end());
    DAG.ReplaceAllUsesOfValueWith(SDValue(Node, 1), SDValue(NewNode, 1));
  }

  if (BitsSet == 1) {
    assert(Node->hasNUsesOfValue(1, 0));
    SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY,
                                      SDLoc(Node), Users[Lane]->getValueType(0),
                                      SDValue(NewNode, 0));
    DAG.ReplaceAllUsesWith(Users[Lane], Copy);
    return nullptr;
  }

  // Update the users of the node with the new indices.
  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
    SDNode *User = Users[i];
    if (!User)
      continue;

    SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
    DAG.UpdateNodeOperands(User, SDValue(NewNode, 0), Op);

    switch (Idx) {
    default: break;
    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
    }
  }

  DAG.RemoveDeadNode(Node);
  return nullptr;
}

static bool isFrameIndexOp(SDValue Op) {
  if (Op.getOpcode() == ISD::AssertZext)
    Op = Op.getOperand(0);

  return isa<FrameIndexSDNode>(Op);
}

/// Legalize target independent instructions (e.g. INSERT_SUBREG)
/// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
                                                        SelectionDAG &DAG) const {
  if (Node->getOpcode() == ISD::CopyToReg) {
    RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
    SDValue SrcVal = Node->getOperand(2);

    // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
    // to try understanding copies to physical registers.
    if (SrcVal.getValueType() == MVT::i1 &&
        TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
      SDLoc SL(Node);
      MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue VReg = DAG.getRegister(
        MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);

      SDNode *Glued = Node->getGluedNode();
      SDValue ToVReg
        = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
                           SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
      SDValue ToResultReg
        = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
                           VReg, ToVReg.getValue(1));
      DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
      DAG.RemoveDeadNode(Node);
      return ToResultReg.getNode();
    }
  }

  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
    if (!isFrameIndexOp(Node->getOperand(i))) {
      Ops.push_back(Node->getOperand(i));
      continue;
    }

    SDLoc DL(Node);
    Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
                                             Node->getOperand(i).getValueType(),
                                             Node->getOperand(i)), 0));
  }

  return DAG.UpdateNodeOperands(Node, Ops);
}

/// Fold the instructions after selecting them.
/// Returns null if users were already updated.
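///
/// This shrinks the writemask of MIMG loads, legalizes frame index operands
/// of INSERT_SUBREG and REG_SEQUENCE, and ties together the undef source
/// operands of V_DIV_SCALE instructions.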
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
                                          SelectionDAG &DAG) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  unsigned Opcode = Node->getMachineOpcode();

  if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
      !TII->isGather4(Opcode)) {
    return adjustWritemask(Node, DAG);
  }

  if (Opcode == AMDGPU::INSERT_SUBREG ||
      Opcode == AMDGPU::REG_SEQUENCE) {
    legalizeTargetIndependentNode(Node, DAG);
    return Node;
  }

  switch (Opcode) {
  case AMDGPU::V_DIV_SCALE_F32:
  case AMDGPU::V_DIV_SCALE_F64: {
    // Satisfy the operand register constraint when one of the inputs is
    // undefined. Ordinarily each undef value will have its own implicit_def of
    // a vreg, so force these to use a single register.
    SDValue Src0 = Node->getOperand(0);
    SDValue Src1 = Node->getOperand(1);
    SDValue Src2 = Node->getOperand(2);

    if ((Src0.isMachineOpcode() &&
         Src0.getMachineOpcode() != AMDGPU::IMPLICIT_DEF) &&
        (Src0 == Src1 || Src0 == Src2))
      break;

    MVT VT = Src0.getValueType().getSimpleVT();
    const TargetRegisterClass *RC = getRegClassFor(VT);

    MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    SDValue UndefReg = DAG.getRegister(MRI.createVirtualRegister(RC), VT);

    SDValue ImpDef = DAG.getCopyToReg(DAG.getEntryNode(), SDLoc(Node),
                                      UndefReg, Src0, SDValue());

    // src0 must be the same register as src1 or src2, even if the value is
    // undefined, so make sure we don't violate this constraint.
    if (Src0.isMachineOpcode() &&
        Src0.getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
      if (Src1.isMachineOpcode() &&
          Src1.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
        Src0 = Src1;
      else if (Src2.isMachineOpcode() &&
               Src2.getMachineOpcode() != AMDGPU::IMPLICIT_DEF)
        Src0 = Src2;
      else {
        assert(Src1.getMachineOpcode() == AMDGPU::IMPLICIT_DEF);
        Src0 = UndefReg;
        Src1 = UndefReg;
      }
    } else
      break;

    SmallVector<SDValue, 4> Ops = { Src0, Src1, Src2 };
    for (unsigned I = 3, N = Node->getNumOperands(); I != N; ++I)
      Ops.push_back(Node->getOperand(I));

    Ops.push_back(ImpDef.getValue(1));
    return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
  }
  default:
    break;
  }

  return Node;
}

/// Legalize the operands of VOP3 instructions, and rewrite atomic
/// instructions whose result is unused to their no-return variants.
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                     SDNode *Node) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  if (TII->isVOP3(MI.getOpcode())) {
    // Make sure constant bus requirements are respected.
    TII->legalizeOperandsVOP3(MRI, MI);
    return;
  }

  // Replace unused atomics with the no return version.
  int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
  if (NoRetAtomicOp != -1) {
    if (!Node->hasAnyUseOfValue(0)) {
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);
      return;
    }

    // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
    // instruction, because the return type of these instructions is a vec2 of
    // the memory type, so it can be tied to the input operand.
    // This means these instructions always have a use, so we need to add a
    // special case to check if the atomic has only one extract_subreg use,
    // which itself has no uses.
    if ((Node->hasNUsesOfValue(1, 0) &&
         Node->use_begin()->isMachineOpcode() &&
         Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
         !Node->use_begin()->hasAnyUseOfValue(0))) {
      unsigned Def = MI.getOperand(0).getReg();

      // Change this into a noret atomic.
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);

      // If we only remove the def operand from the atomic instruction, the
      // extract_subreg will be left with a use of a vreg without a def.
      // So we need to insert an implicit_def to avoid machine verifier
      // errors.
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
              TII->get(AMDGPU::IMPLICIT_DEF), Def);
    }
    return;
  }
}

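// Materialize a 32-bit immediate into an SGPR with S_MOV_B32. The resource
// descriptor builders below use this for the constant dwords.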
static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
                              uint64_t Val) {
  SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
  return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
}

MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
                                                const SDLoc &DL,
                                                SDValue Ptr) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  // Build the constant half of the descriptor as its own 64-bit subregister
  // before building the full 128-bit register. If we are building multiple
  // resource descriptors, this allows the 2-component register to be CSE'd.
  const SDValue Ops0[] = {
    DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
    buildSMovImm32(DAG, DL, 0),
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
  };

  SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                                MVT::v2i32, Ops0), 0);

  // Combine the constants and the pointer.
  const SDValue Ops1[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    Ptr,
    DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
    SubRegHi,
    DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
}

/// Return a resource descriptor with the 'Add TID' bit enabled.
/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
/// of the resource descriptor) to create an offset, which is added to
/// the resource pointer.
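///
/// The descriptor is assembled from four dwords: the low half of the pointer,
/// the high half of the pointer OR'd with RsrcDword1, and the low and high
/// halves of RsrcDword2And3.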
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
                                           SDValue Ptr, uint32_t RsrcDword1,
                                           uint64_t RsrcDword2And3) const {
  SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
  if (RsrcDword1) {
    PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
                                       DAG.getConstant(RsrcDword1, DL, MVT::i32)),
                    0);
  }

  SDValue DataLo = buildSMovImm32(DAG, DL,
                                  RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
  SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);

  const SDValue Ops[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    PtrLo,
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    PtrHi,
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
    DataLo,
    DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
    DataHi,
    DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
}

//===----------------------------------------------------------------------===//
// SI Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass *>
SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                               StringRef Constraint,
                                               MVT VT) const {
  const TargetRegisterClass *RC = nullptr;
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
    case 's':
    case 'r':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        RC = &AMDGPU::SReg_32_XM0RegClass;
        break;
      case 64:
        RC = &AMDGPU::SGPR_64RegClass;
        break;
      case 128:
        RC = &AMDGPU::SReg_128RegClass;
        break;
      case 256:
        RC = &AMDGPU::SReg_256RegClass;
        break;
      case 512:
        RC = &AMDGPU::SReg_512RegClass;
        break;
      }
      break;
    case 'v':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        RC = &AMDGPU::VGPR_32RegClass;
        break;
      case 64:
        RC = &AMDGPU::VReg_64RegClass;
        break;
      case 96:
        RC = &AMDGPU::VReg_96RegClass;
        break;
      case 128:
        RC = &AMDGPU::VReg_128RegClass;
        break;
      case 256:
        RC = &AMDGPU::VReg_256RegClass;
        break;
      case 512:
        RC = &AMDGPU::VReg_512RegClass;
        break;
      }
      break;
    }
    // We actually support i128, i16 and f16 as inline parameters
    // even if they are not reported as legal.
    if (RC && (isTypeLegal(VT) || VT.SimpleTy == MVT::i128 ||
               VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f16))
      return std::make_pair(0U, RC);
  }

  if (Constraint.size() > 1) {
    if (Constraint[1] == 'v') {
      RC = &AMDGPU::VGPR_32RegClass;
    } else if (Constraint[1] == 's') {
      RC = &AMDGPU::SGPR_32RegClass;
    }

    if (RC) {
      uint32_t Idx;
      bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
      if (!Failed && Idx < RC->getNumRegs())
        return std::make_pair(RC->getRegister(Idx), RC);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

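// Only the single-character 's' (SGPR) and 'v' (VGPR) constraints are
// classified as register classes here; everything else is left to the
// generic TargetLowering handling.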
SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 's':
    case 'v':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

// Figure out which registers should be reserved for stack access. Only after
// the function is legalized do we know all of the non-spill stack objects or if
// calls are present.
void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();

  if (Info->isEntryFunction()) {
    // Callable functions have fixed registers used for stack access, so only
    // entry functions need the private memory registers reserved here.
    reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
  }

  // We have to assume the SP is needed in case there are calls in the
  // function, since calls are only detected after the function is lowered.
  // We are about to reserve registers, so only claim a stack pointer register
  // if we are actually going to use it.
  bool NeedSP = !Info->isEntryFunction() ||
    MFI.hasVarSizedObjects() ||
    MFI.hasCalls();

  if (NeedSP) {
    unsigned ReservedStackPtrOffsetReg = TRI->reservedStackPtrOffsetReg(MF);
    Info->setStackPtrOffsetReg(ReservedStackPtrOffsetReg);

    assert(Info->getStackPtrOffsetReg() != Info->getFrameOffsetReg());
    assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
                               Info->getStackPtrOffsetReg()));
    MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
  }

  MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
  MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
  MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
                     Info->getScratchWaveOffsetReg());

  Info->limitOccupancy(MF);

  TargetLoweringBase::finalizeLowering(MF);
}

void SITargetLowering::computeKnownBitsForFrameIndex(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  TargetLowering::computeKnownBitsForFrameIndex(Op, Known, DemandedElts,
                                                DAG, Depth);

  if (getSubtarget()->enableHugePrivateBuffer())
    return;

  // Technically it may be possible to have a dispatch with a single workitem
  // that uses the full private memory size, but that's not really useful. We
  // can't use vaddr in MUBUF instructions if we don't know the address
  // calculation won't overflow, so assume the sign bit is never set.
  Known.Zero.setHighBits(AssumeFrameIndexHighZeroBits);
}

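// Conservatively decide whether the value produced by this node can differ
// between lanes of a wave: values in VGPRs or divergent formal arguments,
// loads from the private (scratch) address space, call sequences, intrinsics
// known to be divergent, and the AMDGPUISD interpolation nodes they lower to.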
bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode *N,
                                                  FunctionLoweringInfo *FLI,
                                                  DivergenceAnalysis *DA) const {
  switch (N->getOpcode()) {
  case ISD::Register:
  case ISD::CopyFromReg: {
    const RegisterSDNode *R = nullptr;
    if (N->getOpcode() == ISD::Register)
      R = dyn_cast<RegisterSDNode>(N);
    else
      R = dyn_cast<RegisterSDNode>(N->getOperand(1));

    if (R) {
      const MachineFunction *MF = FLI->MF;
      const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
      const MachineRegisterInfo &MRI = MF->getRegInfo();
      const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
      unsigned Reg = R->getReg();
      if (TRI.isPhysicalRegister(Reg))
        return TRI.isVGPR(MRI, Reg);

      if (MRI.isLiveIn(Reg)) {
        // workitem.id.x, workitem.id.y and workitem.id.z live in VGPRs; any
        // VGPR formal argument is also considered divergent.
        if (TRI.isVGPR(MRI, Reg))
          return true;
        // Formal arguments of non-entry functions are conservatively
        // considered divergent.
        if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
          return true;
      }
      return !DA || DA->isDivergent(FLI->getValueFromVirtualReg(Reg));
    }
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *L = dyn_cast<LoadSDNode>(N);
    if (L->getMemOperand()->getAddrSpace() ==
        Subtarget->getAMDGPUAS().PRIVATE_ADDRESS)
      return true;
    break;
  }
  case ISD::CALLSEQ_END:
    return true;
  case ISD::INTRINSIC_WO_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
  case ISD::INTRINSIC_W_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
  // In some cases intrinsics that are a source of divergence have already
  // been lowered to AMDGPUISD nodes, so we need to check those as well.
  case AMDGPUISD::INTERP_MOV:
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
    return true;
  }
  return false;
}