//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#define AMDGPU_LOG2E_F     1.44269504088896340735992468100189214f
#define AMDGPU_LN2_F       0.693147180559945309417232121458176568f
#define AMDGPU_LN10_F      2.30258509299404568401799145468436421f

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

static bool allocateCCRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State,
                           const TargetRegisterClass *RC,
                           unsigned NumRegs) {
  ArrayRef<MCPhysReg> RegList = makeArrayRef(RC->begin(), NumRegs);
  unsigned RegResult = State.AllocateReg(RegList);
  if (RegResult == AMDGPU::NoRegister)
    return false;

  State.addLoc(CCValAssign::getReg(ValNo, ValVT, RegResult, LocVT, LocInfo));
  return true;
}

static bool allocateSGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
  switch (LocVT.SimpleTy) {
  case MVT::i64:
  case MVT::f64:
  case MVT::v2i32:
  case MVT::v2f32:
  case MVT::v4i16:
  case MVT::v4f16: {
    // Up to SGPR0-SGPR39
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::SGPR_64RegClass, 20);
  }
  default:
    return false;
  }
}

// Allocate up to VGPR31.
//
// TODO: Since there are no VGPR alignment requirements would it be better to
// split into individual scalar registers?
static bool allocateVGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
  switch (LocVT.SimpleTy) {
  case MVT::i64:
  case MVT::f64:
  case MVT::v2i32:
  case MVT::v2f32:
  case MVT::v4i16:
  case MVT::v4f16: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_64RegClass, 31);
  }
  case MVT::v4i32:
  case MVT::v4f32:
  case MVT::v2i64:
  case MVT::v2f64: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_128RegClass, 29);
  }
  case MVT::v8i32:
  case MVT::v8f32: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_256RegClass, 25);
  }
  case MVT::v16i32:
  case MVT::v16f32: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_512RegClass, 17);
  }
  default:
    return false;
  }
}

#include "AMDGPUGenCallingConv.inc"

// Find a larger type to do a load / store of a vector with.
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
  KnownBits Known;
  EVT VT = Op.getValueType();
  DAG.computeKnownBits(Op, Known);

  return VT.getSizeInBits() - Known.countMinLeadingZeros();
}

unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() - DAG.ComputeNumSignBits(Op);
}

AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                                           const AMDGPUSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  AMDGPUASI = AMDGPU::getAMDGPUAS(TM);
  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);

  // There are no 64-bit extloads. These should be done as a 32-bit extload and
  // an extension to 64-bit.
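  // e.g. (i64 (zextload i8)) becomes (i64 (zext (i32 (zextload i8)))).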
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    if (VT == MVT::i64)
      continue;

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
  }

  for (MVT VT : MVT::integer_vector_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
  }

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);

  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
  setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);

  setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
  setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);

  setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
  setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // This is totally unsupported, just custom lower to produce an error.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  setOperationAction(ISD::FLOG, MVT::f32, Custom);
  setOperationAction(ISD::FLOG10, MVT::f32, Custom);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);

  // Expand to fneg + fadd.
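  // i.e. (fsub x, y) becomes (fadd x, (fneg y)), and the fneg maps onto a
  // source negation modifier.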
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    // These should use [SU]DIVREM, so set them to expand.
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // GPU does not have divrem function for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);

    // AMDGPU uses ADDC/SUBC/ADDE/SUBE
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction(ISD::SMIN, MVT::i32, Legal);
  setOperationAction(ISD::UMIN, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::UMAX, MVT::i32, Legal);

  setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
  }

  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v4f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
  }

  // Promoting these causes an unrolled select operation rather than expansion
  // with bit operations. This is in general better, but the alternative using
  // BFI instructions may be better if the select sources are SGPRs.
  setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);

  // There are no libcalls of any kind.
  for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
    setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // FIXME: This is only partially true. If we have to do vector compares, any
  // SGPR pair can be a condition register. If we have a uniform condition, we
  // are better off doing SALU operations, where there is only one SCC. For
  // now, we don't have a way of knowing during instruction selection if a
  // condition will be uniform and we always use vector compares. Assume we
  // are using vector compares until that is fixed.
  setHasMultipleConditionRegisters(true);

  PredictableSelectIsExpensive = false;

  // We want to find all load dependencies for long chains of stores to enable
  // merging into very wide vectors. The problem is with vectors with > 4
  // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
  // vectors are a legal type, even though we have to split the loads
  // usually. When we can more precisely specify load legality per address
  // space, we should be able to make FindBetterChain/MergeConsecutiveStores
  // smarter so that they can figure out what to do in 2 iterations without all
  // N > 4 stores on the same chain.
  GatherAllAliasesMaxDepth = 16;

  // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to
  // worry about these during lowering.
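  // The huge limits below remove any cap on inline expansion; there are no
  // libcalls to fall back on anyway (see setLibcallName above).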
  MaxStoresPerMemcpy = 0xffffffff;
  MaxStoresPerMemmove = 0xffffffff;
  MaxStoresPerMemset = 0xffffffff;

  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::MULHU);
  setTargetDAGCombine(ISD::MULHS);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FNEG);
  setTargetDAGCombine(ISD::FABS);
  setTargetDAGCombine(ISD::AssertZext);
  setTargetDAGCombine(ISD::AssertSext);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

LLVM_READNONE
static bool fnegFoldsIntoOp(unsigned Opc) {
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FSIN:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FCANONICALIZE:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY:
    return true;
  default:
    return false;
  }
}

/// \returns true if the operation will definitely need to use a 64-bit
/// encoding, and thus will use a VOP3 encoding regardless of the source
/// modifiers.
LLVM_READONLY
static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
  return N->getNumOperands() > 2 || VT == MVT::f64;
}

// Most FP instructions support source modifiers, but this could be refined
// slightly.
LLVM_READONLY
static bool hasSourceMods(const SDNode *N) {
  if (isa<MemSDNode>(N))
    return false;

  switch (N->getOpcode()) {
  case ISD::CopyToReg:
  case ISD::SELECT:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::INLINEASM:
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
  case AMDGPUISD::DIV_SCALE:

  // TODO: Should really be looking at the users of the bitcast. These are
  // problematic because bitcasts are used to legalize all stores to integer
  // types.
  case ISD::BITCAST:
    return false;
  default:
    return true;
  }
}

bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
                                                 unsigned CostThreshold) {
  // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
  // it is truly free to use a source modifier in all cases. If there are
  // multiple users, but using a source modifier would force each of them into
  // a VOP3 encoding, there will be a code size increase. Try to avoid
  // increasing code size unless we know it will save on the instruction count.
  unsigned NumMayIncreaseSize = 0;
  MVT VT = N->getValueType(0).getScalarType().getSimpleVT();

  // XXX - Should this limit number of uses to check?
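  // A use that must be VOP3 anyway gets the modifier for free; any other use
  // may have to grow from a 32-bit VOP1/VOP2 encoding to a 64-bit VOP3 one.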
  for (const SDNode *U : N->uses()) {
    if (!hasSourceMods(U))
      return false;

    if (!opMustUseVOP3Encoding(U, VT)) {
      if (++NumMayIncreaseSize > CostThreshold)
        return false;
    }
  }

  return true;
}

MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}

// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType,
                                                 EVT NewVT) const {
  unsigned NewSize = NewVT.getStoreSizeInBits();

  // If we are reducing to a 32-bit load, this is always better.
  if (NewSize == 32)
    return true;

  EVT OldVT = N->getValueType(0);
  unsigned OldSize = OldVT.getStoreSizeInBits();

  // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
  // extloads, so doing one requires using a buffer_load. In cases where we
  // still couldn't use a scalar load, using the wider load shouldn't really
  // hurt anything.

  // If the old size already had to be an extload, there's no harm in
  // continuing to reduce the width.
  return (OldSize < 32);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());

  if (LoadTy.getScalarType() == MVT::i32)
    return false;

  unsigned LScalarSize = LoadTy.getScalarSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarSizeInBits();

  return (LScalarSize < CastScalarSize) ||
         (CastScalarSize >= 32);
}

// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably
// also profitable with the expansion for 64-bit since it's generally good to
// speculate things.
// FIXME: These should really have the size as a parameter.
bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
  return true;
}

bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
  return true;
}

bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
  switch (N->getOpcode()) {
  default:
    return false;
  case ISD::EntryToken:
  case ISD::TokenFactor:
    return true;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntrID) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
      return true;
    }
  }
  case ISD::LOAD: {
    // The opcode is known to be a load here, so use cast instead of an
    // unchecked dyn_cast.
    const LoadSDNode *L = cast<LoadSDNode>(N);
    return L->getMemOperand()->getAddrSpace() ==
           AMDGPUASI.CONSTANT_ADDRESS_32BIT;
  }
  }
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());

  // Packed operations do not have a fabs modifier.
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16);
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16) ||
         (Subtarget->hasVOP3PInsts() && VT == MVT::v2f16);
}

bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
                                                        unsigned NumElem,
                                                        unsigned AS) const {
  return true;
}

bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  // There are few operations which truly have vector input operands. Any
  // vector operation is going to involve operations on each component, and a
  // build_vector will be a copy per element, so it always makes sense to use
  // a build_vector input in place of the extracted element to avoid a copy
  // into a super register.
  //
  // We should probably only do this if all users are extracts only, but this
  // should be the common case.
  return true;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  unsigned SrcSize = Source.getSizeInBits();
  unsigned DestSize = Dest.getSizeInBits();

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  unsigned SrcSize = Source->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (DestSize == 16 && Subtarget->has16BitInsts())
    return SrcSize >= 32;

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  unsigned SrcSize = Src->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (SrcSize == 16 && Subtarget->has16BitInsts())
    return DestSize >= 32;

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For
  // all practical purposes, the extra mov 0 to load a 64-bit value is free.
  // As used, this will enable reducing 64-bit operations to 32-bit, which is
  // always good.

  if (Src == MVT::i16)
    return Dest == MVT::i32 || Dest == MVT::i64;

  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits
  // is not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                  bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    llvm_unreachable("kernels should not be handled here");
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return CC_AMDGPU;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return CC_AMDGPU_Func;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}

CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                    bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    llvm_unreachable("kernels should not be handled here");
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return RetCC_SI_Shader;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return RetCC_AMDGPU_Func;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}

/// The SelectionDAGBuilder will automatically promote function arguments
/// with illegal types. However, this does not work for the AMDGPU targets
/// since the function arguments are stored in memory as these illegal types.
/// In order to handle this properly we need to get the original type sizes
/// from the LLVM IR Function and fixup the ISD::InputArg values before
/// passing them to AnalyzeFormalArguments().
///
/// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
/// input values across multiple registers. Each item in the Ins array
/// represents a single value that will be stored in registers. Ins[x].VT is
/// the value type of the value that will be stored in the register, so
/// whatever SDNode we lower the argument to needs to be this type.
///
/// In order to correctly lower the arguments we need to know the size of each
/// argument. Since Ins[x].VT gives us the size of the register that will
/// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
/// for the original function argument so that we can deduce the correct memory
/// type to use for Ins[x]. In most cases the correct memory type will be
/// Ins[x].ArgVT. However, this will not always be the case. If, for example,
/// we have a kernel argument of type v8i8, this argument will be split into
/// 8 parts and each part will be represented by its own item in the Ins array.
/// For each part the Ins[x].ArgVT will be v8i8, which is the full type of
/// the argument before it was split. From this, we deduce that the memory type
/// for each individual part is i8. We pass the memory type as LocVT to the
/// calling convention analysis function and the register type (Ins[x].VT) as
/// the ValVT.
void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
    CCState &State,
    const SmallVectorImpl<ISD::InputArg> &Ins) const {
  const MachineFunction &MF = State.getMachineFunction();
  const Function &Fn = MF.getFunction();
  LLVMContext &Ctx = Fn.getParent()->getContext();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
  const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
  CallingConv::ID CC = Fn.getCallingConv();

  unsigned MaxAlign = 1;
  uint64_t ExplicitArgOffset = 0;
  const DataLayout &DL = Fn.getParent()->getDataLayout();

  unsigned InIndex = 0;

  for (const Argument &Arg : Fn.args()) {
    Type *BaseArgTy = Arg.getType();
    unsigned Align = DL.getABITypeAlignment(BaseArgTy);
    MaxAlign = std::max(Align, MaxAlign);
    unsigned AllocSize = DL.getTypeAllocSize(BaseArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, Align) + ExplicitOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, Align) + AllocSize;

    // We're basically throwing away everything passed into us and starting
    // over to get accurate in-memory offsets. The "PartOffset" is completely
    // useless to us as computed in Ins.
    //
    // We also need to figure out what type legalization is trying to do to get
    // the correct memory offsets.

    SmallVector<EVT, 16> ValueVTs;
    SmallVector<uint64_t, 16> Offsets;
    ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);

    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      uint64_t BasePartOffset = Offsets[Value];

      EVT ArgVT = ValueVTs[Value];
      EVT MemVT = ArgVT;
      MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
      unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);

      if (NumRegs == 1) {
        // This argument is not split, so the IR type is the memory type.
        if (ArgVT.isExtended()) {
          // We have an extended type, like i24, so we should just use the
          // register type.
          MemVT = RegisterVT;
        } else {
          MemVT = ArgVT;
        }
      } else if (ArgVT.isVector() && RegisterVT.isVector() &&
                 ArgVT.getScalarType() == RegisterVT.getScalarType()) {
        assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
        // We have a vector value which has been split into a vector with
        // the same scalar type, but fewer elements. This should handle
        // all the floating-point vector types.
        MemVT = RegisterVT;
      } else if (ArgVT.isVector() &&
                 ArgVT.getVectorNumElements() == NumRegs) {
        // This arg has been split so that each element is stored in a
        // separate register.
        MemVT = ArgVT.getScalarType();
      } else if (ArgVT.isExtended()) {
        // We have an extended type, like i65.
        MemVT = RegisterVT;
      } else {
        unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
        assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
        if (RegisterVT.isInteger()) {
          MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
        } else if (RegisterVT.isVector()) {
          assert(!RegisterVT.getScalarType().isFloatingPoint());
          unsigned NumElements = RegisterVT.getVectorNumElements();
          assert(MemoryBits % NumElements == 0);
          // This vector type has been split into another vector type with
          // a different element size.
          EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
                                           MemoryBits / NumElements);
          MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
        } else {
          llvm_unreachable("cannot deduce memory type.");
        }
      }

      // Convert one-element vectors to scalar.
      if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
        MemVT = MemVT.getScalarType();

      if (MemVT.isExtended()) {
        // This should really only happen if we have vec3 arguments.
        assert(MemVT.isVector() && MemVT.getVectorNumElements() == 3);
        MemVT = MemVT.getPow2VectorType(State.getContext());
      }

      unsigned PartOffset = 0;
      for (unsigned i = 0; i != NumRegs; ++i) {
        State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
                                               BasePartOffset + PartOffset,
                                               MemVT.getSimpleVT(),
                                               CCValAssign::Full));
        PartOffset += MemVT.getStoreSize();
      }
    }
  }
}

SDValue AMDGPUTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv,
    bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SDLoc &DL, SelectionDAG &DAG) const {
  // FIXME: Fails for r600 tests
  //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
  //       "wave terminate should not have return values");
  return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                    bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
}

CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                      bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
}

SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
                                                  SelectionDAG &DAG,
                                                  MachineFrameInfo &MFI,
                                                  int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument load whose frame object
  // overlaps the clobbered one.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
                            UE = DAG.getEntryNode().getNode()->use_end();
       U != UE; ++U) {
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) {
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;

          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }
      }
    }
  }

  // Build a tokenfactor for all the chains.
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}

SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
                                                 SmallVectorImpl<SDValue> &InVals,
                                                 StringRef Reason) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(
      Fn, Reason + FuncName, CLI.DL.getDebugLoc());
  DAG.getContext()->diagnose(NoCalls);

  if (!CLI.IsTailCall) {
    for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
      InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
  }

  return DAG.getEntryNode();
}

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
}

SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn = DAG.getMachineFunction().getFunction();

  DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
                                            SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(NoDynamicAlloca);
  auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
  return DAG.getMergeValues(Ops, SDLoc());
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op->print(errs(), &DAG);
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FREM: return LowerFREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FROUND: return LowerFROUND(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
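  // Since log2 is the only log the hardware provides, ln and log10 are
  // derived from it: ln(x) = log2(x) * ln(2) and
  // log10(x) = log2(x) * (ln(2) / ln(10)), which is where the scale factors
  // passed to LowerFLOG below come from.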
  case ISD::FLOG:
    return LowerFLOG(Op, DAG, 1 / AMDGPU_LOG2E_F);
  case ISD::FLOG10:
    return LowerFLOG(Op, DAG, AMDGPU_LN2_F / AMDGPU_LN10_F);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return LowerCTLZ_CTTZ(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to disagree about which type of
    // sign_extend_inreg should be checked for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  default:
    return;
  }
}

static bool hasDefinedInitializer(const GlobalValue *GV) {
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer())
    return false;

  return !isa<UndefValue>(GVar->getInitializer());
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {
  const DataLayout &DL = DAG.getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  if (G->getAddressSpace() == AMDGPUASI.LOCAL_ADDRESS ||
      G->getAddressSpace() == AMDGPUASI.REGION_ADDRESS) {
    if (!MFI->isEntryFunction()) {
      const Function &Fn = DAG.getMachineFunction().getFunction();
      DiagnosticInfoUnsupported BadLDSDecl(
          Fn, "local memory global used by non-kernel function",
          SDLoc(Op).getDebugLoc());
      DAG.getContext()->diagnose(BadLDSDecl);
    }

    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    // TODO: We could emit code to handle the initialization somewhere.
    if (!hasDefinedInitializer(GV)) {
      unsigned Offset = MFI->allocateLDSGlobal(DL, *GV);
      return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
    }
  }

  const Function &Fn = DAG.getMachineFunction().getFunction();
  DiagnosticInfoUnsupported BadInit(
      Fn, "unsupported initializer for address space",
      SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(BadInit);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  EVT VT = Op.getValueType();
  if (VT == MVT::v4i16 || VT == MVT::v4f16) {
    SDLoc SL(Op);
    SDValue Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(0));
    SDValue Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(1));

    SDValue BV = DAG.getBuildVector(MVT::v2i32, SL, { Lo, Hi });
    return DAG.getNode(ISD::BITCAST, SL, VT, BV);
  }

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

/// Generate Min/Max node
SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
                                                   SDValue LHS, SDValue RHS,
                                                   SDValue True, SDValue False,
                                                   SDValue CC,
                                                   DAGCombinerInfo &DCI) const {
  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    break;
  case ISD::SETULE:
  case ISD::SETULT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    // Ordered. Assume ordered for undefined.

    // Only do this after legalization to avoid interfering with other combines
    // which might occur.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    // We need to permute the operands to get the correct NaN behavior. The
    // selected operand is the second one based on the failing compare with
    // NaN, so permute it based on the compare type the hardware uses.
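    // e.g. (select (setolt x, y), x, y) maps to (fmin_legacy x, y):
    // fmin_legacy returns its second operand when the compare fails (NaN),
    // which matches the select picking y for an unordered setolt.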
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETUGE:
  case ISD::SETUGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETOGE:
  case ISD::SETOGT: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}

std::pair<SDValue, SDValue>
AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  return std::make_pair(Lo, Hi);
}

SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
}

SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
}

SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT VT = Op.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorLoad(Load, DAG);

  SDValue BasePtr = Load->getBasePtr();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);

  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);

  unsigned Size = LoMemVT.getStoreSize();
  unsigned BaseAlign = Load->getAlignment();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                                  Load->getChain(), BasePtr, SrcValue, LoMemVT,
                                  BaseAlign, Load->getMemOperand()->getFlags());
  SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, Size);
  SDValue HiLoad =
      DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
                     HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, HiAlign, Load->getMemOperand()->getFlags());

  SDValue Ops[] = {
    DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                LoLoad.getValue(1), HiLoad.getValue(1))
  };

  return DAG.getMergeValues(Ops, SL);
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Val = Store->getValue();
  EVT VT = Val.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorStore(Store, DAG);

  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDLoc SL(Op);

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);

  SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());

  const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
  unsigned BaseAlign = Store->getAlignment();
  unsigned Size = LoMemVT.getStoreSize();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoStore =
      DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
                        Store->getMemOperand()->getFlags());
  SDValue HiStore =
      DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
                        HiMemVT, HiAlign, Store->getMemOperand()->getFlags());

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}

// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The fractional part of a
// float is enough to accurately represent up to a 24-bit signed integer.
SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
                                            bool Sign) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT IntVT = MVT::i32;
  MVT FltVT = MVT::f32;

  unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
  if (LHSSignBits < 9)
    return SDValue();

  unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
  if (RHSSignBits < 9)
    return SDValue();

  unsigned BitSize = VT.getSizeInBits();
  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = BitSize - SignBits;
  if (Sign)
    ++DivBits;

  ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
  ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;

  SDValue jq = DAG.getConstant(1, DL, IntVT);

  if (Sign) {
    // char|short jq = ia ^ ib;
    jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);

    // jq = jq >> (bitsize - 2)
    jq = DAG.getNode(ISD::SRA, DL, VT, jq,
                     DAG.getConstant(BitSize - 2, DL, VT));

    // jq = jq | 0x1
    jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
  }

  // int ia = (int)LHS;
  SDValue ia = LHS;

  // int ib = (int)RHS;
  SDValue ib = RHS;

  // float fa = (float)ia;
  SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);

  // float fb = (float)ib;
  SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);

  SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
                           fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));

  // fq = trunc(fq);
  fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);

  // float fqneg = -fq;
  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);

  // float fr = mad(fqneg, fb, fa);
  unsigned OpCode = Subtarget->hasFP32Denormals() ?
                    (unsigned)AMDGPUISD::FMAD_FTZ :
                    (unsigned)ISD::FMAD;
  SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);

  // int iq = (int)fq;
  SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);

  // fr = fabs(fr);
  fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);

  // fb = fabs(fb);
  fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  // int cv = fr >= fb;
  SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);

  // jq = (cv ? jq : 0);
  jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));

  // dst = iq + jq;
  SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);

  // Rem needs compensation, it's easier to recompute it.
  SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
  Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);

  // Truncate to number of bits this divide really is.
1538 if (Sign) { 1539 SDValue InRegSize 1540 = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits)); 1541 Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize); 1542 Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize); 1543 } else { 1544 SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT); 1545 Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask); 1546 Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask); 1547 } 1548 1549 return DAG.getMergeValues({ Div, Rem }, DL); 1550 } 1551 1552 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op, 1553 SelectionDAG &DAG, 1554 SmallVectorImpl<SDValue> &Results) const { 1555 SDLoc DL(Op); 1556 EVT VT = Op.getValueType(); 1557 1558 assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64"); 1559 1560 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); 1561 1562 SDValue One = DAG.getConstant(1, DL, HalfVT); 1563 SDValue Zero = DAG.getConstant(0, DL, HalfVT); 1564 1565 //HiLo split 1566 SDValue LHS = Op.getOperand(0); 1567 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero); 1568 SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, One); 1569 1570 SDValue RHS = Op.getOperand(1); 1571 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero); 1572 SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, One); 1573 1574 if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) && 1575 DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) { 1576 1577 SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT), 1578 LHS_Lo, RHS_Lo); 1579 1580 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero}); 1581 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero}); 1582 1583 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV)); 1584 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM)); 1585 return; 1586 } 1587 1588 if (isTypeLegal(MVT::i64)) { 1589 // Compute denominator reciprocal. 1590 unsigned FMAD = Subtarget->hasFP32Denormals() ? 
1591 (unsigned)AMDGPUISD::FMAD_FTZ : 1592 (unsigned)ISD::FMAD; 1593 1594 SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo); 1595 SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi); 1596 SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi, 1597 DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32), 1598 Cvt_Lo); 1599 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1); 1600 SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp, 1601 DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32)); 1602 SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1, 1603 DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32)); 1604 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2); 1605 SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc, 1606 DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32), 1607 Mul1); 1608 SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2); 1609 SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc); 1610 SDValue Rcp64 = DAG.getBitcast(VT, 1611 DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi})); 1612 1613 SDValue Zero64 = DAG.getConstant(0, DL, VT); 1614 SDValue One64 = DAG.getConstant(1, DL, VT); 1615 SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1); 1616 SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1); 1617 1618 SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS); 1619 SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64); 1620 SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1); 1621 SDValue Mulhi1_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1, 1622 Zero); 1623 SDValue Mulhi1_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1, 1624 One); 1625 1626 SDValue Add1_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Lo, 1627 Mulhi1_Lo, Zero1); 1628 SDValue Add1_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Hi, 1629 Mulhi1_Hi, Add1_Lo.getValue(1)); 1630 SDValue Add1_HiNc = DAG.getNode(ISD::ADD, DL, HalfVT, Rcp_Hi, Mulhi1_Hi); 1631 SDValue Add1 = DAG.getBitcast(VT, 1632 DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi})); 1633 1634 SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1); 1635 SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2); 1636 SDValue Mulhi2_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2, 1637 Zero); 1638 SDValue Mulhi2_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2, 1639 One); 1640 1641 SDValue Add2_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Lo, 1642 Mulhi2_Lo, Zero1); 1643 SDValue Add2_HiC = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_HiNc, 1644 Mulhi2_Hi, Add1_Lo.getValue(1)); 1645 SDValue Add2_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add2_HiC, 1646 Zero, Add2_Lo.getValue(1)); 1647 SDValue Add2 = DAG.getBitcast(VT, 1648 DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi})); 1649 SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2); 1650 1651 SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3); 1652 1653 SDValue Mul3_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, Zero); 1654 SDValue Mul3_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, One); 1655 SDValue Sub1_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Lo, 1656 Mul3_Lo, Zero1); 1657 SDValue Sub1_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Hi, 1658 Mul3_Hi, Sub1_Lo.getValue(1)); 1659 SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi); 1660 SDValue Sub1 = DAG.getBitcast(VT, 
1661     DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));
1662
1663     SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
1664     SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
1665                                  ISD::SETUGE);
1666     SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
1667                                  ISD::SETUGE);
1668     SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);
1669
1670     // TODO: Here and below, portions of the code could be enclosed in
1671     // if/endif. Currently the control flow is unconditional and we have 4
1672     // selects after the potential endif to substitute for PHIs.
1673
1674     // if C3 != 0 ...
1675     SDValue Sub2_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Lo,
1676                                   RHS_Lo, Zero1);
1677     SDValue Sub2_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Mi,
1678                                   RHS_Hi, Sub1_Lo.getValue(1));
1679     SDValue Sub2_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1680                                   Zero, Sub2_Lo.getValue(1));
1681     SDValue Sub2 = DAG.getBitcast(VT,
1682         DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));
1683
1684     SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);
1685
1686     SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
1687                                  ISD::SETUGE);
1688     SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
1689                                  ISD::SETUGE);
1690     SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);
1691
1692     // if (C6 != 0)
1693     SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);
1694
1695     SDValue Sub3_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Lo,
1696                                   RHS_Lo, Zero1);
1697     SDValue Sub3_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1698                                   RHS_Hi, Sub2_Lo.getValue(1));
1699     SDValue Sub3_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub3_Mi,
1700                                   Zero, Sub3_Lo.getValue(1));
1701     SDValue Sub3 = DAG.getBitcast(VT,
1702         DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));
1703
1704     // endif C6
1705     // endif C3
1706
1707     SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
1708     SDValue Div = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);
1709
1710     SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
1711     SDValue Rem = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);
1712
1713     Results.push_back(Div);
1714     Results.push_back(Rem);
1715
1716     return;
1717   }
1718
1719   // r600 expansion.
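  // Editor's note: the loop below is a textbook restoring (shift/subtract)
  // divider over the low half. Each step shifts the partial remainder left,
  // ORs in the next numerator bit, and subtracts RHS when REM >= RHS while
  // setting the matching quotient bit. E.g. with 4-bit operands,
  // 13 / 3 (1101b / 11b) produces partial remainders 1, 3->0, 0, 1 and
  // quotient bits 0,1,0,0, i.e. DIV = 4 and REM = 1.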
1720   // Get speculative values
1721   SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
1722   SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
1723
1724   SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
1725   SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
1726   REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
1727
1728   SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
1729   SDValue DIV_Lo = Zero;
1730
1731   const unsigned halfBitWidth = HalfVT.getSizeInBits();
1732
1733   for (unsigned i = 0; i < halfBitWidth; ++i) {
1734     const unsigned bitPos = halfBitWidth - i - 1;
1735     SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
1736     // Get the value of the high bit
1737     SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1738     HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
1739     HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
1740
1741     // Shift
1742     REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
1743     // Add LHS high bit
1744     REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
1745
1746     SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
1747     SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);
1748
1749     DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1750
1751     // Update REM
1752     SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
1753     REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
1754   }
1755
1756   SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
1757   DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
1758   Results.push_back(DIV);
1759   Results.push_back(REM);
1760 }
1761
1762 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
1763                                            SelectionDAG &DAG) const {
1764   SDLoc DL(Op);
1765   EVT VT = Op.getValueType();
1766
1767   if (VT == MVT::i64) {
1768     SmallVector<SDValue, 2> Results;
1769     LowerUDIVREM64(Op, DAG, Results);
1770     return DAG.getMergeValues(Results, DL);
1771   }
1772
1773   if (VT == MVT::i32) {
1774     if (SDValue Res = LowerDIVREM24(Op, DAG, false))
1775       return Res;
1776   }
1777
1778   SDValue Num = Op.getOperand(0);
1779   SDValue Den = Op.getOperand(1);
1780
1781   // RCP = URECIP(Den) = 2^32 / Den + e
1782   // e is the rounding error.
1783   SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);
1784
1785   // RCP_LO = mul(RCP, Den)
1786   SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);
1787
1788   // RCP_HI = mulhu(RCP, Den)
1789   SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);
1790
1791   // NEG_RCP_LO = -RCP_LO
1792   SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
1793                                    RCP_LO);
1794
1795   // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
1796   SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1797                                        NEG_RCP_LO, RCP_LO,
1798                                        ISD::SETEQ);
1799   // Calculate the rounding error from the URECIP instruction
1800   // E = mulhu(ABS_RCP_LO, RCP)
1801   SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);
1802
1803   // RCP_A_E = RCP + E
1804   SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);
1805
1806   // RCP_S_E = RCP - E
1807   SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);
1808
1809   // Tmp0 = (RCP_HI == 0 ?
RCP_A_E : RCP_S_E)
1810   SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1811                                  RCP_A_E, RCP_S_E,
1812                                  ISD::SETEQ);
1813   // Quotient = mulhu(Tmp0, Num)
1814   SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);
1815
1816   // Num_S_Remainder = Quotient * Den
1817   SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);
1818
1819   // Remainder = Num - Num_S_Remainder
1820   SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);
1821
1822   // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
1823   SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
1824                                              DAG.getConstant(-1, DL, VT),
1825                                              DAG.getConstant(0, DL, VT),
1826                                              ISD::SETUGE);
1827   // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
1828   SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
1829                                               Num_S_Remainder,
1830                                               DAG.getConstant(-1, DL, VT),
1831                                               DAG.getConstant(0, DL, VT),
1832                                               ISD::SETUGE);
1833   // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
1834   SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
1835                              Remainder_GE_Zero);
1836
1837   // Calculate the division result:
1838
1839   // Quotient_A_One = Quotient + 1
1840   SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
1841                                        DAG.getConstant(1, DL, VT));
1842
1843   // Quotient_S_One = Quotient - 1
1844   SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
1845                                        DAG.getConstant(1, DL, VT));
1846
1847   // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
1848   SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
1849                                 Quotient, Quotient_A_One, ISD::SETEQ);
1850
1851   // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
1852   Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
1853                         Quotient_S_One, Div, ISD::SETEQ);
1854
1855   // Calculate the remainder result:
1856
1857   // Remainder_S_Den = Remainder - Den
1858   SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);
1859
1860   // Remainder_A_Den = Remainder + Den
1861   SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);
1862
1863   // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
1864   SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
1865                                 Remainder, Remainder_S_Den, ISD::SETEQ);
1866
1867   // Rem = (Remainder_GE_Zero == 0 ?
Remainder_A_Den : Rem) 1868 Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT), 1869 Remainder_A_Den, Rem, ISD::SETEQ); 1870 SDValue Ops[2] = { 1871 Div, 1872 Rem 1873 }; 1874 return DAG.getMergeValues(Ops, DL); 1875 } 1876 1877 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op, 1878 SelectionDAG &DAG) const { 1879 SDLoc DL(Op); 1880 EVT VT = Op.getValueType(); 1881 1882 SDValue LHS = Op.getOperand(0); 1883 SDValue RHS = Op.getOperand(1); 1884 1885 SDValue Zero = DAG.getConstant(0, DL, VT); 1886 SDValue NegOne = DAG.getConstant(-1, DL, VT); 1887 1888 if (VT == MVT::i32) { 1889 if (SDValue Res = LowerDIVREM24(Op, DAG, true)) 1890 return Res; 1891 } 1892 1893 if (VT == MVT::i64 && 1894 DAG.ComputeNumSignBits(LHS) > 32 && 1895 DAG.ComputeNumSignBits(RHS) > 32) { 1896 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); 1897 1898 //HiLo split 1899 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero); 1900 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero); 1901 SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT), 1902 LHS_Lo, RHS_Lo); 1903 SDValue Res[2] = { 1904 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)), 1905 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1)) 1906 }; 1907 return DAG.getMergeValues(Res, DL); 1908 } 1909 1910 SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT); 1911 SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT); 1912 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign); 1913 SDValue RSign = LHSign; // Remainder sign is the same as LHS 1914 1915 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign); 1916 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign); 1917 1918 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign); 1919 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign); 1920 1921 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS); 1922 SDValue Rem = Div.getValue(1); 1923 1924 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign); 1925 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign); 1926 1927 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign); 1928 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign); 1929 1930 SDValue Res[2] = { 1931 Div, 1932 Rem 1933 }; 1934 return DAG.getMergeValues(Res, DL); 1935 } 1936 1937 // (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y)) 1938 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const { 1939 SDLoc SL(Op); 1940 EVT VT = Op.getValueType(); 1941 SDValue X = Op.getOperand(0); 1942 SDValue Y = Op.getOperand(1); 1943 1944 // TODO: Should this propagate fast-math-flags? 
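  // Editor's worked example of the identity above: frem(7.5, 2.0) -> fdiv
  // gives 3.75, ftrunc gives 3.0, fmul gives 6.0, and the final fsub yields
  // 7.5 - 6.0 = 1.5, matching fmod(7.5, 2.0).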
1945
1946   SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
1947   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
1948   SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Trunc, Y);
1949
1950   return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
1951 }
1952
1953 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
1954   SDLoc SL(Op);
1955   SDValue Src = Op.getOperand(0);
1956
1957   // result = trunc(src)
1958   // if (src > 0.0 && src != result)
1959   //   result += 1.0
1960
1961   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
1962
1963   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
1964   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
1965
1966   EVT SetCCVT =
1967       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
1968
1969   SDValue Gt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
1970   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
1971   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Gt0, NeTrunc);
1972
1973   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
1974   // TODO: Should this propagate fast-math-flags?
1975   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
1976 }
1977
1978 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
1979                                   SelectionDAG &DAG) {
1980   const unsigned FractBits = 52;
1981   const unsigned ExpBits = 11;
1982
1983   SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
1984                                 Hi,
1985                                 DAG.getConstant(FractBits - 32, SL, MVT::i32),
1986                                 DAG.getConstant(ExpBits, SL, MVT::i32));
1987   SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
1988                             DAG.getConstant(1023, SL, MVT::i32));
1989
1990   return Exp;
1991 }
1992
1993 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
1994   SDLoc SL(Op);
1995   SDValue Src = Op.getOperand(0);
1996
1997   assert(Op.getValueType() == MVT::f64);
1998
1999   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2000   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2001
2002   SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2003
2004   // Extract the upper half, since this is where we will find the sign and
2005   // exponent.
2006   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
2007
2008   SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2009
2010   const unsigned FractBits = 52;
2011
2012   // Extract the sign bit.
2013   const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
2014   SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
2015
2016   // Extend back to 64 bits.
2017 SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit}); 2018 SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64); 2019 2020 SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src); 2021 const SDValue FractMask 2022 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64); 2023 2024 SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp); 2025 SDValue Not = DAG.getNOT(SL, Shr, MVT::i64); 2026 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not); 2027 2028 EVT SetCCVT = 2029 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32); 2030 2031 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32); 2032 2033 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT); 2034 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT); 2035 2036 SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0); 2037 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1); 2038 2039 return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2); 2040 } 2041 2042 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const { 2043 SDLoc SL(Op); 2044 SDValue Src = Op.getOperand(0); 2045 2046 assert(Op.getValueType() == MVT::f64); 2047 2048 APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52"); 2049 SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64); 2050 SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src); 2051 2052 // TODO: Should this propagate fast-math-flags? 2053 2054 SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign); 2055 SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign); 2056 2057 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src); 2058 2059 APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51"); 2060 SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64); 2061 2062 EVT SetCCVT = 2063 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 2064 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT); 2065 2066 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2); 2067 } 2068 2069 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const { 2070 // FNEARBYINT and FRINT are the same, except in their handling of FP 2071 // exceptions. Those aren't really meaningful for us, and OpenCL only has 2072 // rint, so just treat them as equivalent. 2073 return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0)); 2074 } 2075 2076 // XXX - May require not supporting f32 denormals? 2077 2078 // Don't handle v2f16. The extra instructions to scalarize and repack around the 2079 // compare and vselect end up producing worse code than scalarizing the whole 2080 // operation. 2081 SDValue AMDGPUTargetLowering::LowerFROUND32_16(SDValue Op, SelectionDAG &DAG) const { 2082 SDLoc SL(Op); 2083 SDValue X = Op.getOperand(0); 2084 EVT VT = Op.getValueType(); 2085 2086 SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X); 2087 2088 // TODO: Should this propagate fast-math-flags? 
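  // Editor's worked example: for x = -2.5, T = -2.0 and AbsDiff = 0.5, so
  // the SETOGE compare below holds and Sel = copysign(1.0, x) = -1.0,
  // giving T + Sel = -3.0; halfway cases round away from zero, as round()
  // requires.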
2089 2090 SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T); 2091 2092 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff); 2093 2094 const SDValue Zero = DAG.getConstantFP(0.0, SL, VT); 2095 const SDValue One = DAG.getConstantFP(1.0, SL, VT); 2096 const SDValue Half = DAG.getConstantFP(0.5, SL, VT); 2097 2098 SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X); 2099 2100 EVT SetCCVT = 2101 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 2102 2103 SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE); 2104 2105 SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero); 2106 2107 return DAG.getNode(ISD::FADD, SL, VT, T, Sel); 2108 } 2109 2110 SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const { 2111 SDLoc SL(Op); 2112 SDValue X = Op.getOperand(0); 2113 2114 SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X); 2115 2116 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2117 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 2118 const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32); 2119 const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32); 2120 EVT SetCCVT = 2121 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32); 2122 2123 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 2124 2125 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One); 2126 2127 SDValue Exp = extractF64Exponent(Hi, SL, DAG); 2128 2129 const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL, 2130 MVT::i64); 2131 2132 SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp); 2133 SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64, 2134 DAG.getConstant(INT64_C(0x0008000000000000), SL, 2135 MVT::i64), 2136 Exp); 2137 2138 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M); 2139 SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT, 2140 DAG.getConstant(0, SL, MVT::i64), Tmp0, 2141 ISD::SETNE); 2142 2143 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1, 2144 D, DAG.getConstant(0, SL, MVT::i64)); 2145 SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2); 2146 2147 K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64)); 2148 K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K); 2149 2150 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT); 2151 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT); 2152 SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ); 2153 2154 SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64, 2155 ExpEqNegOne, 2156 DAG.getConstantFP(1.0, SL, MVT::f64), 2157 DAG.getConstantFP(0.0, SL, MVT::f64)); 2158 2159 SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X); 2160 2161 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K); 2162 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K); 2163 2164 return K; 2165 } 2166 2167 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const { 2168 EVT VT = Op.getValueType(); 2169 2170 if (VT == MVT::f32 || VT == MVT::f16) 2171 return LowerFROUND32_16(Op, DAG); 2172 2173 if (VT == MVT::f64) 2174 return LowerFROUND64(Op, DAG); 2175 2176 llvm_unreachable("unhandled type"); 2177 } 2178 2179 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const { 2180 SDLoc SL(Op); 2181 SDValue Src = Op.getOperand(0); 2182 2183 // result = trunc(src); 2184 // if (src < 0.0 && src != result) 2185 // result += -1.0. 
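  // Editor's worked example: for src = -2.3, trunc gives -2.0; since
  // src < 0.0 and src != -2.0, the select below yields -1.0 and the result
  // is -3.0. For src = 2.3 the condition fails and the trunc result 2.0
  // stands.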
2186
2187   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2188
2189   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2190   const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
2191
2192   EVT SetCCVT =
2193       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2194
2195   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
2196   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2197   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2198
2199   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
2200   // TODO: Should this propagate fast-math-flags?
2201   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2202 }
2203
2204 SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG,
2205                                         double Log2BaseInverted) const {
2206   EVT VT = Op.getValueType();
2207
2208   SDLoc SL(Op);
2209   SDValue Operand = Op.getOperand(0);
2210   SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand);
2211   SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2212
2213   return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
2214 }
2215
2216 static bool isCtlzOpc(unsigned Opc) {
2217   return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
2218 }
2219
2220 static bool isCttzOpc(unsigned Opc) {
2221   return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
2222 }
2223
2224 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
2225   SDLoc SL(Op);
2226   SDValue Src = Op.getOperand(0);
2227   bool ZeroUndef = Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
2228                    Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;
2229
2230   unsigned ISDOpc, NewOpc;
2231   if (isCtlzOpc(Op.getOpcode())) {
2232     ISDOpc = ISD::CTLZ_ZERO_UNDEF;
2233     NewOpc = AMDGPUISD::FFBH_U32;
2234   } else if (isCttzOpc(Op.getOpcode())) {
2235     ISDOpc = ISD::CTTZ_ZERO_UNDEF;
2236     NewOpc = AMDGPUISD::FFBL_B32;
2237   } else
2238     llvm_unreachable("Unexpected opcode");
2239
2240
2241   if (ZeroUndef && Src.getValueType() == MVT::i32)
2242     return DAG.getNode(NewOpc, SL, MVT::i32, Src);
2243
2244   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2245
2246   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2247   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2248
2249   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
2250   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
2251
2252   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2253                                    *DAG.getContext(), MVT::i32);
2254
2255   SDValue HiOrLo = isCtlzOpc(Op.getOpcode()) ? Hi : Lo;
2256   SDValue Hi0orLo0 = DAG.getSetCC(SL, SetCCVT, HiOrLo, Zero, ISD::SETEQ);
2257
2258   SDValue OprLo = DAG.getNode(ISDOpc, SL, MVT::i32, Lo);
2259   SDValue OprHi = DAG.getNode(ISDOpc, SL, MVT::i32, Hi);
2260
2261   const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
2262   SDValue Add, NewOpr;
2263   if (isCtlzOpc(Op.getOpcode())) {
2264     Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprLo, Bits32);
2265     // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
2266     NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprHi);
2267   } else {
2268     Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprHi, Bits32);
2269     // cttz(x) = lo_32(x) == 0 ? cttz(hi_32(x)) + 32 : cttz(lo_32(x))
2270     NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprLo);
2271   }
2272
2273   if (!ZeroUndef) {
2274     // Test if the full 64-bit input is zero.
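    // Editor's worked example: for ctlz of x = 0x0000000000000001, Hi == 0,
    // so the select above picks ctlz(Lo) + 32 = 31 + 32 = 63. When
    // ZeroUndef is false and x == 0, the select emitted below replaces the
    // hardware's -1 with the defined result, 64.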
2275 2276 // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32, 2277 // which we probably don't want. 2278 SDValue LoOrHi = isCtlzOpc(Op.getOpcode()) ? Lo : Hi; 2279 SDValue Lo0OrHi0 = DAG.getSetCC(SL, SetCCVT, LoOrHi, Zero, ISD::SETEQ); 2280 SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0OrHi0, Hi0orLo0); 2281 2282 // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction 2283 // with the same cycles, otherwise it is slower. 2284 // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src, 2285 // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ); 2286 2287 const SDValue Bits32 = DAG.getConstant(64, SL, MVT::i32); 2288 2289 // The instruction returns -1 for 0 input, but the defined intrinsic 2290 // behavior is to return the number of bits. 2291 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, 2292 SrcIsZero, Bits32, NewOpr); 2293 } 2294 2295 return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr); 2296 } 2297 2298 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, 2299 bool Signed) const { 2300 // Unsigned 2301 // cul2f(ulong u) 2302 //{ 2303 // uint lz = clz(u); 2304 // uint e = (u != 0) ? 127U + 63U - lz : 0; 2305 // u = (u << lz) & 0x7fffffffffffffffUL; 2306 // ulong t = u & 0xffffffffffUL; 2307 // uint v = (e << 23) | (uint)(u >> 40); 2308 // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U); 2309 // return as_float(v + r); 2310 //} 2311 // Signed 2312 // cl2f(long l) 2313 //{ 2314 // long s = l >> 63; 2315 // float r = cul2f((l + s) ^ s); 2316 // return s ? -r : r; 2317 //} 2318 2319 SDLoc SL(Op); 2320 SDValue Src = Op.getOperand(0); 2321 SDValue L = Src; 2322 2323 SDValue S; 2324 if (Signed) { 2325 const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64); 2326 S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit); 2327 2328 SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S); 2329 L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S); 2330 } 2331 2332 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), 2333 *DAG.getContext(), MVT::f32); 2334 2335 2336 SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32); 2337 SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64); 2338 SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L); 2339 LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ); 2340 2341 SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32); 2342 SDValue E = DAG.getSelect(SL, MVT::i32, 2343 DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE), 2344 DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ), 2345 ZeroI32); 2346 2347 SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64, 2348 DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ), 2349 DAG.getConstant((-1ULL) >> 1, SL, MVT::i64)); 2350 2351 SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U, 2352 DAG.getConstant(0xffffffffffULL, SL, MVT::i64)); 2353 2354 SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64, 2355 U, DAG.getConstant(40, SL, MVT::i64)); 2356 2357 SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32, 2358 DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)), 2359 DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl)); 2360 2361 SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64); 2362 SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT); 2363 SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ); 2364 2365 SDValue One = DAG.getConstant(1, SL, MVT::i32); 2366 2367 SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One); 2368 2369 SDValue R = DAG.getSelect(SL, MVT::i32, 2370 RCmp, 2371 One, 2372 DAG.getSelect(SL, MVT::i32, 
TCmp, VTrunc1, ZeroI32)); 2373 R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R); 2374 R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R); 2375 2376 if (!Signed) 2377 return R; 2378 2379 SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R); 2380 return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R); 2381 } 2382 2383 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, 2384 bool Signed) const { 2385 SDLoc SL(Op); 2386 SDValue Src = Op.getOperand(0); 2387 2388 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src); 2389 2390 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, 2391 DAG.getConstant(0, SL, MVT::i32)); 2392 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, 2393 DAG.getConstant(1, SL, MVT::i32)); 2394 2395 SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP, 2396 SL, MVT::f64, Hi); 2397 2398 SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo); 2399 2400 SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi, 2401 DAG.getConstant(32, SL, MVT::i32)); 2402 // TODO: Should this propagate fast-math-flags? 2403 return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo); 2404 } 2405 2406 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op, 2407 SelectionDAG &DAG) const { 2408 assert(Op.getOperand(0).getValueType() == MVT::i64 && 2409 "operation should be legal"); 2410 2411 // TODO: Factor out code common with LowerSINT_TO_FP. 2412 2413 EVT DestVT = Op.getValueType(); 2414 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { 2415 SDLoc DL(Op); 2416 SDValue Src = Op.getOperand(0); 2417 2418 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src); 2419 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op)); 2420 SDValue FPRound = 2421 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag); 2422 2423 return FPRound; 2424 } 2425 2426 if (DestVT == MVT::f32) 2427 return LowerINT_TO_FP32(Op, DAG, false); 2428 2429 assert(DestVT == MVT::f64); 2430 return LowerINT_TO_FP64(Op, DAG, false); 2431 } 2432 2433 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op, 2434 SelectionDAG &DAG) const { 2435 assert(Op.getOperand(0).getValueType() == MVT::i64 && 2436 "operation should be legal"); 2437 2438 // TODO: Factor out code common with LowerUINT_TO_FP. 2439 2440 EVT DestVT = Op.getValueType(); 2441 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { 2442 SDLoc DL(Op); 2443 SDValue Src = Op.getOperand(0); 2444 2445 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src); 2446 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op)); 2447 SDValue FPRound = 2448 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag); 2449 2450 return FPRound; 2451 } 2452 2453 if (DestVT == MVT::f32) 2454 return LowerINT_TO_FP32(Op, DAG, true); 2455 2456 assert(DestVT == MVT::f64); 2457 return LowerINT_TO_FP64(Op, DAG, true); 2458 } 2459 2460 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG, 2461 bool Signed) const { 2462 SDLoc SL(Op); 2463 2464 SDValue Src = Op.getOperand(0); 2465 2466 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 2467 2468 SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL, 2469 MVT::f64); 2470 SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL, 2471 MVT::f64); 2472 // TODO: Should this propagate fast-math-flags? 
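  // Editor's note on the constants above: K0 is 0x3df0000000000000 = 2^-32
  // and K1 is 0xc1f0000000000000 = -2^32. The sequence below computes
  // hi = floor(trunc(x) * 2^-32) and then lo = fma(hi, -2^32, trunc(x)),
  // which is exact, so the two fp-to-int conversions yield the halves of
  // the i64 result.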
2473 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0); 2474 2475 SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul); 2476 2477 2478 SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc); 2479 2480 SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL, 2481 MVT::i32, FloorMul); 2482 SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma); 2483 2484 SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi}); 2485 2486 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result); 2487 } 2488 2489 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const { 2490 SDLoc DL(Op); 2491 SDValue N0 = Op.getOperand(0); 2492 2493 // Convert to target node to get known bits 2494 if (N0.getValueType() == MVT::f32) 2495 return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0); 2496 2497 if (getTargetMachine().Options.UnsafeFPMath) { 2498 // There is a generic expand for FP_TO_FP16 with unsafe fast math. 2499 return SDValue(); 2500 } 2501 2502 assert(N0.getSimpleValueType() == MVT::f64); 2503 2504 // f64 -> f16 conversion using round-to-nearest-even rounding mode. 2505 const unsigned ExpMask = 0x7ff; 2506 const unsigned ExpBiasf64 = 1023; 2507 const unsigned ExpBiasf16 = 15; 2508 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 2509 SDValue One = DAG.getConstant(1, DL, MVT::i32); 2510 SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0); 2511 SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U, 2512 DAG.getConstant(32, DL, MVT::i64)); 2513 UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32); 2514 U = DAG.getZExtOrTrunc(U, DL, MVT::i32); 2515 SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2516 DAG.getConstant(20, DL, MVT::i64)); 2517 E = DAG.getNode(ISD::AND, DL, MVT::i32, E, 2518 DAG.getConstant(ExpMask, DL, MVT::i32)); 2519 // Subtract the fp64 exponent bias (1023) to get the real exponent and 2520 // add the f16 bias (15) to get the biased exponent for the f16 format. 2521 E = DAG.getNode(ISD::ADD, DL, MVT::i32, E, 2522 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32)); 2523 2524 SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2525 DAG.getConstant(8, DL, MVT::i32)); 2526 M = DAG.getNode(ISD::AND, DL, MVT::i32, M, 2527 DAG.getConstant(0xffe, DL, MVT::i32)); 2528 2529 SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH, 2530 DAG.getConstant(0x1ff, DL, MVT::i32)); 2531 MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U); 2532 2533 SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ); 2534 M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set); 2535 2536 // (M != 0 ? 
0x0200 : 0) | 0x7c00; 2537 SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32, 2538 DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32), 2539 Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32)); 2540 2541 // N = M | (E << 12); 2542 SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M, 2543 DAG.getNode(ISD::SHL, DL, MVT::i32, E, 2544 DAG.getConstant(12, DL, MVT::i32))); 2545 2546 // B = clamp(1-E, 0, 13); 2547 SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32, 2548 One, E); 2549 SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero); 2550 B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B, 2551 DAG.getConstant(13, DL, MVT::i32)); 2552 2553 SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M, 2554 DAG.getConstant(0x1000, DL, MVT::i32)); 2555 2556 SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B); 2557 SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B); 2558 SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE); 2559 D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1); 2560 2561 SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT); 2562 SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V, 2563 DAG.getConstant(0x7, DL, MVT::i32)); 2564 V = DAG.getNode(ISD::SRL, DL, MVT::i32, V, 2565 DAG.getConstant(2, DL, MVT::i32)); 2566 SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32), 2567 One, Zero, ISD::SETEQ); 2568 SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32), 2569 One, Zero, ISD::SETGT); 2570 V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1); 2571 V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1); 2572 2573 V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32), 2574 DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT); 2575 V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32), 2576 I, V, ISD::SETEQ); 2577 2578 // Extract the sign bit. 2579 SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2580 DAG.getConstant(16, DL, MVT::i32)); 2581 Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign, 2582 DAG.getConstant(0x8000, DL, MVT::i32)); 2583 2584 V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V); 2585 return DAG.getZExtOrTrunc(V, DL, Op.getValueType()); 2586 } 2587 2588 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op, 2589 SelectionDAG &DAG) const { 2590 SDValue Src = Op.getOperand(0); 2591 2592 // TODO: Factor out code common with LowerFP_TO_UINT. 2593 2594 EVT SrcVT = Src.getValueType(); 2595 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) { 2596 SDLoc DL(Op); 2597 2598 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src); 2599 SDValue FpToInt32 = 2600 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend); 2601 2602 return FpToInt32; 2603 } 2604 2605 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64) 2606 return LowerFP64_TO_INT(Op, DAG, true); 2607 2608 return SDValue(); 2609 } 2610 2611 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op, 2612 SelectionDAG &DAG) const { 2613 SDValue Src = Op.getOperand(0); 2614 2615 // TODO: Factor out code common with LowerFP_TO_SINT. 
2616 2617 EVT SrcVT = Src.getValueType(); 2618 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) { 2619 SDLoc DL(Op); 2620 2621 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src); 2622 SDValue FpToInt32 = 2623 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend); 2624 2625 return FpToInt32; 2626 } 2627 2628 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64) 2629 return LowerFP64_TO_INT(Op, DAG, false); 2630 2631 return SDValue(); 2632 } 2633 2634 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 2635 SelectionDAG &DAG) const { 2636 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2637 MVT VT = Op.getSimpleValueType(); 2638 MVT ScalarVT = VT.getScalarType(); 2639 2640 assert(VT.isVector()); 2641 2642 SDValue Src = Op.getOperand(0); 2643 SDLoc DL(Op); 2644 2645 // TODO: Don't scalarize on Evergreen? 2646 unsigned NElts = VT.getVectorNumElements(); 2647 SmallVector<SDValue, 8> Args; 2648 DAG.ExtractVectorElements(Src, Args, 0, NElts); 2649 2650 SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType()); 2651 for (unsigned I = 0; I < NElts; ++I) 2652 Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp); 2653 2654 return DAG.getBuildVector(VT, DL, Args); 2655 } 2656 2657 //===----------------------------------------------------------------------===// 2658 // Custom DAG optimizations 2659 //===----------------------------------------------------------------------===// 2660 2661 static bool isU24(SDValue Op, SelectionDAG &DAG) { 2662 return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24; 2663 } 2664 2665 static bool isI24(SDValue Op, SelectionDAG &DAG) { 2666 EVT VT = Op.getValueType(); 2667 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated 2668 // as unsigned 24-bit values. 2669 AMDGPUTargetLowering::numBitsSigned(Op, DAG) < 24; 2670 } 2671 2672 static bool simplifyI24(SDNode *Node24, unsigned OpIdx, 2673 TargetLowering::DAGCombinerInfo &DCI) { 2674 2675 SelectionDAG &DAG = DCI.DAG; 2676 SDValue Op = Node24->getOperand(OpIdx); 2677 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2678 EVT VT = Op.getValueType(); 2679 2680 APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24); 2681 APInt KnownZero, KnownOne; 2682 TargetLowering::TargetLoweringOpt TLO(DAG, true, true); 2683 if (TLI.SimplifyDemandedBits(Node24, OpIdx, Demanded, DCI, TLO)) 2684 return true; 2685 2686 return false; 2687 } 2688 2689 template <typename IntTy> 2690 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset, 2691 uint32_t Width, const SDLoc &DL) { 2692 if (Width + Offset < 32) { 2693 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width); 2694 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width); 2695 return DAG.getConstant(Result, DL, MVT::i32); 2696 } 2697 2698 return DAG.getConstant(Src0 >> Offset, DL, MVT::i32); 2699 } 2700 2701 static bool hasVolatileUser(SDNode *Val) { 2702 for (SDNode *U : Val->uses()) { 2703 if (MemSDNode *M = dyn_cast<MemSDNode>(U)) { 2704 if (M->isVolatile()) 2705 return true; 2706 } 2707 } 2708 2709 return false; 2710 } 2711 2712 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const { 2713 // i32 vectors are the canonical memory type. 
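  // Editor's examples of the rules below: a 4-byte v4i8 access is combined
  // into a single i32 access and a 12-byte access into v3i32, while 3-byte
  // types, sizes above 4 that are not multiples of 4 bytes, and scalar
  // 1/2/4-byte accesses are left alone.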
2714 if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT)) 2715 return false; 2716 2717 if (!VT.isByteSized()) 2718 return false; 2719 2720 unsigned Size = VT.getStoreSize(); 2721 2722 if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector()) 2723 return false; 2724 2725 if (Size == 3 || (Size > 4 && (Size % 4 != 0))) 2726 return false; 2727 2728 return true; 2729 } 2730 2731 // Replace load of an illegal type with a store of a bitcast to a friendlier 2732 // type. 2733 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N, 2734 DAGCombinerInfo &DCI) const { 2735 if (!DCI.isBeforeLegalize()) 2736 return SDValue(); 2737 2738 LoadSDNode *LN = cast<LoadSDNode>(N); 2739 if (LN->isVolatile() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN)) 2740 return SDValue(); 2741 2742 SDLoc SL(N); 2743 SelectionDAG &DAG = DCI.DAG; 2744 EVT VT = LN->getMemoryVT(); 2745 2746 unsigned Size = VT.getStoreSize(); 2747 unsigned Align = LN->getAlignment(); 2748 if (Align < Size && isTypeLegal(VT)) { 2749 bool IsFast; 2750 unsigned AS = LN->getAddressSpace(); 2751 2752 // Expand unaligned loads earlier than legalization. Due to visitation order 2753 // problems during legalization, the emitted instructions to pack and unpack 2754 // the bytes again are not eliminated in the case of an unaligned copy. 2755 if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) { 2756 if (VT.isVector()) 2757 return scalarizeVectorLoad(LN, DAG); 2758 2759 SDValue Ops[2]; 2760 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG); 2761 return DAG.getMergeValues(Ops, SDLoc(N)); 2762 } 2763 2764 if (!IsFast) 2765 return SDValue(); 2766 } 2767 2768 if (!shouldCombineMemoryType(VT)) 2769 return SDValue(); 2770 2771 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 2772 2773 SDValue NewLoad 2774 = DAG.getLoad(NewVT, SL, LN->getChain(), 2775 LN->getBasePtr(), LN->getMemOperand()); 2776 2777 SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad); 2778 DCI.CombineTo(N, BC, NewLoad.getValue(1)); 2779 return SDValue(N, 0); 2780 } 2781 2782 // Replace store of an illegal type with a store of a bitcast to a friendlier 2783 // type. 2784 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N, 2785 DAGCombinerInfo &DCI) const { 2786 if (!DCI.isBeforeLegalize()) 2787 return SDValue(); 2788 2789 StoreSDNode *SN = cast<StoreSDNode>(N); 2790 if (SN->isVolatile() || !ISD::isNormalStore(SN)) 2791 return SDValue(); 2792 2793 EVT VT = SN->getMemoryVT(); 2794 unsigned Size = VT.getStoreSize(); 2795 2796 SDLoc SL(N); 2797 SelectionDAG &DAG = DCI.DAG; 2798 unsigned Align = SN->getAlignment(); 2799 if (Align < Size && isTypeLegal(VT)) { 2800 bool IsFast; 2801 unsigned AS = SN->getAddressSpace(); 2802 2803 // Expand unaligned stores earlier than legalization. Due to visitation 2804 // order problems during legalization, the emitted instructions to pack and 2805 // unpack the bytes again are not eliminated in the case of an unaligned 2806 // copy. 
2807 if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) { 2808 if (VT.isVector()) 2809 return scalarizeVectorStore(SN, DAG); 2810 2811 return expandUnalignedStore(SN, DAG); 2812 } 2813 2814 if (!IsFast) 2815 return SDValue(); 2816 } 2817 2818 if (!shouldCombineMemoryType(VT)) 2819 return SDValue(); 2820 2821 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 2822 SDValue Val = SN->getValue(); 2823 2824 //DCI.AddToWorklist(Val.getNode()); 2825 2826 bool OtherUses = !Val.hasOneUse(); 2827 SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val); 2828 if (OtherUses) { 2829 SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal); 2830 DAG.ReplaceAllUsesOfValueWith(Val, CastBack); 2831 } 2832 2833 return DAG.getStore(SN->getChain(), SL, CastVal, 2834 SN->getBasePtr(), SN->getMemOperand()); 2835 } 2836 2837 // FIXME: This should go in generic DAG combiner with an isTruncateFree check, 2838 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU 2839 // issues. 2840 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N, 2841 DAGCombinerInfo &DCI) const { 2842 SelectionDAG &DAG = DCI.DAG; 2843 SDValue N0 = N->getOperand(0); 2844 2845 // (vt2 (assertzext (truncate vt0:x), vt1)) -> 2846 // (vt2 (truncate (assertzext vt0:x, vt1))) 2847 if (N0.getOpcode() == ISD::TRUNCATE) { 2848 SDValue N1 = N->getOperand(1); 2849 EVT ExtVT = cast<VTSDNode>(N1)->getVT(); 2850 SDLoc SL(N); 2851 2852 SDValue Src = N0.getOperand(0); 2853 EVT SrcVT = Src.getValueType(); 2854 if (SrcVT.bitsGE(ExtVT)) { 2855 SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1); 2856 return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg); 2857 } 2858 } 2859 2860 return SDValue(); 2861 } 2862 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the 2863 /// binary operation \p Opc to it with the corresponding constant operands. 2864 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl( 2865 DAGCombinerInfo &DCI, const SDLoc &SL, 2866 unsigned Opc, SDValue LHS, 2867 uint32_t ValLo, uint32_t ValHi) const { 2868 SelectionDAG &DAG = DCI.DAG; 2869 SDValue Lo, Hi; 2870 std::tie(Lo, Hi) = split64BitValue(LHS, DAG); 2871 2872 SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32); 2873 SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32); 2874 2875 SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS); 2876 SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS); 2877 2878 // Re-visit the ands. It's possible we eliminated one of them and it could 2879 // simplify the vector. 
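  // Editor's worked example: (and i64:x, 0x00000000ffffffff) becomes
  // LoAnd = (and lo_32(x), 0xffffffff) and HiAnd = (and hi_32(x), 0); the
  // first folds to lo_32(x) and the second to 0, so only the
  // build_vector/bitcast below survives.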
2880 DCI.AddToWorklist(Lo.getNode()); 2881 DCI.AddToWorklist(Hi.getNode()); 2882 2883 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd}); 2884 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 2885 } 2886 2887 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N, 2888 DAGCombinerInfo &DCI) const { 2889 EVT VT = N->getValueType(0); 2890 2891 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2892 if (!RHS) 2893 return SDValue(); 2894 2895 SDValue LHS = N->getOperand(0); 2896 unsigned RHSVal = RHS->getZExtValue(); 2897 if (!RHSVal) 2898 return LHS; 2899 2900 SDLoc SL(N); 2901 SelectionDAG &DAG = DCI.DAG; 2902 2903 switch (LHS->getOpcode()) { 2904 default: 2905 break; 2906 case ISD::ZERO_EXTEND: 2907 case ISD::SIGN_EXTEND: 2908 case ISD::ANY_EXTEND: { 2909 SDValue X = LHS->getOperand(0); 2910 2911 if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 && 2912 isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) { 2913 // Prefer build_vector as the canonical form if packed types are legal. 2914 // (shl ([asz]ext i16:x), 16 -> build_vector 0, x 2915 SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL, 2916 { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) }); 2917 return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec); 2918 } 2919 2920 // shl (ext x) => zext (shl x), if shift does not overflow int 2921 if (VT != MVT::i64) 2922 break; 2923 KnownBits Known; 2924 DAG.computeKnownBits(X, Known); 2925 unsigned LZ = Known.countMinLeadingZeros(); 2926 if (LZ < RHSVal) 2927 break; 2928 EVT XVT = X.getValueType(); 2929 SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0)); 2930 return DAG.getZExtOrTrunc(Shl, SL, VT); 2931 } 2932 } 2933 2934 if (VT != MVT::i64) 2935 return SDValue(); 2936 2937 // i64 (shl x, C) -> (build_pair 0, (shl x, C -32)) 2938 2939 // On some subtargets, 64-bit shift is a quarter rate instruction. In the 2940 // common case, splitting this into a move and a 32-bit shift is faster and 2941 // the same code size. 
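  // Editor's worked example: (shl i64:x, 35) only needs bits [28:0] of x,
  // so it becomes the pair (0, (shl (i32 (trunc x)), 3)): the low 32 result
  // bits are zero and the high half is a single full-rate 32-bit shift.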
2942 if (RHSVal < 32) 2943 return SDValue(); 2944 2945 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32); 2946 2947 SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS); 2948 SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt); 2949 2950 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2951 2952 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift}); 2953 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 2954 } 2955 2956 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N, 2957 DAGCombinerInfo &DCI) const { 2958 if (N->getValueType(0) != MVT::i64) 2959 return SDValue(); 2960 2961 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2962 if (!RHS) 2963 return SDValue(); 2964 2965 SelectionDAG &DAG = DCI.DAG; 2966 SDLoc SL(N); 2967 unsigned RHSVal = RHS->getZExtValue(); 2968 2969 // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31) 2970 if (RHSVal == 32) { 2971 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); 2972 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi, 2973 DAG.getConstant(31, SL, MVT::i32)); 2974 2975 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift}); 2976 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec); 2977 } 2978 2979 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31) 2980 if (RHSVal == 63) { 2981 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); 2982 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi, 2983 DAG.getConstant(31, SL, MVT::i32)); 2984 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift}); 2985 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec); 2986 } 2987 2988 return SDValue(); 2989 } 2990 2991 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N, 2992 DAGCombinerInfo &DCI) const { 2993 if (N->getValueType(0) != MVT::i64) 2994 return SDValue(); 2995 2996 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2997 if (!RHS) 2998 return SDValue(); 2999 3000 unsigned ShiftAmt = RHS->getZExtValue(); 3001 if (ShiftAmt < 32) 3002 return SDValue(); 3003 3004 // srl i64:x, C for C >= 32 3005 // => 3006 // build_pair (srl hi_32(x), C - 32), 0 3007 3008 SelectionDAG &DAG = DCI.DAG; 3009 SDLoc SL(N); 3010 3011 SDValue One = DAG.getConstant(1, SL, MVT::i32); 3012 SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 3013 3014 SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, N->getOperand(0)); 3015 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, 3016 VecOp, One); 3017 3018 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32); 3019 SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst); 3020 3021 SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero}); 3022 3023 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair); 3024 } 3025 3026 SDValue AMDGPUTargetLowering::performTruncateCombine( 3027 SDNode *N, DAGCombinerInfo &DCI) const { 3028 SDLoc SL(N); 3029 SelectionDAG &DAG = DCI.DAG; 3030 EVT VT = N->getValueType(0); 3031 SDValue Src = N->getOperand(0); 3032 3033 // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x) 3034 if (Src.getOpcode() == ISD::BITCAST) { 3035 SDValue Vec = Src.getOperand(0); 3036 if (Vec.getOpcode() == ISD::BUILD_VECTOR) { 3037 SDValue Elt0 = Vec.getOperand(0); 3038 EVT EltVT = Elt0.getValueType(); 3039 if (VT.getSizeInBits() <= EltVT.getSizeInBits()) { 3040 if (EltVT.isFloatingPoint()) { 3041 Elt0 = DAG.getNode(ISD::BITCAST, SL, 3042 EltVT.changeTypeToInteger(), Elt0); 3043 
} 3044 3045 return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0); 3046 } 3047 } 3048 } 3049 3050 // Equivalent of above for accessing the high element of a vector as an 3051 // integer operation. 3052 // trunc (srl (bitcast (build_vector x, y))), 16 -> trunc (bitcast y) 3053 if (Src.getOpcode() == ISD::SRL && !VT.isVector()) { 3054 if (auto K = isConstOrConstSplat(Src.getOperand(1))) { 3055 if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) { 3056 SDValue BV = stripBitcast(Src.getOperand(0)); 3057 if (BV.getOpcode() == ISD::BUILD_VECTOR && 3058 BV.getValueType().getVectorNumElements() == 2) { 3059 SDValue SrcElt = BV.getOperand(1); 3060 EVT SrcEltVT = SrcElt.getValueType(); 3061 if (SrcEltVT.isFloatingPoint()) { 3062 SrcElt = DAG.getNode(ISD::BITCAST, SL, 3063 SrcEltVT.changeTypeToInteger(), SrcElt); 3064 } 3065 3066 return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt); 3067 } 3068 } 3069 } 3070 } 3071 3072 // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit. 3073 // 3074 // i16 (trunc (srl i64:x, K)), K <= 16 -> 3075 // i16 (trunc (srl (i32 (trunc x), K))) 3076 if (VT.getScalarSizeInBits() < 32) { 3077 EVT SrcVT = Src.getValueType(); 3078 if (SrcVT.getScalarSizeInBits() > 32 && 3079 (Src.getOpcode() == ISD::SRL || 3080 Src.getOpcode() == ISD::SRA || 3081 Src.getOpcode() == ISD::SHL)) { 3082 SDValue Amt = Src.getOperand(1); 3083 KnownBits Known; 3084 DAG.computeKnownBits(Amt, Known); 3085 unsigned Size = VT.getScalarSizeInBits(); 3086 if ((Known.isConstant() && Known.getConstant().ule(Size)) || 3087 (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size))) { 3088 EVT MidVT = VT.isVector() ? 3089 EVT::getVectorVT(*DAG.getContext(), MVT::i32, 3090 VT.getVectorNumElements()) : MVT::i32; 3091 3092 EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout()); 3093 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT, 3094 Src.getOperand(0)); 3095 DCI.AddToWorklist(Trunc.getNode()); 3096 3097 if (Amt.getValueType() != NewShiftVT) { 3098 Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT); 3099 DCI.AddToWorklist(Amt.getNode()); 3100 } 3101 3102 SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT, 3103 Trunc, Amt); 3104 return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift); 3105 } 3106 } 3107 } 3108 3109 return SDValue(); 3110 } 3111 3112 // We need to specifically handle i64 mul here to avoid unnecessary conversion 3113 // instructions. If we only match on the legalized i64 mul expansion, 3114 // SimplifyDemandedBits will be unable to remove them because there will be 3115 // multiple uses due to the separate mul + mulh[su]. 3116 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL, 3117 SDValue N0, SDValue N1, unsigned Size, bool Signed) { 3118 if (Size <= 32) { 3119 unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24; 3120 return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1); 3121 } 3122 3123 // Because we want to eliminate extension instructions before the 3124 // operation, we need to create a single user here (i.e. not the separate 3125 // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it. 3126 3127 unsigned MulOpc = Signed ? 
AMDGPUISD::MUL_LOHI_I24 : AMDGPUISD::MUL_LOHI_U24; 3128 3129 SDValue Mul = DAG.getNode(MulOpc, SL, 3130 DAG.getVTList(MVT::i32, MVT::i32), N0, N1); 3131 3132 return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64, 3133 Mul.getValue(0), Mul.getValue(1)); 3134 } 3135 3136 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N, 3137 DAGCombinerInfo &DCI) const { 3138 EVT VT = N->getValueType(0); 3139 3140 unsigned Size = VT.getSizeInBits(); 3141 if (VT.isVector() || Size > 64) 3142 return SDValue(); 3143 3144 // There are i16 integer mul/mad. 3145 if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16)) 3146 return SDValue(); 3147 3148 SelectionDAG &DAG = DCI.DAG; 3149 SDLoc DL(N); 3150 3151 SDValue N0 = N->getOperand(0); 3152 SDValue N1 = N->getOperand(1); 3153 3154 // SimplifyDemandedBits has the annoying habit of turning useful zero_extends 3155 // in the source into any_extends if the result of the mul is truncated. Since 3156 // we can assume the high bits are whatever we want, use the underlying value 3157 // to avoid the unknown high bits from interfering. 3158 if (N0.getOpcode() == ISD::ANY_EXTEND) 3159 N0 = N0.getOperand(0); 3160 3161 if (N1.getOpcode() == ISD::ANY_EXTEND) 3162 N1 = N1.getOperand(0); 3163 3164 SDValue Mul; 3165 3166 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) { 3167 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32); 3168 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32); 3169 Mul = getMul24(DAG, DL, N0, N1, Size, false); 3170 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) { 3171 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32); 3172 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32); 3173 Mul = getMul24(DAG, DL, N0, N1, Size, true); 3174 } else { 3175 return SDValue(); 3176 } 3177 3178 // We need to use sext even for MUL_U24, because MUL_U24 is used 3179 // for signed multiply of 8 and 16-bit types. 
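  // Illustrative example (editorial, not from the original source): for
  // mul i16 -2, 3 the operands were sign-extended to i32 above, so MUL_U24
  // sees 0xfffffe * 3 = 0x2fffffa; truncating the i32 result back to i16
  // gives 0xfffa == -6, i.e. only the demanded low bits need to be correct.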
3180 return DAG.getSExtOrTrunc(Mul, DL, VT); 3181 } 3182 3183 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N, 3184 DAGCombinerInfo &DCI) const { 3185 EVT VT = N->getValueType(0); 3186 3187 if (!Subtarget->hasMulI24() || VT.isVector()) 3188 return SDValue(); 3189 3190 SelectionDAG &DAG = DCI.DAG; 3191 SDLoc DL(N); 3192 3193 SDValue N0 = N->getOperand(0); 3194 SDValue N1 = N->getOperand(1); 3195 3196 if (!isI24(N0, DAG) || !isI24(N1, DAG)) 3197 return SDValue(); 3198 3199 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32); 3200 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32); 3201 3202 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1); 3203 DCI.AddToWorklist(Mulhi.getNode()); 3204 return DAG.getSExtOrTrunc(Mulhi, DL, VT); 3205 } 3206 3207 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N, 3208 DAGCombinerInfo &DCI) const { 3209 EVT VT = N->getValueType(0); 3210 3211 if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32) 3212 return SDValue(); 3213 3214 SelectionDAG &DAG = DCI.DAG; 3215 SDLoc DL(N); 3216 3217 SDValue N0 = N->getOperand(0); 3218 SDValue N1 = N->getOperand(1); 3219 3220 if (!isU24(N0, DAG) || !isU24(N1, DAG)) 3221 return SDValue(); 3222 3223 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32); 3224 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32); 3225 3226 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1); 3227 DCI.AddToWorklist(Mulhi.getNode()); 3228 return DAG.getZExtOrTrunc(Mulhi, DL, VT); 3229 } 3230 3231 SDValue AMDGPUTargetLowering::performMulLoHi24Combine( 3232 SDNode *N, DAGCombinerInfo &DCI) const { 3233 SelectionDAG &DAG = DCI.DAG; 3234 3235 // Simplify demanded bits before splitting into multiple users. 3236 if (simplifyI24(N, 0, DCI) || simplifyI24(N, 1, DCI)) 3237 return SDValue(); 3238 3239 SDValue N0 = N->getOperand(0); 3240 SDValue N1 = N->getOperand(1); 3241 3242 bool Signed = (N->getOpcode() == AMDGPUISD::MUL_LOHI_I24); 3243 3244 unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24; 3245 unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24; 3246 3247 SDLoc SL(N); 3248 3249 SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1); 3250 SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1); 3251 return DAG.getMergeValues({ MulLo, MulHi }, SL); 3252 } 3253 3254 static bool isNegativeOne(SDValue Val) { 3255 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) 3256 return C->isAllOnesValue(); 3257 return false; 3258 } 3259 3260 SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG, 3261 SDValue Op, 3262 const SDLoc &DL, 3263 unsigned Opc) const { 3264 EVT VT = Op.getValueType(); 3265 EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT); 3266 if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() && 3267 LegalVT != MVT::i16)) 3268 return SDValue(); 3269 3270 if (VT != MVT::i32) 3271 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op); 3272 3273 SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op); 3274 if (VT != MVT::i32) 3275 FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX); 3276 3277 return FFBX; 3278 } 3279 3280 // The native instructions return -1 on 0 input. Optimize out a select that 3281 // produces -1 on 0. 3282 // 3283 // TODO: If zero is not undef, we could also do this if the output is compared 3284 // against the bitwidth. 3285 // 3286 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly. 
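// Illustrative DAG shape this combine targets (editorial note; the value
// names are ad hoc):
//   %c = setcc eq i32 %x, 0
//   %z = ctlz_zero_undef i32 %x
//   %r = select i1 %c, i32 -1, i32 %z
// Since the native ffbh/ffbl instructions already return -1 for a zero
// input, %r collapses to a single FFBH_U32/FFBL_B32 node.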
3287 SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
3288                                                        SDValue LHS, SDValue RHS,
3289                                                        DAGCombinerInfo &DCI) const {
3290   ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
3291   if (!CmpRhs || !CmpRhs->isNullValue())
3292     return SDValue();
3293
3294   SelectionDAG &DAG = DCI.DAG;
3295   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
3296   SDValue CmpLHS = Cond.getOperand(0);
3297
3298   unsigned Opc = (isCttzOpc(LHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) ?
3299                  AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
3300
3301   // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
3302   // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_b32 x
3303   if (CCOpcode == ISD::SETEQ &&
3304       (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
3305       RHS.getOperand(0) == CmpLHS &&
3306       isNegativeOne(LHS)) {
3307     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3308   }
3309
3310   // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
3311   // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_b32 x
3312   if (CCOpcode == ISD::SETNE &&
3313       (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) &&
3314       LHS.getOperand(0) == CmpLHS &&
3315       isNegativeOne(RHS)) {
3316     return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3317   }
3318
3319   return SDValue();
3320 }
3321
3322 static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
3323                                          unsigned Op,
3324                                          const SDLoc &SL,
3325                                          SDValue Cond,
3326                                          SDValue N1,
3327                                          SDValue N2) {
3328   SelectionDAG &DAG = DCI.DAG;
3329   EVT VT = N1.getValueType();
3330
3331   SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
3332                                   N1.getOperand(0), N2.getOperand(0));
3333   DCI.AddToWorklist(NewSelect.getNode());
3334   return DAG.getNode(Op, SL, VT, NewSelect);
3335 }
3336
3337 // Pull a free FP operation out of a select so it may fold into uses.
3338 //
3339 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
3340 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
3341 //
3342 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
3343 // select c, (fabs x), +k -> fabs (select c, x, k)
3344 static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
3345                                     SDValue N) {
3346   SelectionDAG &DAG = DCI.DAG;
3347   SDValue Cond = N.getOperand(0);
3348   SDValue LHS = N.getOperand(1);
3349   SDValue RHS = N.getOperand(2);
3350
3351   EVT VT = N.getValueType();
3352   if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
3353       (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
3354     return distributeOpThroughSelect(DCI, LHS.getOpcode(),
3355                                      SDLoc(N), Cond, LHS, RHS);
3356   }
3357
3358   bool Inv = false;
3359   if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
3360     std::swap(LHS, RHS);
3361     Inv = true;
3362   }
3363
3364   // TODO: Support vector constants.
3365   ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
3366   if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) {
3367     SDLoc SL(N);
3368     // If one side is an fneg/fabs and the other is a constant, we can push the
3369     // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
3370     SDValue NewLHS = LHS.getOperand(0);
3371     SDValue NewRHS = RHS;
3372
3373     // Careful: if the neg can be folded up, don't try to pull it back down.
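    // For instance (an illustration, not from the original source):
    //   select c, (fneg x), 2.0 -> fneg (select c, x, -2.0)
    // is only a win when the existing fneg cannot already be absorbed by
    // its user as a source modifier; the checks below guard that case.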
3374     bool ShouldFoldNeg = true;
3375
3376     if (NewLHS.hasOneUse()) {
3377       unsigned Opc = NewLHS.getOpcode();
3378       if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc))
3379         ShouldFoldNeg = false;
3380       if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
3381         ShouldFoldNeg = false;
3382     }
3383
3384     if (ShouldFoldNeg) {
3385       if (LHS.getOpcode() == ISD::FNEG)
3386         NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3387       else if (CRHS->isNegative())
3388         return SDValue();
3389
3390       if (Inv)
3391         std::swap(NewLHS, NewRHS);
3392
3393       SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
3394                                       Cond, NewLHS, NewRHS);
3395       DCI.AddToWorklist(NewSelect.getNode());
3396       return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
3397     }
3398   }
3399
3400   return SDValue();
3401 }
3402
3403
3404 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
3405                                                    DAGCombinerInfo &DCI) const {
3406   if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
3407     return Folded;
3408
3409   SDValue Cond = N->getOperand(0);
3410   if (Cond.getOpcode() != ISD::SETCC)
3411     return SDValue();
3412
3413   EVT VT = N->getValueType(0);
3414   SDValue LHS = Cond.getOperand(0);
3415   SDValue RHS = Cond.getOperand(1);
3416   SDValue CC = Cond.getOperand(2);
3417
3418   SDValue True = N->getOperand(1);
3419   SDValue False = N->getOperand(2);
3420
3421   if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
3422     SelectionDAG &DAG = DCI.DAG;
3423     if (DAG.isConstantValueOfAnyType(True) &&
3424         !DAG.isConstantValueOfAnyType(False)) {
3425       // Swap cmp + select pair to move constant to false input.
3426       // This will allow using VOPC cndmasks more often.
3427       // select (setcc x, y, cc), k, x -> select (setcc x, y, inv(cc)), x, k
3428       // Inverting the compare keeps its operands in place; only the
3429       // select inputs swap.
3430
3431       SDLoc SL(N);
3432       ISD::CondCode NewCC = getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
3433                                             LHS.getValueType().isInteger());
3434
3435       SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
3436       return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
3437     }
3438
3439     if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
3440       SDValue MinMax
3441         = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
3442       // Revisit this node so we can catch min3/max3/med3 patterns.
3443       //DCI.AddToWorklist(MinMax.getNode());
3444       return MinMax;
3445     }
3446   }
3447
3448   // There's no reason to not do this if the condition has other uses.
3449   return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
3450 }
3451
3452 static bool isConstantFPZero(SDValue N) {
3453   if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N))
3454     return C->isZero() && !C->isNegative();
3455   return false;
3456 }
3457
3458 static unsigned inverseMinMax(unsigned Opc) {
3459   switch (Opc) {
3460   case ISD::FMAXNUM:
3461     return ISD::FMINNUM;
3462   case ISD::FMINNUM:
3463     return ISD::FMAXNUM;
3464   case AMDGPUISD::FMAX_LEGACY:
3465     return AMDGPUISD::FMIN_LEGACY;
3466   case AMDGPUISD::FMIN_LEGACY:
3467     return AMDGPUISD::FMAX_LEGACY;
3468   default:
3469     llvm_unreachable("invalid min/max opcode");
3470   }
3471 }
3472
3473 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
3474                                                  DAGCombinerInfo &DCI) const {
3475   SelectionDAG &DAG = DCI.DAG;
3476   SDValue N0 = N->getOperand(0);
3477   EVT VT = N->getValueType(0);
3478
3479   unsigned Opc = N0.getOpcode();
3480
3481   // If the input has multiple uses and we can either fold the negate down, or
3482   // the other uses cannot fold it, give up.
This both prevents unprofitable 3483 // transformations and infinite loops: we won't repeatedly try to fold around 3484 // a negate that has no 'good' form. 3485 if (N0.hasOneUse()) { 3486 // This may be able to fold into the source, but at a code size cost. Don't 3487 // fold if the fold into the user is free. 3488 if (allUsesHaveSourceMods(N, 0)) 3489 return SDValue(); 3490 } else { 3491 if (fnegFoldsIntoOp(Opc) && 3492 (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode()))) 3493 return SDValue(); 3494 } 3495 3496 SDLoc SL(N); 3497 switch (Opc) { 3498 case ISD::FADD: { 3499 if (!mayIgnoreSignedZero(N0)) 3500 return SDValue(); 3501 3502 // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y)) 3503 SDValue LHS = N0.getOperand(0); 3504 SDValue RHS = N0.getOperand(1); 3505 3506 if (LHS.getOpcode() != ISD::FNEG) 3507 LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS); 3508 else 3509 LHS = LHS.getOperand(0); 3510 3511 if (RHS.getOpcode() != ISD::FNEG) 3512 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3513 else 3514 RHS = RHS.getOperand(0); 3515 3516 SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags()); 3517 if (!N0.hasOneUse()) 3518 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); 3519 return Res; 3520 } 3521 case ISD::FMUL: 3522 case AMDGPUISD::FMUL_LEGACY: { 3523 // (fneg (fmul x, y)) -> (fmul x, (fneg y)) 3524 // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y)) 3525 SDValue LHS = N0.getOperand(0); 3526 SDValue RHS = N0.getOperand(1); 3527 3528 if (LHS.getOpcode() == ISD::FNEG) 3529 LHS = LHS.getOperand(0); 3530 else if (RHS.getOpcode() == ISD::FNEG) 3531 RHS = RHS.getOperand(0); 3532 else 3533 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3534 3535 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags()); 3536 if (!N0.hasOneUse()) 3537 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); 3538 return Res; 3539 } 3540 case ISD::FMA: 3541 case ISD::FMAD: { 3542 if (!mayIgnoreSignedZero(N0)) 3543 return SDValue(); 3544 3545 // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z)) 3546 SDValue LHS = N0.getOperand(0); 3547 SDValue MHS = N0.getOperand(1); 3548 SDValue RHS = N0.getOperand(2); 3549 3550 if (LHS.getOpcode() == ISD::FNEG) 3551 LHS = LHS.getOperand(0); 3552 else if (MHS.getOpcode() == ISD::FNEG) 3553 MHS = MHS.getOperand(0); 3554 else 3555 MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS); 3556 3557 if (RHS.getOpcode() != ISD::FNEG) 3558 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3559 else 3560 RHS = RHS.getOperand(0); 3561 3562 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS); 3563 if (!N0.hasOneUse()) 3564 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); 3565 return Res; 3566 } 3567 case ISD::FMAXNUM: 3568 case ISD::FMINNUM: 3569 case AMDGPUISD::FMAX_LEGACY: 3570 case AMDGPUISD::FMIN_LEGACY: { 3571 // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y) 3572 // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y) 3573 // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y) 3574 // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y) 3575 3576 SDValue LHS = N0.getOperand(0); 3577 SDValue RHS = N0.getOperand(1); 3578 3579 // 0 doesn't have a negated inline immediate. 3580 // TODO: Shouldn't fold 1/2pi either, and should be generalized to other 3581 // operations. 
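    // For example (editorial illustration): fneg (fminnum x, 0.0) would
    // become fmaxnum (fneg x), -0.0, and -0.0 is not an inline immediate,
    // so the transform would force the constant into a register.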
3582 if (isConstantFPZero(RHS)) 3583 return SDValue(); 3584 3585 SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS); 3586 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3587 unsigned Opposite = inverseMinMax(Opc); 3588 3589 SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags()); 3590 if (!N0.hasOneUse()) 3591 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); 3592 return Res; 3593 } 3594 case ISD::FP_EXTEND: 3595 case ISD::FTRUNC: 3596 case ISD::FRINT: 3597 case ISD::FNEARBYINT: // XXX - Should fround be handled? 3598 case ISD::FSIN: 3599 case ISD::FCANONICALIZE: 3600 case AMDGPUISD::RCP: 3601 case AMDGPUISD::RCP_LEGACY: 3602 case AMDGPUISD::RCP_IFLAG: 3603 case AMDGPUISD::SIN_HW: { 3604 SDValue CvtSrc = N0.getOperand(0); 3605 if (CvtSrc.getOpcode() == ISD::FNEG) { 3606 // (fneg (fp_extend (fneg x))) -> (fp_extend x) 3607 // (fneg (rcp (fneg x))) -> (rcp x) 3608 return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0)); 3609 } 3610 3611 if (!N0.hasOneUse()) 3612 return SDValue(); 3613 3614 // (fneg (fp_extend x)) -> (fp_extend (fneg x)) 3615 // (fneg (rcp x)) -> (rcp (fneg x)) 3616 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc); 3617 return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags()); 3618 } 3619 case ISD::FP_ROUND: { 3620 SDValue CvtSrc = N0.getOperand(0); 3621 3622 if (CvtSrc.getOpcode() == ISD::FNEG) { 3623 // (fneg (fp_round (fneg x))) -> (fp_round x) 3624 return DAG.getNode(ISD::FP_ROUND, SL, VT, 3625 CvtSrc.getOperand(0), N0.getOperand(1)); 3626 } 3627 3628 if (!N0.hasOneUse()) 3629 return SDValue(); 3630 3631 // (fneg (fp_round x)) -> (fp_round (fneg x)) 3632 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc); 3633 return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1)); 3634 } 3635 case ISD::FP16_TO_FP: { 3636 // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal 3637 // f16, but legalization of f16 fneg ends up pulling it out of the source. 3638 // Put the fneg back as a legal source operation that can be matched later. 3639 SDLoc SL(N); 3640 3641 SDValue Src = N0.getOperand(0); 3642 EVT SrcVT = Src.getValueType(); 3643 3644 // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000) 3645 SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src, 3646 DAG.getConstant(0x8000, SL, SrcVT)); 3647 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg); 3648 } 3649 default: 3650 return SDValue(); 3651 } 3652 } 3653 3654 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N, 3655 DAGCombinerInfo &DCI) const { 3656 SelectionDAG &DAG = DCI.DAG; 3657 SDValue N0 = N->getOperand(0); 3658 3659 if (!N0.hasOneUse()) 3660 return SDValue(); 3661 3662 switch (N0.getOpcode()) { 3663 case ISD::FP16_TO_FP: { 3664 assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal"); 3665 SDLoc SL(N); 3666 SDValue Src = N0.getOperand(0); 3667 EVT SrcVT = Src.getValueType(); 3668 3669 // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff) 3670 SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src, 3671 DAG.getConstant(0x7fff, SL, SrcVT)); 3672 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs); 3673 } 3674 default: 3675 return SDValue(); 3676 } 3677 } 3678 3679 SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N, 3680 DAGCombinerInfo &DCI) const { 3681 const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); 3682 if (!CFP) 3683 return SDValue(); 3684 3685 // XXX - Should this flush denormals? 
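  // e.g. (rcp f32 2.0) constant-folds below to 0.5 via the APFloat
  // division (editorial example).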
3686   const APFloat &Val = CFP->getValueAPF();
3687   APFloat One(Val.getSemantics(), "1.0");
3688   return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
3689 }
3690
3691 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
3692                                                 DAGCombinerInfo &DCI) const {
3693   SelectionDAG &DAG = DCI.DAG;
3694   SDLoc DL(N);
3695
3696   switch(N->getOpcode()) {
3697   default:
3698     break;
3699   case ISD::BITCAST: {
3700     EVT DestVT = N->getValueType(0);
3701
3702     // Push casts through vector builds. This helps avoid emitting a large
3703     // number of copies when materializing floating point vector constants.
3704     //
3705     // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
3706     // vNt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
3707     if (DestVT.isVector()) {
3708       SDValue Src = N->getOperand(0);
3709       if (Src.getOpcode() == ISD::BUILD_VECTOR) {
3710         EVT SrcVT = Src.getValueType();
3711         unsigned NElts = DestVT.getVectorNumElements();
3712
3713         if (SrcVT.getVectorNumElements() == NElts) {
3714           EVT DestEltVT = DestVT.getVectorElementType();
3715
3716           SmallVector<SDValue, 8> CastedElts;
3717           SDLoc SL(N);
3718           for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
3719             SDValue Elt = Src.getOperand(I);
3720             CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
3721           }
3722
3723           return DAG.getBuildVector(DestVT, SL, CastedElts);
3724         }
3725       }
3726     }
3727
3728     if (DestVT.getSizeInBits() != 64 && !DestVT.isVector())
3729       break;
3730
3731     // Fold bitcasts of constants.
3732     //
3733     // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
3734     // TODO: Generalize and move to DAGCombiner
3735     SDValue Src = N->getOperand(0);
3736     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
3737       if (Src.getValueType() == MVT::i64) {
3738         SDLoc SL(N);
3739         uint64_t CVal = C->getZExtValue();
3740         return DAG.getNode(ISD::BUILD_VECTOR, SL, DestVT,
3741                            DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3742                            DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3743       }
3744     }
3745
3746     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
3747       const APInt &Val = C->getValueAPF().bitcastToAPInt();
3748       SDLoc SL(N);
3749       uint64_t CVal = Val.getZExtValue();
3750       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
3751                                 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3752                                 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3753
3754       return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
3755     }
3756
3757     break;
3758   }
3759   case ISD::SHL: {
3760     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3761       break;
3762
3763     return performShlCombine(N, DCI);
3764   }
3765   case ISD::SRL: {
3766     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3767       break;
3768
3769     return performSrlCombine(N, DCI);
3770   }
3771   case ISD::SRA: {
3772     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3773       break;
3774
3775     return performSraCombine(N, DCI);
3776   }
3777   case ISD::TRUNCATE:
3778     return performTruncateCombine(N, DCI);
3779   case ISD::MUL:
3780     return performMulCombine(N, DCI);
3781   case ISD::MULHS:
3782     return performMulhsCombine(N, DCI);
3783   case ISD::MULHU:
3784     return performMulhuCombine(N, DCI);
3785   case AMDGPUISD::MUL_I24:
3786   case AMDGPUISD::MUL_U24:
3787   case AMDGPUISD::MULHI_I24:
3788   case AMDGPUISD::MULHI_U24: {
3789     // If the first call to simplify is successful, then N may end up being
3790     // deleted, so we shouldn't call simplifyI24 again.
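    // e.g. for (mul_u24 (and x, 0xffffff), y) only the low 24 bits of the
    // first operand are demanded, so the mask can be stripped here
    // (illustrative note, not from the original comment).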
3791     simplifyI24(N, 0, DCI) || simplifyI24(N, 1, DCI);
3792     return SDValue();
3793   }
3794   case AMDGPUISD::MUL_LOHI_I24:
3795   case AMDGPUISD::MUL_LOHI_U24:
3796     return performMulLoHi24Combine(N, DCI);
3797   case ISD::SELECT:
3798     return performSelectCombine(N, DCI);
3799   case ISD::FNEG:
3800     return performFNegCombine(N, DCI);
3801   case ISD::FABS:
3802     return performFAbsCombine(N, DCI);
3803   case AMDGPUISD::BFE_I32:
3804   case AMDGPUISD::BFE_U32: {
3805     assert(!N->getValueType(0).isVector() &&
3806            "Vector handling of BFE not implemented");
3807     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
3808     if (!Width)
3809       break;
3810
3811     uint32_t WidthVal = Width->getZExtValue() & 0x1f;
3812     if (WidthVal == 0)
3813       return DAG.getConstant(0, DL, MVT::i32);
3814
3815     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
3816     if (!Offset)
3817       break;
3818
3819     SDValue BitsFrom = N->getOperand(0);
3820     uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
3821
3822     bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
3823
3824     if (OffsetVal == 0) {
3825       // This is already sign / zero extended, so try to fold away extra BFEs.
3826       unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
3827
3828       unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
3829       if (OpSignBits >= SignBits)
3830         return BitsFrom;
3831
3832       EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
3833       if (Signed) {
3834         // This is a sign_extend_inreg. Replace it to take advantage of existing
3835         // DAG Combines. If not eliminated, we will match back to BFE during
3836         // selection.
3837
3838         // TODO: The sext_inreg of extended types ends up here, although we
3839         // could handle them in a single BFE.
3840         return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
3841                            DAG.getValueType(SmallVT));
3842       }
3843
3844       return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
3845     }
3846
3847     if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
3848       if (Signed) {
3849         return constantFoldBFE<int32_t>(DAG,
3850                                         CVal->getSExtValue(),
3851                                         OffsetVal,
3852                                         WidthVal,
3853                                         DL);
3854       }
3855
3856       return constantFoldBFE<uint32_t>(DAG,
3857                                        CVal->getZExtValue(),
3858                                        OffsetVal,
3859                                        WidthVal,
3860                                        DL);
3861     }
3862
3863     if ((OffsetVal + WidthVal) >= 32 &&
3864         !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
3865       SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
3866       return DAG.getNode(Signed ?
ISD::SRA : ISD::SRL, DL, MVT::i32, 3867 BitsFrom, ShiftVal); 3868 } 3869 3870 if (BitsFrom.hasOneUse()) { 3871 APInt Demanded = APInt::getBitsSet(32, 3872 OffsetVal, 3873 OffsetVal + WidthVal); 3874 3875 KnownBits Known; 3876 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 3877 !DCI.isBeforeLegalizeOps()); 3878 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3879 if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) || 3880 TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) { 3881 DCI.CommitTargetLoweringOpt(TLO); 3882 } 3883 } 3884 3885 break; 3886 } 3887 case ISD::LOAD: 3888 return performLoadCombine(N, DCI); 3889 case ISD::STORE: 3890 return performStoreCombine(N, DCI); 3891 case AMDGPUISD::RCP: 3892 case AMDGPUISD::RCP_IFLAG: 3893 return performRcpCombine(N, DCI); 3894 case ISD::AssertZext: 3895 case ISD::AssertSext: 3896 return performAssertSZExtCombine(N, DCI); 3897 } 3898 return SDValue(); 3899 } 3900 3901 //===----------------------------------------------------------------------===// 3902 // Helper functions 3903 //===----------------------------------------------------------------------===// 3904 3905 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG, 3906 const TargetRegisterClass *RC, 3907 unsigned Reg, EVT VT, 3908 const SDLoc &SL, 3909 bool RawReg) const { 3910 MachineFunction &MF = DAG.getMachineFunction(); 3911 MachineRegisterInfo &MRI = MF.getRegInfo(); 3912 unsigned VReg; 3913 3914 if (!MRI.isLiveIn(Reg)) { 3915 VReg = MRI.createVirtualRegister(RC); 3916 MRI.addLiveIn(Reg, VReg); 3917 } else { 3918 VReg = MRI.getLiveInVirtReg(Reg); 3919 } 3920 3921 if (RawReg) 3922 return DAG.getRegister(VReg, VT); 3923 3924 return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT); 3925 } 3926 3927 SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG, 3928 EVT VT, 3929 const SDLoc &SL, 3930 int64_t Offset) const { 3931 MachineFunction &MF = DAG.getMachineFunction(); 3932 MachineFrameInfo &MFI = MF.getFrameInfo(); 3933 3934 int FI = MFI.CreateFixedObject(VT.getStoreSize(), Offset, true); 3935 auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset); 3936 SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32); 3937 3938 return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, 4, 3939 MachineMemOperand::MODereferenceable | 3940 MachineMemOperand::MOInvariant); 3941 } 3942 3943 SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG, 3944 const SDLoc &SL, 3945 SDValue Chain, 3946 SDValue StackPtr, 3947 SDValue ArgVal, 3948 int64_t Offset) const { 3949 MachineFunction &MF = DAG.getMachineFunction(); 3950 MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset); 3951 3952 SDValue Ptr = DAG.getObjectPtrOffset(SL, StackPtr, Offset); 3953 SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, 4, 3954 MachineMemOperand::MODereferenceable); 3955 return Store; 3956 } 3957 3958 SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG, 3959 const TargetRegisterClass *RC, 3960 EVT VT, const SDLoc &SL, 3961 const ArgDescriptor &Arg) const { 3962 assert(Arg && "Attempting to load missing argument"); 3963 3964 if (Arg.isRegister()) 3965 return CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL); 3966 return loadStackInputValue(DAG, VT, SL, Arg.getStackOffset()); 3967 } 3968 3969 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset( 3970 const MachineFunction &MF, const ImplicitParameter Param) const { 3971 const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>(); 3972 
const AMDGPUSubtarget &ST = 3973 AMDGPUSubtarget::get(getTargetMachine(), MF.getFunction()); 3974 unsigned ExplicitArgOffset = ST.getExplicitKernelArgOffset(MF.getFunction()); 3975 unsigned Alignment = ST.getAlignmentForImplicitArgPtr(); 3976 uint64_t ArgOffset = alignTo(MFI->getExplicitKernArgSize(), Alignment) + 3977 ExplicitArgOffset; 3978 switch (Param) { 3979 case GRID_DIM: 3980 return ArgOffset; 3981 case GRID_OFFSET: 3982 return ArgOffset + 4; 3983 } 3984 llvm_unreachable("unexpected implicit parameter type"); 3985 } 3986 3987 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node; 3988 3989 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const { 3990 switch ((AMDGPUISD::NodeType)Opcode) { 3991 case AMDGPUISD::FIRST_NUMBER: break; 3992 // AMDIL DAG nodes 3993 NODE_NAME_CASE(UMUL); 3994 NODE_NAME_CASE(BRANCH_COND); 3995 3996 // AMDGPU DAG nodes 3997 NODE_NAME_CASE(IF) 3998 NODE_NAME_CASE(ELSE) 3999 NODE_NAME_CASE(LOOP) 4000 NODE_NAME_CASE(CALL) 4001 NODE_NAME_CASE(TC_RETURN) 4002 NODE_NAME_CASE(TRAP) 4003 NODE_NAME_CASE(RET_FLAG) 4004 NODE_NAME_CASE(RETURN_TO_EPILOG) 4005 NODE_NAME_CASE(ENDPGM) 4006 NODE_NAME_CASE(DWORDADDR) 4007 NODE_NAME_CASE(FRACT) 4008 NODE_NAME_CASE(SETCC) 4009 NODE_NAME_CASE(SETREG) 4010 NODE_NAME_CASE(FMA_W_CHAIN) 4011 NODE_NAME_CASE(FMUL_W_CHAIN) 4012 NODE_NAME_CASE(CLAMP) 4013 NODE_NAME_CASE(COS_HW) 4014 NODE_NAME_CASE(SIN_HW) 4015 NODE_NAME_CASE(FMAX_LEGACY) 4016 NODE_NAME_CASE(FMIN_LEGACY) 4017 NODE_NAME_CASE(FMAX3) 4018 NODE_NAME_CASE(SMAX3) 4019 NODE_NAME_CASE(UMAX3) 4020 NODE_NAME_CASE(FMIN3) 4021 NODE_NAME_CASE(SMIN3) 4022 NODE_NAME_CASE(UMIN3) 4023 NODE_NAME_CASE(FMED3) 4024 NODE_NAME_CASE(SMED3) 4025 NODE_NAME_CASE(UMED3) 4026 NODE_NAME_CASE(FDOT2) 4027 NODE_NAME_CASE(URECIP) 4028 NODE_NAME_CASE(DIV_SCALE) 4029 NODE_NAME_CASE(DIV_FMAS) 4030 NODE_NAME_CASE(DIV_FIXUP) 4031 NODE_NAME_CASE(FMAD_FTZ) 4032 NODE_NAME_CASE(TRIG_PREOP) 4033 NODE_NAME_CASE(RCP) 4034 NODE_NAME_CASE(RSQ) 4035 NODE_NAME_CASE(RCP_LEGACY) 4036 NODE_NAME_CASE(RSQ_LEGACY) 4037 NODE_NAME_CASE(RCP_IFLAG) 4038 NODE_NAME_CASE(FMUL_LEGACY) 4039 NODE_NAME_CASE(RSQ_CLAMP) 4040 NODE_NAME_CASE(LDEXP) 4041 NODE_NAME_CASE(FP_CLASS) 4042 NODE_NAME_CASE(DOT4) 4043 NODE_NAME_CASE(CARRY) 4044 NODE_NAME_CASE(BORROW) 4045 NODE_NAME_CASE(BFE_U32) 4046 NODE_NAME_CASE(BFE_I32) 4047 NODE_NAME_CASE(BFI) 4048 NODE_NAME_CASE(BFM) 4049 NODE_NAME_CASE(FFBH_U32) 4050 NODE_NAME_CASE(FFBH_I32) 4051 NODE_NAME_CASE(FFBL_B32) 4052 NODE_NAME_CASE(MUL_U24) 4053 NODE_NAME_CASE(MUL_I24) 4054 NODE_NAME_CASE(MULHI_U24) 4055 NODE_NAME_CASE(MULHI_I24) 4056 NODE_NAME_CASE(MUL_LOHI_U24) 4057 NODE_NAME_CASE(MUL_LOHI_I24) 4058 NODE_NAME_CASE(MAD_U24) 4059 NODE_NAME_CASE(MAD_I24) 4060 NODE_NAME_CASE(MAD_I64_I32) 4061 NODE_NAME_CASE(MAD_U64_U32) 4062 NODE_NAME_CASE(PERM) 4063 NODE_NAME_CASE(TEXTURE_FETCH) 4064 NODE_NAME_CASE(EXPORT) 4065 NODE_NAME_CASE(EXPORT_DONE) 4066 NODE_NAME_CASE(R600_EXPORT) 4067 NODE_NAME_CASE(CONST_ADDRESS) 4068 NODE_NAME_CASE(REGISTER_LOAD) 4069 NODE_NAME_CASE(REGISTER_STORE) 4070 NODE_NAME_CASE(SAMPLE) 4071 NODE_NAME_CASE(SAMPLEB) 4072 NODE_NAME_CASE(SAMPLED) 4073 NODE_NAME_CASE(SAMPLEL) 4074 NODE_NAME_CASE(CVT_F32_UBYTE0) 4075 NODE_NAME_CASE(CVT_F32_UBYTE1) 4076 NODE_NAME_CASE(CVT_F32_UBYTE2) 4077 NODE_NAME_CASE(CVT_F32_UBYTE3) 4078 NODE_NAME_CASE(CVT_PKRTZ_F16_F32) 4079 NODE_NAME_CASE(CVT_PKNORM_I16_F32) 4080 NODE_NAME_CASE(CVT_PKNORM_U16_F32) 4081 NODE_NAME_CASE(CVT_PK_I16_I32) 4082 NODE_NAME_CASE(CVT_PK_U16_U32) 4083 NODE_NAME_CASE(FP_TO_FP16) 4084 
NODE_NAME_CASE(FP16_ZEXT)
4085   NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
4086   NODE_NAME_CASE(CONST_DATA_PTR)
4087   NODE_NAME_CASE(PC_ADD_REL_OFFSET)
4088   NODE_NAME_CASE(KILL)
4089   NODE_NAME_CASE(DUMMY_CHAIN)
4090   case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
4091   NODE_NAME_CASE(INIT_EXEC)
4092   NODE_NAME_CASE(INIT_EXEC_FROM_INPUT)
4093   NODE_NAME_CASE(SENDMSG)
4094   NODE_NAME_CASE(SENDMSGHALT)
4095   NODE_NAME_CASE(INTERP_MOV)
4096   NODE_NAME_CASE(INTERP_P1)
4097   NODE_NAME_CASE(INTERP_P2)
4098   NODE_NAME_CASE(STORE_MSKOR)
4099   NODE_NAME_CASE(LOAD_CONSTANT)
4100   NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
4101   NODE_NAME_CASE(TBUFFER_STORE_FORMAT_X3)
4102   NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
4103   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
4104   NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
4105   NODE_NAME_CASE(ATOMIC_CMP_SWAP)
4106   NODE_NAME_CASE(ATOMIC_INC)
4107   NODE_NAME_CASE(ATOMIC_DEC)
4108   NODE_NAME_CASE(ATOMIC_LOAD_FADD)
4109   NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
4110   NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
4111   NODE_NAME_CASE(BUFFER_LOAD)
4112   NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
4113   NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
4114   NODE_NAME_CASE(BUFFER_STORE)
4115   NODE_NAME_CASE(BUFFER_STORE_FORMAT)
4116   NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
4117   NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
4118   NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
4119   NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
4120   NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
4121   NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
4122   NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
4123   NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
4124   NODE_NAME_CASE(BUFFER_ATOMIC_AND)
4125   NODE_NAME_CASE(BUFFER_ATOMIC_OR)
4126   NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
4127   NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)
4128
4129   case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
4130   }
4131   return nullptr;
4132 }
4133
4134 SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
4135                                               SelectionDAG &DAG, int Enabled,
4136                                               int &RefinementSteps,
4137                                               bool &UseOneConstNR,
4138                                               bool Reciprocal) const {
4139   EVT VT = Operand.getValueType();
4140
4141   if (VT == MVT::f32) {
4142     RefinementSteps = 0;
4143     return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
4144   }
4145
4146   // TODO: There is also an f64 rsq instruction, but the documentation is less
4147   // clear on its precision.
4148
4149   return SDValue();
4150 }
4151
4152 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
4153                                                SelectionDAG &DAG, int Enabled,
4154                                                int &RefinementSteps) const {
4155   EVT VT = Operand.getValueType();
4156
4157   if (VT == MVT::f32) {
4158     // Reciprocal, < 1 ulp error.
4159     //
4160     // This reciprocal approximation converges to < 0.5 ulp error with one
4161     // Newton-Raphson iteration performed with two fused multiply-adds (FMAs).
4162
4163     RefinementSteps = 0;
4164     return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
4165   }
4166
4167   // TODO: There is also an f64 rcp instruction, but the documentation is less
4168   // clear on its precision.
4169
4170   return SDValue();
4171 }
4172
4173 void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
4174     const SDValue Op, KnownBits &Known,
4175     const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
4176
4177   Known.resetAll(); // Don't know anything.
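  // Editorial reminder for the cases below: Known.Zero and Known.One record
  // bits proven zero and proven one respectively; a bit set in neither mask
  // is unknown.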
4178 4179 unsigned Opc = Op.getOpcode(); 4180 4181 switch (Opc) { 4182 default: 4183 break; 4184 case AMDGPUISD::CARRY: 4185 case AMDGPUISD::BORROW: { 4186 Known.Zero = APInt::getHighBitsSet(32, 31); 4187 break; 4188 } 4189 4190 case AMDGPUISD::BFE_I32: 4191 case AMDGPUISD::BFE_U32: { 4192 ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4193 if (!CWidth) 4194 return; 4195 4196 uint32_t Width = CWidth->getZExtValue() & 0x1f; 4197 4198 if (Opc == AMDGPUISD::BFE_U32) 4199 Known.Zero = APInt::getHighBitsSet(32, 32 - Width); 4200 4201 break; 4202 } 4203 case AMDGPUISD::FP_TO_FP16: 4204 case AMDGPUISD::FP16_ZEXT: { 4205 unsigned BitWidth = Known.getBitWidth(); 4206 4207 // High bits are zero. 4208 Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16); 4209 break; 4210 } 4211 case AMDGPUISD::MUL_U24: 4212 case AMDGPUISD::MUL_I24: { 4213 KnownBits LHSKnown, RHSKnown; 4214 DAG.computeKnownBits(Op.getOperand(0), LHSKnown, Depth + 1); 4215 DAG.computeKnownBits(Op.getOperand(1), RHSKnown, Depth + 1); 4216 4217 unsigned TrailZ = LHSKnown.countMinTrailingZeros() + 4218 RHSKnown.countMinTrailingZeros(); 4219 Known.Zero.setLowBits(std::min(TrailZ, 32u)); 4220 4221 unsigned LHSValBits = 32 - std::max(LHSKnown.countMinSignBits(), 8u); 4222 unsigned RHSValBits = 32 - std::max(RHSKnown.countMinSignBits(), 8u); 4223 unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u); 4224 if (MaxValBits >= 32) 4225 break; 4226 bool Negative = false; 4227 if (Opc == AMDGPUISD::MUL_I24) { 4228 bool LHSNegative = !!(LHSKnown.One & (1 << 23)); 4229 bool LHSPositive = !!(LHSKnown.Zero & (1 << 23)); 4230 bool RHSNegative = !!(RHSKnown.One & (1 << 23)); 4231 bool RHSPositive = !!(RHSKnown.Zero & (1 << 23)); 4232 if ((!LHSNegative && !LHSPositive) || (!RHSNegative && !RHSPositive)) 4233 break; 4234 Negative = (LHSNegative && RHSPositive) || (LHSPositive && RHSNegative); 4235 } 4236 if (Negative) 4237 Known.One.setHighBits(32 - MaxValBits); 4238 else 4239 Known.Zero.setHighBits(32 - MaxValBits); 4240 break; 4241 } 4242 case AMDGPUISD::PERM: { 4243 ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4244 if (!CMask) 4245 return; 4246 4247 KnownBits LHSKnown, RHSKnown; 4248 DAG.computeKnownBits(Op.getOperand(0), LHSKnown, Depth + 1); 4249 DAG.computeKnownBits(Op.getOperand(1), RHSKnown, Depth + 1); 4250 unsigned Sel = CMask->getZExtValue(); 4251 4252 for (unsigned I = 0; I < 32; I += 8) { 4253 unsigned SelBits = Sel & 0xff; 4254 if (SelBits < 4) { 4255 SelBits *= 8; 4256 Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I; 4257 Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I; 4258 } else if (SelBits < 7) { 4259 SelBits = (SelBits & 3) * 8; 4260 Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I; 4261 Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I; 4262 } else if (SelBits == 0x0c) { 4263 Known.Zero |= 0xff << I; 4264 } else if (SelBits > 0x0c) { 4265 Known.One |= 0xff << I; 4266 } 4267 Sel >>= 8; 4268 } 4269 break; 4270 } 4271 case ISD::INTRINSIC_WO_CHAIN: { 4272 unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 4273 switch (IID) { 4274 case Intrinsic::amdgcn_mbcnt_lo: 4275 case Intrinsic::amdgcn_mbcnt_hi: { 4276 const GCNSubtarget &ST = 4277 DAG.getMachineFunction().getSubtarget<GCNSubtarget>(); 4278 // These return at most the wavefront size - 1. 
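      // e.g. on a wave64 subtarget getWavefrontSizeLog2() == 6, so for an
      // i32 result bits [31:6] are known zero (illustrative note).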
4279 unsigned Size = Op.getValueType().getSizeInBits(); 4280 Known.Zero.setHighBits(Size - ST.getWavefrontSizeLog2()); 4281 break; 4282 } 4283 default: 4284 break; 4285 } 4286 } 4287 } 4288 } 4289 4290 unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode( 4291 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 4292 unsigned Depth) const { 4293 switch (Op.getOpcode()) { 4294 case AMDGPUISD::BFE_I32: { 4295 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4296 if (!Width) 4297 return 1; 4298 4299 unsigned SignBits = 32 - Width->getZExtValue() + 1; 4300 if (!isNullConstant(Op.getOperand(1))) 4301 return SignBits; 4302 4303 // TODO: Could probably figure something out with non-0 offsets. 4304 unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1); 4305 return std::max(SignBits, Op0SignBits); 4306 } 4307 4308 case AMDGPUISD::BFE_U32: { 4309 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4310 return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1; 4311 } 4312 4313 case AMDGPUISD::CARRY: 4314 case AMDGPUISD::BORROW: 4315 return 31; 4316 case AMDGPUISD::FP_TO_FP16: 4317 case AMDGPUISD::FP16_ZEXT: 4318 return 16; 4319 default: 4320 return 1; 4321 } 4322 } 4323
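// Editorial example for the BFE_I32 case above: a constant width of 8 with
// a zero offset extracts an 8-bit field sign-extended to 32 bits, giving
// 32 - 8 + 1 == 25 known sign bits.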