//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//

class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}

class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
    : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;

}

def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;

def u32imm : Operand<i32> {
  let PrintMethod = "printU32ImmOperand";
}

def u16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
}

def u8imm : Operand<i8> {
  let PrintMethod = "printU8ImmOperand";
}

//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
def brtarget : Operand<OtherVT>;

//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//

def COND_OEQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;

def COND_OGT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;

def COND_OGE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;

def COND_OLT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
>;

def COND_OLE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
>;

def COND_UNE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
>;

def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for unsigned comparisons
//===----------------------------------------------------------------------===//

def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//

def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for integer equality
//===----------------------------------------------------------------------===//

def COND_EQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
>;

def COND_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
>;

def COND_NULL : PatLeaf <
  (cond),
  [{return false;}]
>;

//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//

def global_store : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

// Global address space loads
def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

// Constant address space loads
def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

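// "az" extending loads: loads whose extension type is either zero-extend or
// any-extend, so a zero-extending hardware load satisfies both.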
def az_extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
  LoadSDNode *L = cast<LoadSDNode>(N);
  return L->getExtensionType() == ISD::ZEXTLOAD ||
         L->getExtensionType() == ISD::EXTLOAD;
}]>;

def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def az_extloadi32_global : PatFrag<(ops node:$ptr),
                                   (az_extloadi32 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_constant : PatFrag<(ops node:$ptr),
                                     (az_extloadi32 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei8 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
                                   (truncstorei16 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_store : PatFrag<(ops node:$val, node:$ptr),
                          (store node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstorei8 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei16 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

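// Restricts a binary atomic SDNode to operations on the local (LDS) address
// space.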
class local_binary_atomic_op<SDNode atomic_op> :
  PatFrag<(ops node:$ptr, node:$value),
    (atomic_op node:$ptr, node:$value), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;

def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                           (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return dyn_cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;

def atomic_cmp_swap_32_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i32 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_cmp_swap_64_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i64 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

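// Useful floating-point constants, stored as the raw bit pattern of their
// IEEE 754 single-precision encoding:
//   TWO_PI              0x40c90fdb = 6.2831855f  (2 * pi)
//   PI                  0x40490fdb = 3.1415927f
//   TWO_PI_INV          0x3e22f983 = 0.15915494f (1 / (2 * pi))
//   FP_UINT_MAX_PLUS_1  0x4f800000 = 4294967296.0f (2^32)
//   FP32_NEG_ONE        0xbf800000 = -1.0f
//   FP32_ONE            0x3f800000 = 1.0f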
class Constants {
  int TWO_PI = 0x40c90fdb;
  int PI = 0x40490fdb;
  int TWO_PI_INV = 0x3e22f983;
  int FP_UINT_MAX_PLUS_1 = 0x4f800000;    // 1 << 32 in floating point encoding
  int FP32_NEG_ONE = 0xbf800000;
  int FP32_ONE = 0x3f800000;
}
def CONST : Constants;

def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;

let isCodeGenOnly = 1, isPseudo = 1 in {

let usesCustomInserter = 1 in {

class CLAMP <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "CLAMP $dst, $src0",
  [(set f32:$dst, (AMDGPUclamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
>;

class FABS <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FABS $dst, $src0",
  [(set f32:$dst, (fabs f32:$src0))]
>;

class FNEG <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FNEG $dst, $src0",
  [(set f32:$dst, (fneg f32:$src0))]
>;

} // usesCustomInserter = 1

multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
                              ComplexPattern addrPat> {
let UseNamedOperandTable = 1 in {

  def RegisterLoad : AMDGPUShaderInst <
    (outs dstClass:$dst),
    (ins addrClass:$addr, i32imm:$chan),
    "RegisterLoad $dst, $addr",
    [(set i32:$dst, (AMDGPUregister_load addrPat:$addr, (i32 timm:$chan)))]
  > {
    let isRegisterLoad = 1;
  }

  def RegisterStore : AMDGPUShaderInst <
    (outs),
    (ins dstClass:$val, addrClass:$addr, i32imm:$chan),
    "RegisterStore $val, $addr",
    [(AMDGPUregister_store i32:$val, addrPat:$addr, (i32 timm:$chan))]
  > {
    let isRegisterStore = 1;
  }
}
}

} // End isCodeGenOnly = 1, isPseudo = 1

/* Generic helper patterns for intrinsics */
/* -------------------------------------- */

class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : Pat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;

/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : Pat<
  (sub_type (vector_extract vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
>;

/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : Pat <
  (vector_insert vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : Pat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : Pat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;

// BFI_INT patterns

multiclass BFIPatterns <Instruction BFI_INT, Instruction LoadImm32> {

  // Definition from ISA doc:
  // (y & x) | (z & ~x)
  def : Pat <
    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
    (BFI_INT $x, $y, $z)
  >;

  // SHA-256 Ch function
  // z ^ (x & (y ^ z))
  def : Pat <
    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
    (BFI_INT $x, $y, $z)
  >;

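  // fcopysign: with the mask 0x7fffffff, BFI_INT takes every bit except the
  // sign bit from $src0 and the sign bit from $src1.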
  def : Pat <
    (fcopysign f32:$src0, f32:$src1),
    (BFI_INT (LoadImm32 0x7fffffff), $src0, $src1)
  >;

  def : Pat <
    (f64 (fcopysign f64:$src0, f64:$src1)),
    (INSERT_SUBREG (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                     (i32 (EXTRACT_SUBREG $src0, sub0)), sub0),
                   (BFI_INT (LoadImm32 0x7fffffff),
                            (i32 (EXTRACT_SUBREG $src0, sub1)),
                            (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
  >;
}

// SHA-256 Ma patterns

// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
class SHA256MaPattern <Instruction BFI_INT, Instruction XOR> : Pat <
  (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
  (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
>;

// Bitfield extract patterns

/*

XXX: The BFE pattern is not working correctly because the XForm is not being
applied.

def legalshift32 : ImmLeaf <i32, [{return Imm >=0 && Imm < 32;}]>;
def bfemask : PatLeaf <(imm), [{return isMask_32(N->getZExtValue());}],
  SDNodeXForm<imm, [{ return CurDAG->getTargetConstant(CountTrailingOnes_32(N->getZExtValue()), MVT::i32);}]>>;

class BFEPattern <Instruction BFE> : Pat <
  (and (srl i32:$x, legalshift32:$y), bfemask:$z),
  (BFE $x, $y, $z)
>;

*/

// rotr pattern
class ROTRPattern <Instruction BIT_ALIGN> : Pat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;

// 24-bit arithmetic patterns
def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;

/*
class UMUL24Pattern <Instruction UMUL24> : Pat <
  (mul U24:$x, U24:$y),
  (UMUL24 $x, $y)
>;
*/

class IMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

class UMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

multiclass Expand24IBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_imad24 : Pat <
    (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_imul24 : Pat <
    (AMDGPUmul_i24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}

multiclass Expand24UBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_umad24 : Pat <
    (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_umul24 : Pat <
    (AMDGPUmul_u24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}

class RcpPat<Instruction RcpInst, ValueType vt> : Pat <
  (fdiv FP_ONE, vt:$src),
  (RcpInst $src)
>;

multiclass RsqPat<Instruction RsqInst, ValueType vt> {
  def : Pat <
    (fdiv FP_ONE, (fsqrt vt:$src)),
    (RsqInst $src)
  >;

  def : Pat <
    (AMDGPUrcp (fsqrt vt:$src)),
    (RsqInst $src)
  >;
}

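// Per-generation instruction definitions: the R600 family (R600, R700,
// Evergreen, Cayman) and Southern Islands.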
"R600Instructions.td" 540 include "R700Instructions.td" 541 include "EvergreenInstructions.td" 542 include "CaymanInstructions.td" 543 544 include "SIInstrInfo.td" 545 546