//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//

class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}

class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
    : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;

}

def FP32Denormals : Predicate<"Subtarget.hasFP32Denormals()">;
def FP64Denormals : Predicate<"Subtarget.hasFP64Denormals()">;
def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;

def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;

let OperandType = "OPERAND_IMMEDIATE" in {

def u32imm : Operand<i32> {
  let PrintMethod = "printU32ImmOperand";
}

def u16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
}

def u8imm : Operand<i8> {
  let PrintMethod = "printU8ImmOperand";
}

} // End OperandType = "OPERAND_IMMEDIATE"

//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
def brtarget : Operand<OtherVT>;

//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//

def COND_OEQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;

def COND_ONE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETONE || N->get() == ISD::SETNE;}]
>;

def COND_OGT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;

def COND_OGE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;

def COND_OLT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
>;

def COND_OLE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
>;


def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for unsigned / unordered comparisons
//===----------------------------------------------------------------------===//

def COND_UEQ : PatLeaf <(cond), [{return N->get() == ISD::SETUEQ;}]>;
def COND_UNE : PatLeaf <(cond), [{return N->get() == ISD::SETUNE;}]>;
def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;

// XXX - For some reason the R600 version prefers to use unordered
// for setne?
def COND_UNE_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
>;

//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//

def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for integer equality
//===----------------------------------------------------------------------===//

def COND_EQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
>;

def COND_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
>;

def COND_NULL : PatLeaf <
  (cond),
  [{(void)N; return false;}]
>;

//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//

class PrivateMemOp <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
}]>;

class PrivateLoad <SDPatternOperator op> : PrivateMemOp <
  (ops node:$ptr), (op node:$ptr)
>;

class PrivateStore <SDPatternOperator op> : PrivateMemOp <
  (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
>;

def load_private : PrivateLoad <load>;

def truncstorei8_private : PrivateStore <truncstorei8>;
def truncstorei16_private : PrivateStore <truncstorei16>;
def store_private : PrivateStore <store>;

def global_store : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

// Global address space loads
def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

// Constant address space loads
def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

class AZExtLoadBase <SDPatternOperator ld_node>: PatFrag<(ops node:$ptr),
                                                         (ld_node node:$ptr), [{
  LoadSDNode *L = cast<LoadSDNode>(N);
  return L->getExtensionType() == ISD::ZEXTLOAD ||
         L->getExtensionType() == ISD::EXTLOAD;
}]>;

def az_extload : AZExtLoadBase <unindexedload>;

def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_flat : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_flat : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def extloadi8_private : PrivateLoad <az_extloadi8>;
def sextloadi8_private : PrivateLoad <sextloadi8>;

def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_flat : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_flat : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def extloadi16_private : PrivateLoad <az_extloadi16>;
def sextloadi16_private : PrivateLoad <sextloadi16>;

def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def az_extloadi32_global : PatFrag<(ops node:$ptr),
                                   (az_extloadi32 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_flat : PatFrag<(ops node:$ptr),
                                 (az_extloadi32 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_constant : PatFrag<(ops node:$ptr),
                                     (az_extloadi32 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei8 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
                                   (truncstorei16 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_flat : PatFrag<(ops node:$val, node:$ptr),
                                (truncstorei8 node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_flat : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstorei16 node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_store : PatFrag<(ops node:$val, node:$ptr),
                          (store node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstorei8 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei16 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
}]>;

def local_load_aligned8bytes : Aligned8Bytes <
  (ops node:$ptr), (local_load node:$ptr)
>;

def local_store_aligned8bytes : Aligned8Bytes <
  (ops node:$val, node:$ptr), (local_store node:$val, node:$ptr)
>;

class local_binary_atomic_op<SDNode atomic_op> :
  PatFrag<(ops node:$ptr, node:$value),
          (atomic_op node:$ptr, node:$value), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;


def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;

def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                           (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;

multiclass AtomicCmpSwapLocal <SDNode cmp_swap_node> {

  def _32_local : PatFrag <
    (ops node:$ptr, node:$cmp, node:$swap),
    (cmp_swap_node node:$ptr, node:$cmp, node:$swap), [{
      AtomicSDNode *AN = cast<AtomicSDNode>(N);
      return AN->getMemoryVT() == MVT::i32 &&
             AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
  }]>;

  def _64_local : PatFrag<
    (ops node:$ptr, node:$cmp, node:$swap),
    (cmp_swap_node node:$ptr, node:$cmp, node:$swap), [{
      AtomicSDNode *AN = cast<AtomicSDNode>(N);
      return AN->getMemoryVT() == MVT::i64 &&
             AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
  }]>;
}

defm atomic_cmp_swap : AtomicCmpSwapLocal <atomic_cmp_swap>;

def flat_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def flat_store : PatFrag<(ops node:$val, node:$ptr),
                         (store node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def mskor_flat : PatFrag<(ops node:$val, node:$ptr),
                         (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
}]>;

class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
  (ops node:$ptr, node:$value),
  (atomic_op node:$ptr, node:$value),
  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;}]
>;

def atomic_swap_global : global_binary_atomic_op<atomic_swap>;
def atomic_add_global : global_binary_atomic_op<atomic_load_add>;
def atomic_and_global : global_binary_atomic_op<atomic_load_and>;
def atomic_max_global : global_binary_atomic_op<atomic_load_max>;
def atomic_min_global : global_binary_atomic_op<atomic_load_min>;
def atomic_or_global : global_binary_atomic_op<atomic_load_or>;
def atomic_sub_global : global_binary_atomic_op<atomic_load_sub>;
def atomic_umax_global : global_binary_atomic_op<atomic_load_umax>;
def atomic_umin_global : global_binary_atomic_op<atomic_load_umin>;
def atomic_xor_global : global_binary_atomic_op<atomic_load_xor>;

//===----------------------------------------------------------------------===//
// Misc Pattern Fragments
//===----------------------------------------------------------------------===//

// Constants encoded as IEEE-754 single-precision bit patterns.
class Constants {
  int TWO_PI = 0x40c90fdb;             // 2 * pi
  int PI = 0x40490fdb;                 // pi
  int TWO_PI_INV = 0x3e22f983;         // 1 / (2 * pi)
  int FP_UINT_MAX_PLUS_1 = 0x4f800000; // 1 << 32 in floating point encoding
  int FP32_NEG_ONE = 0xbf800000;       // -1.0
  int FP32_ONE = 0x3f800000;           // 1.0
}
def CONST : Constants;

def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;

def FP_HALF : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(0.5);}]
>;

let isCodeGenOnly = 1, isPseudo = 1 in {

let usesCustomInserter = 1 in {

class CLAMP <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "CLAMP $dst, $src0",
  [(set f32:$dst, (AMDGPUclamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
>;

class FABS <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FABS $dst, $src0",
  [(set f32:$dst, (fabs f32:$src0))]
>;

class FNEG <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FNEG $dst, $src0",
  [(set f32:$dst, (fneg f32:$src0))]
>;

} // usesCustomInserter = 1

multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
                              ComplexPattern addrPat> {
let UseNamedOperandTable = 1 in {

  def RegisterLoad : AMDGPUShaderInst <
    (outs dstClass:$dst),
    (ins addrClass:$addr, i32imm:$chan),
    "RegisterLoad $dst, $addr",
    [(set i32:$dst, (AMDGPUregister_load addrPat:$addr, (i32 timm:$chan)))]
  > {
    let isRegisterLoad = 1;
  }

  def RegisterStore : AMDGPUShaderInst <
    (outs),
    (ins dstClass:$val, addrClass:$addr, i32imm:$chan),
    "RegisterStore $val, $addr",
    [(AMDGPUregister_store i32:$val, addrPat:$addr, (i32 timm:$chan))]
  > {
    let isRegisterStore = 1;
  }
}
}

} // End isCodeGenOnly = 1, isPseudo = 1

/* Generic helper patterns for intrinsics */
/* -------------------------------------- */

class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : Pat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;

/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : Pat<
  (sub_type (extractelt vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
>;

/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : Pat <
  (insertelt vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : Pat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : Pat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;

// BFI_INT patterns

multiclass BFIPatterns <Instruction BFI_INT,
                        Instruction LoadImm32,
                        RegisterClass RC64> {
  // Definition from ISA doc:
  //   (y & x) | (z & ~x)
  def : Pat <
    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
    (BFI_INT $x, $y, $z)
  >;

  // SHA-256 Ch function
  //   z ^ (x & (y ^ z))
  def : Pat <
    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
    (BFI_INT $x, $y, $z)
  >;

  def : Pat <
    (fcopysign f32:$src0, f32:$src1),
    (BFI_INT (LoadImm32 0x7fffffff), $src0, $src1)
  >;

  def : Pat <
    (f64 (fcopysign f64:$src0, f64:$src1)),
    (REG_SEQUENCE RC64,
      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
      (BFI_INT (LoadImm32 0x7fffffff),
               (i32 (EXTRACT_SUBREG $src0, sub1)),
               (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
  >;
}

// SHA-256 Ma patterns

// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
class SHA256MaPattern <Instruction BFI_INT, Instruction XOR> : Pat <
  (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
  (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
>;

// Bitfield extract patterns

def IMMZeroBasedBitfieldMask : PatLeaf <(imm), [{
  return isMask_32(N->getZExtValue());
}]>;

def IMMPopCount : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(countPopulation(N->getZExtValue()), SDLoc(N),
                                   MVT::i32);
}]>;

class BFEPattern <Instruction BFE, Instruction MOV> : Pat <
  (i32 (and (i32 (srl i32:$src, i32:$rshift)), IMMZeroBasedBitfieldMask:$mask)),
  (BFE $src, $rshift, (MOV (i32 (IMMPopCount $mask))))
>;

// rotr pattern
class ROTRPattern <Instruction BIT_ALIGN> : Pat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;

// 24-bit arithmetic patterns
def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;

// Special conversion patterns

def cvt_rpi_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor (fadd $src, FP_HALF))),
  [{ (void) N; return TM.Options.NoNaNsFPMath; }]
>;

def cvt_flr_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor $src)),
  [{ (void)N; return TM.Options.NoNaNsFPMath; }]
>;

/*
class UMUL24Pattern <Instruction UMUL24> : Pat <
  (mul U24:$x, U24:$y),
  (UMUL24 $x, $y)
>;
*/

class IMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

class UMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

multiclass Expand24IBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_imad24 : Pat <
    (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_imul24 : Pat <
    (AMDGPUmul_i24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}

multiclass Expand24UBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_umad24 : Pat <
    (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_umul24 : Pat <
    (AMDGPUmul_u24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}

class RcpPat<Instruction RcpInst, ValueType vt> : Pat <
  (fdiv FP_ONE, vt:$src),
  (RcpInst $src)
>;

class RsqPat<Instruction RsqInst, ValueType vt> : Pat <
  (AMDGPUrcp (fsqrt vt:$src)),
  (RsqInst $src)
>;

include "R600Instructions.td"
include "R700Instructions.td"
include "EvergreenInstructions.td"
include "CaymanInstructions.td"

include "SIInstrInfo.td"