//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//

class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}

class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
    : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;

}

def FP32Denormals : Predicate<"Subtarget.hasFP32Denormals()">;
def FP64Denormals : Predicate<"Subtarget.hasFP64Denormals()">;
def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;

def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;

let OperandType = "OPERAND_IMMEDIATE" in {

def u32imm : Operand<i32> {
  let PrintMethod = "printU32ImmOperand";
}

def u16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
}

def u8imm : Operand<i8> {
  let PrintMethod = "printU8ImmOperand";
}

} // End OperandType = "OPERAND_IMMEDIATE"

//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
def brtarget : Operand<OtherVT>;

//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//

def COND_OEQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;

def COND_ONE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETONE || N->get() == ISD::SETNE;}]
>;

def COND_OGT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;

def COND_OGE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;

def COND_OLT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
>;

def COND_OLE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
>;


def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;
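
// The ordered leaves above are false if either operand is NaN, while the
// unordered variants below are true in that case; the plain integer-style
// conditions (SETEQ, SETGT, ...) leave NaN behavior unspecified, which is
// why each ordered leaf also accepts them. Roughly, in IEEE terms:
//
//   x oeq y  <=>  !isnan(x) && !isnan(y) && (x == y)
//   x ueq y  <=>   isnan(x) ||  isnan(y) || (x == y)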
//===----------------------------------------------------------------------===//
// PatLeafs for unsigned / unordered comparisons
//===----------------------------------------------------------------------===//

def COND_UEQ : PatLeaf <(cond), [{return N->get() == ISD::SETUEQ;}]>;
def COND_UNE : PatLeaf <(cond), [{return N->get() == ISD::SETUNE;}]>;
def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;

// XXX - For some reason the R600 version prefers to use unordered for setne?
def COND_UNE_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
>;

//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//

def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for integer equality
//===----------------------------------------------------------------------===//

def COND_EQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
>;

def COND_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
>;

def COND_NULL : PatLeaf <
  (cond),
  [{(void)N; return false;}]
>;

//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//

class PrivateMemOp <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
}]>;

class PrivateLoad <SDPatternOperator op> : PrivateMemOp <
  (ops node:$ptr), (op node:$ptr)
>;

class PrivateStore <SDPatternOperator op> : PrivateMemOp <
  (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
>;

def load_private : PrivateLoad <load>;

def truncstorei8_private : PrivateStore <truncstorei8>;
def truncstorei16_private : PrivateStore <truncstorei16>;
def store_private : PrivateStore <store>;

def global_store : PatFrag<(ops node:$val, node:$ptr),
    (store node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

// Global address space loads
def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

// Constant address space loads
def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
  LoadSDNode *L = cast<LoadSDNode>(N);
  return L->getExtensionType() == ISD::ZEXTLOAD ||
         L->getExtensionType() == ISD::EXTLOAD;
}]>;
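
// "az" = anyext or zeroext. An anyext load leaves the high bits undefined,
// so a zero-extending load is always an acceptable way to implement one;
// matching both here lets a single instruction pattern cover both nodes.
// For example, with a memory VT of i8, both of these DAG nodes match
// az_extloadi8 below:
//
//   (i32 (zextload node:$ptr))
//   (i32 (extload node:$ptr))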
def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_flat : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_flat : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def extloadi8_private : PrivateLoad <az_extloadi8>;
def sextloadi8_private : PrivateLoad <sextloadi8>;

def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_flat : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_flat : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;

def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def extloadi16_private : PrivateLoad <az_extloadi16>;
def sextloadi16_private : PrivateLoad <sextloadi16>;

def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def az_extloadi32_global : PatFrag<(ops node:$ptr),
                                   (az_extloadi32 node:$ptr), [{
  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_flat : PatFrag<(ops node:$ptr),
                                 (az_extloadi32 node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def az_extloadi32_constant : PatFrag<(ops node:$ptr),
                                     (az_extloadi32 node:$ptr), [{
  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
}]>;
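
// Each extload fragment is specialized per address space so that the
// per-generation .td files can select the matching memory instruction.
// A pattern consuming one of these fragments might look like this
// (illustrative only; the instruction name is made up):
//
//   def : Pat <
//     (i32 (sextloadi8_global i64:$ptr)),
//     (LOAD_SBYTE_GLOBAL $ptr)
//   >;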
def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei8 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
                                   (truncstorei16 node:$val, node:$ptr), [{
  return isGlobalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_flat : PatFrag<(ops node:$val, node:$ptr),
                                (truncstorei8 node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_flat : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstorei16 node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_store : PatFrag<(ops node:$val, node:$ptr),
                          (store node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
                                 (truncstorei8 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
                                  (truncstorei16 node:$val, node:$ptr), [{
  return isLocalStore(dyn_cast<StoreSDNode>(N));
}]>;

def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isLocalLoad(dyn_cast<LoadSDNode>(N));
}]>;

class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
}]>;

def local_load_aligned8bytes : Aligned8Bytes <
  (ops node:$ptr), (local_load node:$ptr)
>;

def local_store_aligned8bytes : Aligned8Bytes <
  (ops node:$val, node:$ptr), (local_store node:$val, node:$ptr)
>;

class local_binary_atomic_op<SDNode atomic_op> :
  PatFrag<(ops node:$ptr, node:$value),
    (atomic_op node:$ptr, node:$value), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;


def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;

def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                           (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;


def atomic_cmp_swap_32_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i32 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_cmp_swap_64_local :
  PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
          (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getMemoryVT() == MVT::i64 &&
         AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def flat_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return isFlatLoad(dyn_cast<LoadSDNode>(N));
}]>;

def flat_store : PatFrag<(ops node:$val, node:$ptr),
                         (store node:$val, node:$ptr), [{
  return isFlatStore(dyn_cast<StoreSDNode>(N));
}]>;

def mskor_flat : PatFrag<(ops node:$val, node:$ptr),
                         (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
}]>;
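
// The global atomic fragments below follow the same scheme as
// local_binary_atomic_op above, keyed on AMDGPUAS::GLOBAL_ADDRESS instead.
// A subtarget would use them roughly like this (illustrative only; the
// instruction name is made up):
//
//   def : Pat <
//     (atomic_add_global i64:$ptr, i32:$val),
//     (GLOBAL_ATOMIC_ADD $ptr, $val)
//   >;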
class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
  (ops node:$ptr, node:$value),
  (atomic_op node:$ptr, node:$value),
  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;}]
>;

def atomic_swap_global : global_binary_atomic_op<atomic_swap>;
def atomic_add_global : global_binary_atomic_op<atomic_load_add>;
def atomic_and_global : global_binary_atomic_op<atomic_load_and>;
def atomic_max_global : global_binary_atomic_op<atomic_load_max>;
def atomic_min_global : global_binary_atomic_op<atomic_load_min>;
def atomic_or_global : global_binary_atomic_op<atomic_load_or>;
def atomic_sub_global : global_binary_atomic_op<atomic_load_sub>;
def atomic_umax_global : global_binary_atomic_op<atomic_load_umax>;
def atomic_umin_global : global_binary_atomic_op<atomic_load_umin>;
def atomic_xor_global : global_binary_atomic_op<atomic_load_xor>;

//===----------------------------------------------------------------------===//
// Misc Pattern Fragments
//===----------------------------------------------------------------------===//

// IEEE-754 single-precision bit patterns for constants used in patterns.
class Constants {
  int TWO_PI = 0x40c90fdb;             // 6.2831853 (2 * pi)
  int PI = 0x40490fdb;                 // 3.1415927
  int TWO_PI_INV = 0x3e22f983;         // 0.15915494 (1 / (2 * pi))
  int FP_UINT_MAX_PLUS_1 = 0x4f800000; // 1 << 32 in floating point encoding
  int FP32_NEG_ONE = 0xbf800000;       // -1.0
  int FP32_ONE = 0x3f800000;           // 1.0
}
def CONST : Constants;

def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;

def FP_HALF : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(0.5);}]
>;

let isCodeGenOnly = 1, isPseudo = 1 in {

let usesCustomInserter = 1 in {

class CLAMP <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "CLAMP $dst, $src0",
  [(set f32:$dst, (AMDGPUclamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
>;

class FABS <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FABS $dst, $src0",
  [(set f32:$dst, (fabs f32:$src0))]
>;

class FNEG <RegisterClass rc> : AMDGPUShaderInst <
  (outs rc:$dst),
  (ins rc:$src0),
  "FNEG $dst, $src0",
  [(set f32:$dst, (fneg f32:$src0))]
>;

} // usesCustomInserter = 1

multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
                              ComplexPattern addrPat> {
let UseNamedOperandTable = 1 in {

  def RegisterLoad : AMDGPUShaderInst <
    (outs dstClass:$dst),
    (ins addrClass:$addr, i32imm:$chan),
    "RegisterLoad $dst, $addr",
    [(set i32:$dst, (AMDGPUregister_load addrPat:$addr, (i32 timm:$chan)))]
  > {
    let isRegisterLoad = 1;
  }

  def RegisterStore : AMDGPUShaderInst <
    (outs),
    (ins dstClass:$val, addrClass:$addr, i32imm:$chan),
    "RegisterStore $val, $addr",
    [(AMDGPUregister_store i32:$val, addrPat:$addr, (i32 timm:$chan))]
  > {
    let isRegisterStore = 1;
  }
}
}

} // End isCodeGenOnly = 1, isPseudo = 1

/* Generic helper patterns for intrinsics */
/* -------------------------------------- */

class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : Pat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;
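
/* POW_Common relies on the usual base-2 expansion; assuming log_ieee and
 * exp_ieee are the target's base-2 log and exp instructions, it computes,
 * for x > 0:
 *
 *   pow(x, y) = 2^(y * log2(x))
 */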
/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : Pat<
  (sub_type (vector_extract vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
>;

/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : Pat <
  (vector_insert vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : Pat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : Pat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;

// BFI_INT patterns

multiclass BFIPatterns <Instruction BFI_INT,
                        Instruction LoadImm32,
                        RegisterClass RC64> {
  // Definition from ISA doc:
  // (y & x) | (z & ~x)
  def : Pat <
    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
    (BFI_INT $x, $y, $z)
  >;

  // SHA-256 Ch function
  // z ^ (x & (y ^ z))
  // This is the canonical Ch(x, y, z) = (x & y) | (~x & z) rewritten with one
  // fewer operation: where x is 1 it yields y, where x is 0 it yields z,
  // which is exactly BFI_INT's select-by-bit behavior.
  def : Pat <
    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
    (BFI_INT $x, $y, $z)
  >;

  // 0x7fffffff covers every bit except the sign bit, so this takes the
  // magnitude bits from $src0 and the sign bit from $src1.
  def : Pat <
    (fcopysign f32:$src0, f32:$src1),
    (BFI_INT (LoadImm32 0x7fffffff), $src0, $src1)
  >;

  // For f64 only the high dword carries the sign, so the low dword is
  // copied through unchanged.
  def : Pat <
    (f64 (fcopysign f64:$src0, f64:$src1)),
    (REG_SEQUENCE RC64,
      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
      (BFI_INT (LoadImm32 0x7fffffff),
               (i32 (EXTRACT_SUBREG $src0, sub1)),
               (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
  >;
}

// SHA-256 Ma patterns

// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
// Majority: where x == y the result is x, otherwise it is z; XOR-ing x and y
// produces exactly that select mask for BFI_INT.
class SHA256MaPattern <Instruction BFI_INT, Instruction XOR> : Pat <
  (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
  (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
>;

// Bitfield extract patterns

def IMMZeroBasedBitfieldMask : PatLeaf <(imm), [{
  return isMask_32(N->getZExtValue());
}]>;

def IMMPopCount : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(countPopulation(N->getZExtValue()),
                                   MVT::i32);
}]>;

class BFEPattern <Instruction BFE, Instruction MOV> : Pat <
  (i32 (and (i32 (srl i32:$src, i32:$rshift)), IMMZeroBasedBitfieldMask:$mask)),
  (BFE $src, $rshift, (MOV (i32 (IMMPopCount $mask))))
>;

// rotr pattern
class ROTRPattern <Instruction BIT_ALIGN> : Pat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;

// 24-bit arithmetic patterns
def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;

// Special conversion patterns

def cvt_rpi_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor (fadd $src, FP_HALF))),
  [{ (void) N; return TM.Options.NoNaNsFPMath; }]
>;

def cvt_flr_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor $src)),
  [{ (void)N; return TM.Options.NoNaNsFPMath; }]
>;

/*
class UMUL24Pattern <Instruction UMUL24> : Pat <
  (mul U24:$x, U24:$y),
  (UMUL24 $x, $y)
>;
*/

class IMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;

class UMad24Pat<Instruction Inst> : Pat <
  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
  (Inst $src0, $src1, $src2)
>;
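
// Subtargets without a dedicated 24-bit mad use the multiclasses below to
// expand the 24-bit nodes into plain mul/add instructions:
//
//   mad24(a, b, c) = mul24(a, b) + c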
multiclass Expand24IBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_imad24 : Pat <
    (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_imul24 : Pat <
    (AMDGPUmul_i24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}

multiclass Expand24UBitOps<Instruction MulInst, Instruction AddInst> {
  def _expand_umad24 : Pat <
    (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2),
    (AddInst (MulInst $src0, $src1), $src2)
  >;

  def _expand_umul24 : Pat <
    (AMDGPUmul_u24 i32:$src0, i32:$src1),
    (MulInst $src0, $src1)
  >;
}

// 1.0 / x -> rcp x
class RcpPat<Instruction RcpInst, ValueType vt> : Pat <
  (fdiv FP_ONE, vt:$src),
  (RcpInst $src)
>;

// rcp (sqrt x) -> rsq x
class RsqPat<Instruction RsqInst, ValueType vt> : Pat <
  (AMDGPUrcp (fsqrt vt:$src)),
  (RsqInst $src)
>;

include "R600Instructions.td"
include "R700Instructions.td"
include "EvergreenInstructions.td"
include "CaymanInstructions.td"

include "SIInstrInfo.td"