//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
    : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  int64_t XORValue;
  int64_t AddValue;
  unsigned Bit;
};

// Represents information about a comparison.
struct Comparison {
  Comparison(SDValue Op0In, SDValue Op1In)
    : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}

  // The operands to the comparison.
  SDValue Op0, Op1;

  // The opcode that should be used to compare Op0 and Op1.
  unsigned Opcode;

  // A SystemZICMP value.  Only used for integer comparisons.
  unsigned ICmpType;

  // The mask of CC values that Opcode can produce.
  unsigned CCValid;

  // The mask of CC values for which the original condition is true.
  unsigned CCMask;
};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &tm)
    : TargetLowering(tm, new TargetLoweringObjectFileELF()),
      Subtarget(tm.getSubtarget<SystemZSubtarget>()) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  if (Subtarget.hasHighWord())
    addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
  else
    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);
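
  // Added note: GRX32 is the union of the low-word (GR32) and high-word
  // (GRH32) register classes, so with the z196 high-word facility an i32
  // value may live in either half of a 64-bit GPR; without it, i32 values
  // are restricted to the low halves described by GR32.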

  // Compute derived properties from the register classes.
  computeRegisterProperties();

  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling; however,
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Lower SET_CC into an IPM-based sequence.
      setOperationAction(ISD::SETCC, VT, Custom);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
      // stores, putting a serialization instruction after the stores.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
      setOperationAction(ISD::ATOMIC_STORE, VT, Custom);

      // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
      // available, or if the operand is constant.
      setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible instead of MULH*.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Custom);
      setOperationAction(ISD::UMUL_LOHI, VT, Custom);

      // Only z196 and above have native support for conversions to unsigned.
      if (!Subtarget.hasFPExtension())
        setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }
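
  // Added note: for the DIVREM and MUL_LOHI nodes above, both results come
  // back in the two halves of an even/odd GR128 register pair; see
  // lowerGR128Binary later in this file for how the halves are extracted.
  // Folding a div and a rem of the same operands into one DIVREM therefore
  // saves a division.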

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // z10 has instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  if (!Subtarget.hasFPExtension()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  }

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle prefetches with PFD or PFDRL.
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // We can use the extended form of FI for other rounding operations.
      if (Subtarget.hasFPExtension()) {
        setOperationAction(ISD::FNEARBYINT, VT, Legal);
        setOperationAction(ISD::FFLOOR, VT, Legal);
        setOperationAction(ISD::FCEIL, VT, Legal);
        setOperationAction(ISD::FTRUNC, VT, Legal);
        setOperationAction(ISD::FROUND, VT, Legal);
      }
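
      // Added note: "FI" above refers to the LOAD FP INTEGER instructions
      // (FIEBR/FIDBR/FIXBR); the FP-extension facility adds forms with an
      // explicit rounding-mode field, which is what lets these rounding
      // nodes stay Legal here.  Without that facility they remain Expand
      // and are presumably lowered to libcalls such as floor().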

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Codes for which we want to perform some z-specific combinations.
  setTargetDAGCombine(ISD::SIGN_EXTEND);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

EVT SystemZTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                          unsigned,
                                                          bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
  return AM.Scale == 0 || AM.Scale == 1;
}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
    return false;
  unsigned FromBits = FromType->getPrimitiveSizeInBits();
  unsigned ToBits = ToType->getPrimitiveSizeInBits();
  return FromBits > ToBits;
}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
  if (!FromVT.isInteger() || !ToVT.isInteger())
    return false;
  unsigned FromBits = FromVT.getSizeInBits();
  unsigned ToBits = ToVT.getSizeInBits();
  return FromBits > ToBits;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'h': // High-part register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
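
// Added illustration: these are the GCC constraint letters for SystemZ, so
// user inline asm along the lines of
//   asm("ahi %0,%1" : "+d"(val) : "K"(amount));
// asks for a general-purpose register operand ('d') and a signed 16-bit
// immediate ('K'), which is the operand form that AHI accepts.  The variable
// names are illustrative only.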

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'h': // High-part register
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(const std::string &Constraint,
                    const TargetRegisterClass *RC, const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
    unsigned Index = atoi(Suffix.c_str());
    if (Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0U, nullptr);
}

std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'h': // High-part register (an LLVM extension)
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {
  return isTruncateFree(FromType, ToType);
}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  if (!CI->isTailCall())
    return false;
  return true;
}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::Indirect)
    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  auto *TFL = static_cast<const SystemZFrameLowering *>(
    DAG.getTarget().getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, 0);
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          makeArrayRef(&MemOps[NumFixedFPRs],
                                       SystemZ::NumArgFPRs-NumFixedFPRs));
    }
  }

  return Chain;
}

static bool canUseSiblingCall(CCState ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs) {
  // Punt if there are any indirect or stack arguments, or if the call
  // needs the call-saved argument register R6.
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;
    if (!VA.isRegLoc())
      return false;
    unsigned Reg = VA.getLocReg();
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
      return false;
  }
  return true;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), ArgLocs,
                    *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // We don't support GuaranteedTailCallOpt, only automatically-detected
  // sibling calls.
  if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
    IsTailCall = false;

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  if (!IsTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                                 DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      // Emit the store.
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.  Force %r1 to be used for indirect
  // tail calls.
  SDValue Glue;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (IsTailCall) {
    Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
    Glue = Chain.getValue(1);
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  }

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), RetLocs,
                    *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), RetLocs,
                    *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns.
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
}

SDValue SystemZTargetLowering::
prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(SystemZISD::SERIALIZE, DL, MVT::Other, Chain);
}

// CC is the condition code for a comparison that will be implemented using
// an integer or floating-point comparison.  Return the condition code mask
// for a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO:  return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// Return a sequence for getting a 1 from an IPM result when CC has a
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
// The handling of CC values outside CCValid doesn't matter.
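//
// For example, with CCValid == SystemZ::CCMASK_ICMP and
// CCMask == SystemZ::CCMASK_CMP_EQ (a test for CC 0), the code below returns
// XORValue 0, AddValue -(1 << SystemZ::IPM_CC) and Bit 31: IPM places CC at
// bit SystemZ::IPM_CC (bit 28) with the two bits above it clear, so adding
// -(1 << 28) makes the result negative exactly when CC was 0, and shifting
// the sign bit down gives the required 0/1 value.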
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
  // Deal with cases where the result can be taken directly from a bit
  // of the IPM result.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC);
  if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC + 1);

  // Deal with cases where we can add a value to force the sign bit
  // to contain the right value.  Putting the bit in 31 means we can
  // use SRL rather than RISBG(L), and also makes it easier to get a
  // 0/-1 value, so it has priority over the other tests below.
  //
  // These sequences rely on the fact that the upper two bits of the
  // IPM result are zero.
  uint64_t TopBit = uint64_t(1) << 31;
  if (CCMask == (CCValid & SystemZ::CCMASK_0))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
    return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
    return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_3))
    return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);

  // Next try inverting the value and testing a bit.  0/1 could be
  // handled this way too, but we dealt with that case above.
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
    return IPMConversion(-1, 0, SystemZ::IPM_CC);

  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
    return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);

  // The remaining cases are 1, 2, 0/1/3 and 0/2/3.  All these can be
  // done by inverting the low CC bit and applying one of the
  // sign-based extractions above.
  if (CCMask == (CCValid & SystemZ::CCMASK_1))
    return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_2))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (1 << SystemZ::IPM_CC), 31);

  llvm_unreachable("Unexpected CC combination");
}

// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, Comparison &C) {
  if (C.ICmpType == SystemZICMP::UnsignedOnly)
    return;

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
    C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    C.Op1 = DAG.getConstant(0, C.Op1.getValueType());
  }
}

// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, Comparison &C) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!C.Op0.hasOneUse() ||
      C.Op0.getOpcode() != ISD::LOAD ||
      C.Op1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  uint64_t Value = ConstOp1->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    // Make sure that ConstOp1 is in range of C.Op0.
    int64_t SignedValue = ConstOp1->getSExtValue();
    if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
      return;
    if (C.ICmpType != SystemZICMP::SignedOnly) {
      // Unsigned comparison between two sign-extended values is equivalent
      // to unsigned comparison between two zero-extended values.
      Value &= Mask;
    } else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
      else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
      else
        // No instruction exists for this combination.
        return;
      C.ICmpType = SystemZICMP::UnsignedOnly;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    assert(C.ICmpType == SystemZICMP::Any &&
           "Signedness shouldn't matter here.");
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
                              ISD::SEXTLOAD :
                              ISD::ZEXTLOAD);
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                           Load->getChain(), Load->getBasePtr(),
                           Load->getPointerInfo(), Load->getMemoryVT(),
                           Load->isVolatile(), Load->isNonTemporal(),
                           Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())
    C.Op1 = DAG.getConstant(Value, MVT::i32);
}

// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load) {
    // There are no instructions to compare a register with a memory byte.
    if (Load->getMemoryVT() == MVT::i8)
      return false;
    // Otherwise decide on extension type.
    switch (Load->getExtensionType()) {
    case ISD::NON_EXTLOAD:
      return true;
    case ISD::SEXTLOAD:
      return ICmpType != SystemZICMP::UnsignedOnly;
    case ISD::ZEXTLOAD:
      return ICmpType != SystemZICMP::SignedOnly;
    default:
      break;
    }
  }
  return false;
}

// Return true if it is better to swap the operands of C.
static bool shouldSwapCmpOperands(const Comparison &C) {
  // Leave f128 comparisons alone, since they have no memory forms.
  if (C.Op0.getValueType() == MVT::f128)
    return false;

  // Always keep a floating-point constant second, since comparisons with
  // zero can use LOAD TEST and comparisons with other constants make a
  // natural memory operand.
  if (isa<ConstantFPSDNode>(C.Op1))
    return false;

  // Never swap comparisons with zero since there are many ways to optimize
  // those later.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
    return false;

  // Also keep natural memory operands second if the loaded value is
  // only used here.  Several comparisons have memory forms.
  if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
    return false;

  // Look for cases where C.Op0 is a single-use load and C.Op1 isn't.
  // In that case we generally prefer the memory to be second.
  if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
    // The only exceptions are when the second operand is a constant and
    // we can use things like CHHSI.
    if (!ConstOp1)
      return true;
    // The unsigned memory-immediate instructions can handle 16-bit
    // unsigned integers.
    if (C.ICmpType != SystemZICMP::SignedOnly &&
        isUInt<16>(ConstOp1->getZExtValue()))
      return false;
    // The signed memory-immediate instructions can handle 16-bit
    // signed integers.
    if (C.ICmpType != SystemZICMP::UnsignedOnly &&
        isInt<16>(ConstOp1->getSExtValue()))
      return false;
    return true;
  }

  // Try to promote the use of CGFR and CLGFR.
  unsigned Opcode0 = C.Op0.getOpcode();
  if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
    return true;
  if (C.ICmpType != SystemZICMP::SignedOnly &&
      Opcode0 == ISD::AND &&
      C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
    return true;

  return false;
}

// Return a version of comparison CC mask CCMask in which the LT and GT
// actions are swapped.
static unsigned reverseCCMask(unsigned CCMask) {
  return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
          (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
          (CCMask & SystemZ::CCMASK_CMP_UO));
}

// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed.  In that case it's better to compare the
// result of the subtraction against zero.
static void adjustForSubtraction(SelectionDAG &DAG, Comparison &C) {
  if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
      C.CCMask == SystemZ::CCMASK_CMP_NE) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::SUB &&
          ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
           (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
        C.Op0 = SDValue(N, 0);
        C.Op1 = DAG.getConstant(0, N->getValueType(0));
        return;
      }
    }
  }
}

// Check whether C compares a floating-point value with zero and if that
// floating-point value is also negated.  In this case we can use the
// negation to set CC, thus avoiding separate LOAD AND TEST and
// LOAD (NEGATIVE/COMPLEMENT) instructions.
static void adjustForFNeg(Comparison &C) {
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
    for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
      SDNode *N = *I;
      if (N->getOpcode() == ISD::FNEG) {
        C.Op0 = SDValue(N, 0);
        C.CCMask = reverseCCMask(C.CCMask);
        return;
      }
    }
  }
}

// Check whether C compares (shl X, 32) with 0 and whether X is
// also sign-extended.  In that case it is better to test the result
// of the sign extension using LTGFR.
//
// This case is important because InstCombine transforms a comparison
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {
  // Check for a comparison between (shl X, 32) and 0.
  if (C.Op0.getOpcode() == ISD::SHL &&
      C.Op0.getValueType() == MVT::i64 &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
      // See whether X has any SIGN_EXTEND_INREG uses.
      for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
        SDNode *N = *I;
        if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
          C.Op0 = SDValue(N, 0);
          return;
        }
      }
    }
  }
}

// If C compares the truncation of an extending load, try to compare
// the untruncated value instead.  This exposes more opportunities to
// reuse CC.
static void adjustICmpTruncate(SelectionDAG &DAG, Comparison &C) {
  if (C.Op0.getOpcode() == ISD::TRUNCATE &&
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      C.Op1.getOpcode() == ISD::Constant &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits()
        <= C.Op0.getValueType().getSizeInBits()) {
      unsigned Type = L->getExtensionType();
      if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
          (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
        C.Op0 = C.Op0.getOperand(0);
        C.Op1 = DAG.getConstant(0, C.Op0.getValueType());
      }
    }
  }
}

// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueType().getSizeInBits())
    return false;

  ShiftVal = Amount;
  return true;
}

// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type ICmpType between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands.  If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
  if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
      !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
    return 0;

  // Work out the masks for the lowest and highest bits.
  unsigned HighShift = 63 - countLeadingZeros(Mask);
  uint64_t High = uint64_t(1) << HighShift;
  uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);

  // Signed ordered comparisons are effectively unsigned if the sign
  // bit is dropped.
  bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);

  // Check for equality comparisons with 0, or the equivalent.
  if (CmpVal == 0) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal <= Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_SOME_1;
  }
  if (EffectivelyUnsigned && CmpVal < Low) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_ALL_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_SOME_1;
  }

  // Check for equality comparisons with the mask, or the equivalent.
  if (CmpVal == Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_SOME_0;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_ALL_1;
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_SOME_0;
  }

  // Check for ordered comparisons with the top bit.
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
    if (CCMask == SystemZ::CCMASK_CMP_LE)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GT)
      return SystemZ::CCMASK_TM_MSB_1;
  }
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
    if (CCMask == SystemZ::CCMASK_CMP_LT)
      return SystemZ::CCMASK_TM_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_GE)
      return SystemZ::CCMASK_TM_MSB_1;
  }

  // If there are just two bits, we can do equality checks for Low and High
  // as well.
  if (Mask == Low + High) {
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
      return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
    if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1;
    if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
      return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
  }

  // Looks like we've exhausted our options.
  return 0;
}

// See whether C can be implemented as a TEST UNDER MASK instruction.
// Update the arguments with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, Comparison &C) {
  // Check that we have a comparison with a constant.
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1)
    return;
  uint64_t CmpVal = ConstOp1->getZExtValue();

  // Check whether the nonconstant input is an AND with a constant mask.
  Comparison NewC(C);
  uint64_t MaskVal;
  ConstantSDNode *Mask = nullptr;
  if (C.Op0.getOpcode() == ISD::AND) {
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    if (!Mask)
      return;
    MaskVal = Mask->getZExtValue();
  } else {
    // There is no instruction to compare with a 64-bit immediate
    // so use TMHH instead if possible.  We need an unsigned ordered
    // comparison with an i64 immediate.
    if (NewC.Op0.getValueType() != MVT::i64 ||
        NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
        NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
        NewC.ICmpType == SystemZICMP::SignedOnly)
      return;
    // Convert LE and GT comparisons into LT and GE.
    if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
        NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
      if (CmpVal == uint64_t(-1))
        return;
      CmpVal += 1;
      NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
    }
    // If the low N bits of Op1 are zero then the low N bits of Op0 can
    // be masked off without changing the result.
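    // Added note: (CmpVal & -CmpVal) isolates the lowest set bit of CmpVal,
    // so negating it gives a mask that keeps that bit and everything above
    // it.  For example, CmpVal == 0x500 yields MaskVal == ~0xff: only bits
    // that cannot affect an unsigned LT/GE comparison against a multiple of
    // 0x100 are masked off.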
1528 MaskVal = -(CmpVal & -CmpVal); 1529 NewC.ICmpType = SystemZICMP::UnsignedOnly; 1530 } 1531 1532 // Check whether the combination of mask, comparison value and comparison 1533 // type are suitable. 1534 unsigned BitSize = NewC.Op0.getValueType().getSizeInBits(); 1535 unsigned NewCCMask, ShiftVal; 1536 if (NewC.ICmpType != SystemZICMP::SignedOnly && 1537 NewC.Op0.getOpcode() == ISD::SHL && 1538 isSimpleShift(NewC.Op0, ShiftVal) && 1539 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, 1540 MaskVal >> ShiftVal, 1541 CmpVal >> ShiftVal, 1542 SystemZICMP::Any))) { 1543 NewC.Op0 = NewC.Op0.getOperand(0); 1544 MaskVal >>= ShiftVal; 1545 } else if (NewC.ICmpType != SystemZICMP::SignedOnly && 1546 NewC.Op0.getOpcode() == ISD::SRL && 1547 isSimpleShift(NewC.Op0, ShiftVal) && 1548 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, 1549 MaskVal << ShiftVal, 1550 CmpVal << ShiftVal, 1551 SystemZICMP::UnsignedOnly))) { 1552 NewC.Op0 = NewC.Op0.getOperand(0); 1553 MaskVal <<= ShiftVal; 1554 } else { 1555 NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal, 1556 NewC.ICmpType); 1557 if (!NewCCMask) 1558 return; 1559 } 1560 1561 // Go ahead and make the change. 1562 C.Opcode = SystemZISD::TM; 1563 C.Op0 = NewC.Op0; 1564 if (Mask && Mask->getZExtValue() == MaskVal) 1565 C.Op1 = SDValue(Mask, 0); 1566 else 1567 C.Op1 = DAG.getConstant(MaskVal, C.Op0.getValueType()); 1568 C.CCValid = SystemZ::CCMASK_TM; 1569 C.CCMask = NewCCMask; 1570 } 1571 1572 // Decide how to implement a comparison of type Cond between CmpOp0 with CmpOp1. 1573 static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, 1574 ISD::CondCode Cond) { 1575 Comparison C(CmpOp0, CmpOp1); 1576 C.CCMask = CCMaskForCondCode(Cond); 1577 if (C.Op0.getValueType().isFloatingPoint()) { 1578 C.CCValid = SystemZ::CCMASK_FCMP; 1579 C.Opcode = SystemZISD::FCMP; 1580 adjustForFNeg(C); 1581 } else { 1582 C.CCValid = SystemZ::CCMASK_ICMP; 1583 C.Opcode = SystemZISD::ICMP; 1584 // Choose the type of comparison. Equality and inequality tests can 1585 // use either signed or unsigned comparisons. The choice also doesn't 1586 // matter if both sign bits are known to be clear. In those cases we 1587 // want to give the main isel code the freedom to choose whichever 1588 // form fits best. 1589 if (C.CCMask == SystemZ::CCMASK_CMP_EQ || 1590 C.CCMask == SystemZ::CCMASK_CMP_NE || 1591 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1))) 1592 C.ICmpType = SystemZICMP::Any; 1593 else if (C.CCMask & SystemZ::CCMASK_CMP_UO) 1594 C.ICmpType = SystemZICMP::UnsignedOnly; 1595 else 1596 C.ICmpType = SystemZICMP::SignedOnly; 1597 C.CCMask &= ~SystemZ::CCMASK_CMP_UO; 1598 adjustZeroCmp(DAG, C); 1599 adjustSubwordCmp(DAG, C); 1600 adjustForSubtraction(DAG, C); 1601 adjustForLTGFR(C); 1602 adjustICmpTruncate(DAG, C); 1603 } 1604 1605 if (shouldSwapCmpOperands(C)) { 1606 std::swap(C.Op0, C.Op1); 1607 C.CCMask = reverseCCMask(C.CCMask); 1608 } 1609 1610 adjustForTestUnderMask(DAG, C); 1611 return C; 1612 } 1613 1614 // Emit the comparison instruction described by C. 
1615 static SDValue emitCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) { 1616 if (C.Opcode == SystemZISD::ICMP) 1617 return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1, 1618 DAG.getConstant(C.ICmpType, MVT::i32)); 1619 if (C.Opcode == SystemZISD::TM) { 1620 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) != 1621 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1)); 1622 return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1, 1623 DAG.getConstant(RegisterOnly, MVT::i32)); 1624 } 1625 return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1); 1626 } 1627 1628 // Implement a 32-bit *MUL_LOHI operation by extending both operands to 1629 // 64 bits. Extend is the extension type to use. Store the high part 1630 // in Hi and the low part in Lo. 1631 static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL, 1632 unsigned Extend, SDValue Op0, SDValue Op1, 1633 SDValue &Hi, SDValue &Lo) { 1634 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0); 1635 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1); 1636 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1); 1637 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, DAG.getConstant(32, MVT::i64)); 1638 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi); 1639 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul); 1640 } 1641 1642 // Lower a binary operation that produces two VT results, one in each 1643 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation, 1644 // Extend extends Op0 to a GR128, and Opcode performs the GR128 operation 1645 // on the extended Op0 and (unextended) Op1. Store the even register result 1646 // in Even and the odd register result in Odd. 1647 static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT, 1648 unsigned Extend, unsigned Opcode, 1649 SDValue Op0, SDValue Op1, 1650 SDValue &Even, SDValue &Odd) { 1651 SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0); 1652 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, 1653 SDValue(In128, 0), Op1); 1654 bool Is32Bit = is32Bit(VT); 1655 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result); 1656 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result); 1657 } 1658 1659 // Return an i32 value that is 1 if the CC value produced by Glue is 1660 // in the mask CCMask and 0 otherwise. CC is known to have a value 1661 // in CCValid, so other values can be ignored. 1662 static SDValue emitSETCC(SelectionDAG &DAG, SDLoc DL, SDValue Glue, 1663 unsigned CCValid, unsigned CCMask) { 1664 IPMConversion Conversion = getIPMConversion(CCValid, CCMask); 1665 SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue); 1666 1667 if (Conversion.XORValue) 1668 Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result, 1669 DAG.getConstant(Conversion.XORValue, MVT::i32)); 1670 1671 if (Conversion.AddValue) 1672 Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result, 1673 DAG.getConstant(Conversion.AddValue, MVT::i32)); 1674 1675 // The SHR/AND sequence should get optimized to an RISBG. 
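// If the XOR/ADD steps above have already moved the wanted bit into the
// sign position, Conversion.Bit is 31 and the shift alone yields a 0/1
// value; otherwise we still need to mask off the bits above it.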
1676 Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result, 1677 DAG.getConstant(Conversion.Bit, MVT::i32)); 1678 if (Conversion.Bit != 31) 1679 Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result, 1680 DAG.getConstant(1, MVT::i32)); 1681 return Result; 1682 } 1683 1684 SDValue SystemZTargetLowering::lowerSETCC(SDValue Op, 1685 SelectionDAG &DAG) const { 1686 SDValue CmpOp0 = Op.getOperand(0); 1687 SDValue CmpOp1 = Op.getOperand(1); 1688 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1689 SDLoc DL(Op); 1690 1691 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 1692 SDValue Glue = emitCmp(DAG, DL, C); 1693 return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); 1694 } 1695 1696 SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 1697 SDValue Chain = Op.getOperand(0); 1698 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 1699 SDValue CmpOp0 = Op.getOperand(2); 1700 SDValue CmpOp1 = Op.getOperand(3); 1701 SDValue Dest = Op.getOperand(4); 1702 SDLoc DL(Op); 1703 1704 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 1705 SDValue Glue = emitCmp(DAG, DL, C); 1706 return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(), 1707 Chain, DAG.getConstant(C.CCValid, MVT::i32), 1708 DAG.getConstant(C.CCMask, MVT::i32), Dest, Glue); 1709 } 1710 1711 // Return true if Pos is CmpOp and Neg is the negative of CmpOp, 1712 // allowing Pos and Neg to be wider than CmpOp. 1713 static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) { 1714 return (Neg.getOpcode() == ISD::SUB && 1715 Neg.getOperand(0).getOpcode() == ISD::Constant && 1716 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 && 1717 Neg.getOperand(1) == Pos && 1718 (Pos == CmpOp || 1719 (Pos.getOpcode() == ISD::SIGN_EXTEND && 1720 Pos.getOperand(0) == CmpOp))); 1721 } 1722 1723 // Return the absolute or negative absolute of Op; IsNegative decides which. 1724 static SDValue getAbsolute(SelectionDAG &DAG, SDLoc DL, SDValue Op, 1725 bool IsNegative) { 1726 Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op); 1727 if (IsNegative) 1728 Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(), 1729 DAG.getConstant(0, Op.getValueType()), Op); 1730 return Op; 1731 } 1732 1733 SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op, 1734 SelectionDAG &DAG) const { 1735 SDValue CmpOp0 = Op.getOperand(0); 1736 SDValue CmpOp1 = Op.getOperand(1); 1737 SDValue TrueOp = Op.getOperand(2); 1738 SDValue FalseOp = Op.getOperand(3); 1739 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 1740 SDLoc DL(Op); 1741 1742 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC)); 1743 1744 // Check for absolute and negative-absolute selections, including those 1745 // where the comparison value is sign-extended (for LPGFR and LNGFR). 1746 // This check supplements the one in DAGCombiner. 1747 if (C.Opcode == SystemZISD::ICMP && 1748 C.CCMask != SystemZ::CCMASK_CMP_EQ && 1749 C.CCMask != SystemZ::CCMASK_CMP_NE && 1750 C.Op1.getOpcode() == ISD::Constant && 1751 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) { 1752 if (isAbsolute(C.Op0, TrueOp, FalseOp)) 1753 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT); 1754 if (isAbsolute(C.Op0, FalseOp, TrueOp)) 1755 return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT); 1756 } 1757 1758 SDValue Glue = emitCmp(DAG, DL, C); 1759 1760 // Special case for handling -1/0 results. The shifts we use here 1761 // should get optimized with the IPM conversion sequence. 
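// For a select between -1 and 0 we can take the 0/1 result of emitSETCC and
// sign-extend it from the low bit with an SHL/SRA pair, rather than emitting
// a SELECT_CCMASK node.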
1762 auto *TrueC = dyn_cast<ConstantSDNode>(TrueOp); 1763 auto *FalseC = dyn_cast<ConstantSDNode>(FalseOp); 1764 if (TrueC && FalseC) { 1765 int64_t TrueVal = TrueC->getSExtValue(); 1766 int64_t FalseVal = FalseC->getSExtValue(); 1767 if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) { 1768 // Invert the condition if we want -1 on false. 1769 if (TrueVal == 0) 1770 C.CCMask ^= C.CCValid; 1771 SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask); 1772 EVT VT = Op.getValueType(); 1773 // Extend the result to VT. Upper bits are ignored. 1774 if (!is32Bit(VT)) 1775 Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result); 1776 // Sign-extend from the low bit. 1777 SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, MVT::i32); 1778 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt); 1779 return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt); 1780 } 1781 } 1782 1783 SmallVector<SDValue, 5> Ops; 1784 Ops.push_back(TrueOp); 1785 Ops.push_back(FalseOp); 1786 Ops.push_back(DAG.getConstant(C.CCValid, MVT::i32)); 1787 Ops.push_back(DAG.getConstant(C.CCMask, MVT::i32)); 1788 Ops.push_back(Glue); 1789 1790 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); 1791 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops); 1792 } 1793 1794 SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node, 1795 SelectionDAG &DAG) const { 1796 SDLoc DL(Node); 1797 const GlobalValue *GV = Node->getGlobal(); 1798 int64_t Offset = Node->getOffset(); 1799 EVT PtrVT = getPointerTy(); 1800 Reloc::Model RM = DAG.getTarget().getRelocationModel(); 1801 CodeModel::Model CM = DAG.getTarget().getCodeModel(); 1802 1803 SDValue Result; 1804 if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) { 1805 // Assign anchors at 1<<12 byte boundaries. 1806 uint64_t Anchor = Offset & ~uint64_t(0xfff); 1807 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor); 1808 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1809 1810 // The offset can be folded into the address if it is aligned to a halfword. 1811 Offset -= Anchor; 1812 if (Offset != 0 && (Offset & 1) == 0) { 1813 SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset); 1814 Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result); 1815 Offset = 0; 1816 } 1817 } else { 1818 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT); 1819 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1820 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, 1821 MachinePointerInfo::getGOT(), false, false, false, 0); 1822 } 1823 1824 // If there was a non-zero offset that we didn't fold, create an explicit 1825 // addition for it. 1826 if (Offset != 0) 1827 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result, 1828 DAG.getConstant(Offset, PtrVT)); 1829 1830 return Result; 1831 } 1832 1833 SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node, 1834 SelectionDAG &DAG) const { 1835 SDLoc DL(Node); 1836 const GlobalValue *GV = Node->getGlobal(); 1837 EVT PtrVT = getPointerTy(); 1838 TLSModel::Model model = DAG.getTarget().getTLSModel(GV); 1839 1840 if (model != TLSModel::LocalExec) 1841 llvm_unreachable("only local-exec TLS mode supported"); 1842 1843 // The high part of the thread pointer is in access register 0. 
1844 SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32, 1845 DAG.getConstant(0, MVT::i32)); 1846 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi); 1847 1848 // The low part of the thread pointer is in access register 1. 1849 SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32, 1850 DAG.getConstant(1, MVT::i32)); 1851 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo); 1852 1853 // Merge them into a single 64-bit address. 1854 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi, 1855 DAG.getConstant(32, PtrVT)); 1856 SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo); 1857 1858 // Get the offset of GA from the thread pointer. 1859 SystemZConstantPoolValue *CPV = 1860 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF); 1861 1862 // Force the offset into the constant pool and load it from there. 1863 SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8); 1864 SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), 1865 CPAddr, MachinePointerInfo::getConstantPool(), 1866 false, false, false, 0); 1867 1868 // Add the base and offset together. 1869 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset); 1870 } 1871 1872 SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node, 1873 SelectionDAG &DAG) const { 1874 SDLoc DL(Node); 1875 const BlockAddress *BA = Node->getBlockAddress(); 1876 int64_t Offset = Node->getOffset(); 1877 EVT PtrVT = getPointerTy(); 1878 1879 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset); 1880 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1881 return Result; 1882 } 1883 1884 SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT, 1885 SelectionDAG &DAG) const { 1886 SDLoc DL(JT); 1887 EVT PtrVT = getPointerTy(); 1888 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 1889 1890 // Use LARL to load the address of the table. 1891 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1892 } 1893 1894 SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP, 1895 SelectionDAG &DAG) const { 1896 SDLoc DL(CP); 1897 EVT PtrVT = getPointerTy(); 1898 1899 SDValue Result; 1900 if (CP->isMachineConstantPoolEntry()) 1901 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 1902 CP->getAlignment()); 1903 else 1904 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 1905 CP->getAlignment(), CP->getOffset()); 1906 1907 // Use LARL to load the address of the constant pool entry. 
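// (LARL's immediate is a signed 32-bit halfword count, so it can reach any
// even address within roughly +/-4GB of the PC, which comfortably covers
// constant-pool entries emitted into the same module.)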
1908 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result); 1909 } 1910 1911 SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op, 1912 SelectionDAG &DAG) const { 1913 SDLoc DL(Op); 1914 SDValue In = Op.getOperand(0); 1915 EVT InVT = In.getValueType(); 1916 EVT ResVT = Op.getValueType(); 1917 1918 if (InVT == MVT::i32 && ResVT == MVT::f32) { 1919 SDValue In64; 1920 if (Subtarget.hasHighWord()) { 1921 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, 1922 MVT::i64); 1923 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 1924 MVT::i64, SDValue(U64, 0), In); 1925 } else { 1926 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In); 1927 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, 1928 DAG.getConstant(32, MVT::i64)); 1929 } 1930 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64); 1931 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, 1932 DL, MVT::f32, Out64); 1933 } 1934 if (InVT == MVT::f32 && ResVT == MVT::i32) { 1935 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64); 1936 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL, 1937 MVT::f64, SDValue(U64, 0), In); 1938 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64); 1939 if (Subtarget.hasHighWord()) 1940 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL, 1941 MVT::i32, Out64); 1942 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, 1943 DAG.getConstant(32, MVT::i64)); 1944 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift); 1945 } 1946 llvm_unreachable("Unexpected bitcast combination"); 1947 } 1948 1949 SDValue SystemZTargetLowering::lowerVASTART(SDValue Op, 1950 SelectionDAG &DAG) const { 1951 MachineFunction &MF = DAG.getMachineFunction(); 1952 SystemZMachineFunctionInfo *FuncInfo = 1953 MF.getInfo<SystemZMachineFunctionInfo>(); 1954 EVT PtrVT = getPointerTy(); 1955 1956 SDValue Chain = Op.getOperand(0); 1957 SDValue Addr = Op.getOperand(1); 1958 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1959 SDLoc DL(Op); 1960 1961 // The initial values of each field. 1962 const unsigned NumFields = 4; 1963 SDValue Fields[NumFields] = { 1964 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT), 1965 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT), 1966 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT), 1967 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT) 1968 }; 1969 1970 // Store each field into its respective slot. 
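// The four fields match the SystemZ ELF va_list layout: the __gpr and __fpr
// counts followed by the overflow-area and register-save-area pointers, each
// occupying 8 bytes, hence the fixed Offset increment below.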
1971 SDValue MemOps[NumFields]; 1972 unsigned Offset = 0; 1973 for (unsigned I = 0; I < NumFields; ++I) { 1974 SDValue FieldAddr = Addr; 1975 if (Offset != 0) 1976 FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr, 1977 DAG.getIntPtrConstant(Offset)); 1978 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr, 1979 MachinePointerInfo(SV, Offset), 1980 false, false, 0); 1981 Offset += 8; 1982 } 1983 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); 1984 } 1985 1986 SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op, 1987 SelectionDAG &DAG) const { 1988 SDValue Chain = Op.getOperand(0); 1989 SDValue DstPtr = Op.getOperand(1); 1990 SDValue SrcPtr = Op.getOperand(2); 1991 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 1992 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 1993 SDLoc DL(Op); 1994 1995 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32), 1996 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false, 1997 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); 1998 } 1999 2000 SDValue SystemZTargetLowering:: 2001 lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { 2002 SDValue Chain = Op.getOperand(0); 2003 SDValue Size = Op.getOperand(1); 2004 SDLoc DL(Op); 2005 2006 unsigned SPReg = getStackPointerRegisterToSaveRestore(); 2007 2008 // Get a reference to the stack pointer. 2009 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64); 2010 2011 // Get the new stack pointer value. 2012 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size); 2013 2014 // Copy the new stack pointer back. 2015 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP); 2016 2017 // The allocated data lives above the 160 bytes allocated for the standard 2018 // frame, plus any outgoing stack arguments. We don't know how much that 2019 // amounts to yet, so emit a special ADJDYNALLOC placeholder. 2020 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64); 2021 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust); 2022 2023 SDValue Ops[2] = { Result, Chain }; 2024 return DAG.getMergeValues(Ops, DL); 2025 } 2026 2027 SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op, 2028 SelectionDAG &DAG) const { 2029 EVT VT = Op.getValueType(); 2030 SDLoc DL(Op); 2031 SDValue Ops[2]; 2032 if (is32Bit(VT)) 2033 // Just do a normal 64-bit multiplication and extract the results. 2034 // We define this so that it can be used for constant division. 2035 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0), 2036 Op.getOperand(1), Ops[1], Ops[0]); 2037 else { 2038 // Do a full 128-bit multiplication based on UMUL_LOHI64: 2039 // 2040 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64) 2041 // 2042 // but using the fact that the upper halves are either all zeros 2043 // or all ones: 2044 // 2045 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64) 2046 // 2047 // and grouping the right terms together since they are quicker than the 2048 // multiplication: 2049 // 2050 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64) 2051 SDValue C63 = DAG.getConstant(63, MVT::i64); 2052 SDValue LL = Op.getOperand(0); 2053 SDValue RL = Op.getOperand(1); 2054 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63); 2055 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63); 2056 // UMUL_LOHI64 returns the low result in the odd register and the high 2057 // result in the even register. SMUL_LOHI is defined to return the 2058 // low half first, so the results are in reverse order. 
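// As a sanity check of the identity: LL = -1, RL = 1 gives an unsigned
// product of 0xffffffffffffffff (high half 0), LH = -1, RH = 0, so the
// correction below subtracts (1 + 0) from the high half and produces the
// expected signed result of -1.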
2059 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 2060 LL, RL, Ops[1], Ops[0]); 2061 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH); 2062 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL); 2063 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL); 2064 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum); 2065 } 2066 return DAG.getMergeValues(Ops, DL); 2067 } 2068 2069 SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op, 2070 SelectionDAG &DAG) const { 2071 EVT VT = Op.getValueType(); 2072 SDLoc DL(Op); 2073 SDValue Ops[2]; 2074 if (is32Bit(VT)) 2075 // Just do a normal 64-bit multiplication and extract the results. 2076 // We define this so that it can be used for constant division. 2077 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0), 2078 Op.getOperand(1), Ops[1], Ops[0]); 2079 else 2080 // UMUL_LOHI64 returns the low result in the odd register and the high 2081 // result in the even register. UMUL_LOHI is defined to return the 2082 // low half first, so the results are in reverse order. 2083 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64, 2084 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 2085 return DAG.getMergeValues(Ops, DL); 2086 } 2087 2088 SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op, 2089 SelectionDAG &DAG) const { 2090 SDValue Op0 = Op.getOperand(0); 2091 SDValue Op1 = Op.getOperand(1); 2092 EVT VT = Op.getValueType(); 2093 SDLoc DL(Op); 2094 unsigned Opcode; 2095 2096 // We use DSGF for 32-bit division. 2097 if (is32Bit(VT)) { 2098 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); 2099 Opcode = SystemZISD::SDIVREM32; 2100 } else if (DAG.ComputeNumSignBits(Op1) > 32) { 2101 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); 2102 Opcode = SystemZISD::SDIVREM32; 2103 } else 2104 Opcode = SystemZISD::SDIVREM64; 2105 2106 // DSG(F) takes a 64-bit dividend, so the even register in the GR128 2107 // input is "don't care". The instruction returns the remainder in 2108 // the even register and the quotient in the odd register. 2109 SDValue Ops[2]; 2110 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode, 2111 Op0, Op1, Ops[1], Ops[0]); 2112 return DAG.getMergeValues(Ops, DL); 2113 } 2114 2115 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, 2116 SelectionDAG &DAG) const { 2117 EVT VT = Op.getValueType(); 2118 SDLoc DL(Op); 2119 2120 // DL(G) uses a double-width dividend, so we need to clear the even 2121 // register in the GR128 input. The instruction returns the remainder 2122 // in the even register and the quotient in the odd register. 2123 SDValue Ops[2]; 2124 if (is32Bit(VT)) 2125 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32, 2126 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 2127 else 2128 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64, 2129 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 2130 return DAG.getMergeValues(Ops, DL); 2131 } 2132 2133 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { 2134 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation"); 2135 2136 // Get the known-zero masks for each operand. 
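// A typical input is (or (shl X, 32), (zero_extend Y)), where the low 32
// bits of the first operand and the high 32 bits of the second are known
// to be zero; such a pair can be combined with a subreg insertion instead
// of a full 64-bit OR.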
2137 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) }; 2138 APInt KnownZero[2], KnownOne[2]; 2139 DAG.computeKnownBits(Ops[0], KnownZero[0], KnownOne[0]); 2140 DAG.computeKnownBits(Ops[1], KnownZero[1], KnownOne[1]); 2141 2142 // See if the upper 32 bits of one operand and the lower 32 bits of the 2143 // other are known zero. They are the low and high operands respectively. 2144 uint64_t Masks[] = { KnownZero[0].getZExtValue(), 2145 KnownZero[1].getZExtValue() }; 2146 unsigned High, Low; 2147 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff) 2148 High = 1, Low = 0; 2149 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff) 2150 High = 0, Low = 1; 2151 else 2152 return Op; 2153 2154 SDValue LowOp = Ops[Low]; 2155 SDValue HighOp = Ops[High]; 2156 2157 // If the high part is a constant, we're better off using IILH. 2158 if (HighOp.getOpcode() == ISD::Constant) 2159 return Op; 2160 2161 // If the low part is a constant that is outside the range of LHI, 2162 // then we're better off using IILF. 2163 if (LowOp.getOpcode() == ISD::Constant) { 2164 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue()); 2165 if (!isInt<16>(Value)) 2166 return Op; 2167 } 2168 2169 // Check whether the high part is an AND that doesn't change the 2170 // high 32 bits and just masks out low bits. We can skip it if so. 2171 if (HighOp.getOpcode() == ISD::AND && 2172 HighOp.getOperand(1).getOpcode() == ISD::Constant) { 2173 SDValue HighOp0 = HighOp.getOperand(0); 2174 uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue(); 2175 if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff)))) 2176 HighOp = HighOp0; 2177 } 2178 2179 // Take advantage of the fact that all GR32 operations only change the 2180 // low 32 bits by truncating Low to an i32 and inserting it directly 2181 // using a subreg. The interesting cases are those where the truncation 2182 // can be folded. 2183 SDLoc DL(Op); 2184 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); 2185 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL, 2186 MVT::i64, HighOp, Low32); 2187 } 2188 2189 // Op is an atomic load. Lower it into a normal volatile load. 2190 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, 2191 SelectionDAG &DAG) const { 2192 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2193 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(), 2194 Node->getChain(), Node->getBasePtr(), 2195 Node->getMemoryVT(), Node->getMemOperand()); 2196 } 2197 2198 // Op is an atomic store. Lower it into a normal volatile store followed 2199 // by a serialization. 2200 SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op, 2201 SelectionDAG &DAG) const { 2202 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2203 SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(), 2204 Node->getBasePtr(), Node->getMemoryVT(), 2205 Node->getMemOperand()); 2206 return SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), MVT::Other, 2207 Chain), 0); 2208 } 2209 2210 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first 2211 // two into the fullword ATOMIC_LOADW_* operation given by Opcode. 2212 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op, 2213 SelectionDAG &DAG, 2214 unsigned Opcode) const { 2215 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2216 2217 // 32-bit operations need no code outside the main loop. 
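// For 8- and 16-bit operations, everything below arranges to operate on the
// naturally aligned 32-bit word containing the field: for example, a byte at
// (Addr & 3) == 3 is brought to the top of the GR32 by an RLL of 24 bits,
// updated there, and rotated back before the CS.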
2218 EVT NarrowVT = Node->getMemoryVT(); 2219 EVT WideVT = MVT::i32; 2220 if (NarrowVT == WideVT) 2221 return Op; 2222 2223 int64_t BitSize = NarrowVT.getSizeInBits(); 2224 SDValue ChainIn = Node->getChain(); 2225 SDValue Addr = Node->getBasePtr(); 2226 SDValue Src2 = Node->getVal(); 2227 MachineMemOperand *MMO = Node->getMemOperand(); 2228 SDLoc DL(Node); 2229 EVT PtrVT = Addr.getValueType(); 2230 2231 // Convert atomic subtracts of constants into additions. 2232 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB) 2233 if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) { 2234 Opcode = SystemZISD::ATOMIC_LOADW_ADD; 2235 Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType()); 2236 } 2237 2238 // Get the address of the containing word. 2239 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 2240 DAG.getConstant(-4, PtrVT)); 2241 2242 // Get the number of bits that the word must be rotated left in order 2243 // to bring the field to the top bits of a GR32. 2244 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 2245 DAG.getConstant(3, PtrVT)); 2246 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 2247 2248 // Get the complementing shift amount, for rotating a field in the top 2249 // bits back to its proper position. 2250 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 2251 DAG.getConstant(0, WideVT), BitShift); 2252 2253 // Extend the source operand to 32 bits and prepare it for the inner loop. 2254 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other 2255 // operations require the source to be shifted in advance. (This shift 2256 // can be folded if the source is constant.) For AND and NAND, the lower 2257 // bits must be set, while for other opcodes they should be left clear. 2258 if (Opcode != SystemZISD::ATOMIC_SWAPW) 2259 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2, 2260 DAG.getConstant(32 - BitSize, WideVT)); 2261 if (Opcode == SystemZISD::ATOMIC_LOADW_AND || 2262 Opcode == SystemZISD::ATOMIC_LOADW_NAND) 2263 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2, 2264 DAG.getConstant(uint32_t(-1) >> BitSize, WideVT)); 2265 2266 // Construct the ATOMIC_LOADW_* node. 2267 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 2268 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift, 2269 DAG.getConstant(BitSize, WideVT) }; 2270 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops, 2271 NarrowVT, MMO); 2272 2273 // Rotate the result of the final CS so that the field is in the lower 2274 // bits of a GR32, then truncate it. 2275 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift, 2276 DAG.getConstant(BitSize, WideVT)); 2277 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift); 2278 2279 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) }; 2280 return DAG.getMergeValues(RetOps, DL); 2281 } 2282 2283 // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations 2284 // into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit 2285 // operations into additions. 2286 SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op, 2287 SelectionDAG &DAG) const { 2288 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2289 EVT MemVT = Node->getMemoryVT(); 2290 if (MemVT == MVT::i32 || MemVT == MVT::i64) { 2291 // A full-width operation. 
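// e.g. an atomic subtraction of 1 becomes an atomic addition of -1, which
// the normal ATOMIC_LOAD_ADD path can then implement directly.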
2292 assert(Op.getValueType() == MemVT && "Mismatched VTs"); 2293 SDValue Src2 = Node->getVal(); 2294 SDValue NegSrc2; 2295 SDLoc DL(Src2); 2296 2297 if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) { 2298 // Use an addition if the operand is constant and either LAA(G) is 2299 // available or the negative value is in the range of A(G)FHI. 2300 int64_t Value = (-Op2->getAPIntValue()).getSExtValue(); 2301 if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1()) 2302 NegSrc2 = DAG.getConstant(Value, MemVT); 2303 } else if (Subtarget.hasInterlockedAccess1()) 2304 // Use LAA(G) if available. 2305 NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, MemVT), 2306 Src2); 2307 2308 if (NegSrc2.getNode()) 2309 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT, 2310 Node->getChain(), Node->getBasePtr(), NegSrc2, 2311 Node->getMemOperand(), Node->getOrdering(), 2312 Node->getSynchScope()); 2313 2314 // Use the node as-is. 2315 return Op; 2316 } 2317 2318 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); 2319 } 2320 2321 // Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower the first two 2322 // into a fullword ATOMIC_CMP_SWAPW operation. 2323 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, 2324 SelectionDAG &DAG) const { 2325 auto *Node = cast<AtomicSDNode>(Op.getNode()); 2326 2327 // We have native support for 32-bit compare and swap. 2328 EVT NarrowVT = Node->getMemoryVT(); 2329 EVT WideVT = MVT::i32; 2330 if (NarrowVT == WideVT) 2331 return Op; 2332 2333 int64_t BitSize = NarrowVT.getSizeInBits(); 2334 SDValue ChainIn = Node->getOperand(0); 2335 SDValue Addr = Node->getOperand(1); 2336 SDValue CmpVal = Node->getOperand(2); 2337 SDValue SwapVal = Node->getOperand(3); 2338 MachineMemOperand *MMO = Node->getMemOperand(); 2339 SDLoc DL(Node); 2340 EVT PtrVT = Addr.getValueType(); 2341 2342 // Get the address of the containing word. 2343 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 2344 DAG.getConstant(-4, PtrVT)); 2345 2346 // Get the number of bits that the word must be rotated left in order 2347 // to bring the field to the top bits of a GR32. 2348 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 2349 DAG.getConstant(3, PtrVT)); 2350 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 2351 2352 // Get the complementing shift amount, for rotating a field in the top 2353 // bits back to its proper position. 2354 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 2355 DAG.getConstant(0, WideVT), BitShift); 2356 2357 // Construct the ATOMIC_CMP_SWAPW node. 
2358 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 2359 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, 2360 NegBitShift, DAG.getConstant(BitSize, WideVT) }; 2361 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, 2362 VTList, Ops, NarrowVT, MMO); 2363 return AtomicOp; 2364 } 2365 2366 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, 2367 SelectionDAG &DAG) const { 2368 MachineFunction &MF = DAG.getMachineFunction(); 2369 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 2370 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), 2371 SystemZ::R15D, Op.getValueType()); 2372 } 2373 2374 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, 2375 SelectionDAG &DAG) const { 2376 MachineFunction &MF = DAG.getMachineFunction(); 2377 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 2378 return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), 2379 SystemZ::R15D, Op.getOperand(1)); 2380 } 2381 2382 SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op, 2383 SelectionDAG &DAG) const { 2384 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 2385 if (!IsData) 2386 // Just preserve the chain. 2387 return Op.getOperand(0); 2388 2389 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 2390 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ; 2391 auto *Node = cast<MemIntrinsicSDNode>(Op.getNode()); 2392 SDValue Ops[] = { 2393 Op.getOperand(0), 2394 DAG.getConstant(Code, MVT::i32), 2395 Op.getOperand(1) 2396 }; 2397 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op), 2398 Node->getVTList(), Ops, 2399 Node->getMemoryVT(), Node->getMemOperand()); 2400 } 2401 2402 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, 2403 SelectionDAG &DAG) const { 2404 switch (Op.getOpcode()) { 2405 case ISD::BR_CC: 2406 return lowerBR_CC(Op, DAG); 2407 case ISD::SELECT_CC: 2408 return lowerSELECT_CC(Op, DAG); 2409 case ISD::SETCC: 2410 return lowerSETCC(Op, DAG); 2411 case ISD::GlobalAddress: 2412 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); 2413 case ISD::GlobalTLSAddress: 2414 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); 2415 case ISD::BlockAddress: 2416 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); 2417 case ISD::JumpTable: 2418 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); 2419 case ISD::ConstantPool: 2420 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); 2421 case ISD::BITCAST: 2422 return lowerBITCAST(Op, DAG); 2423 case ISD::VASTART: 2424 return lowerVASTART(Op, DAG); 2425 case ISD::VACOPY: 2426 return lowerVACOPY(Op, DAG); 2427 case ISD::DYNAMIC_STACKALLOC: 2428 return lowerDYNAMIC_STACKALLOC(Op, DAG); 2429 case ISD::SMUL_LOHI: 2430 return lowerSMUL_LOHI(Op, DAG); 2431 case ISD::UMUL_LOHI: 2432 return lowerUMUL_LOHI(Op, DAG); 2433 case ISD::SDIVREM: 2434 return lowerSDIVREM(Op, DAG); 2435 case ISD::UDIVREM: 2436 return lowerUDIVREM(Op, DAG); 2437 case ISD::OR: 2438 return lowerOR(Op, DAG); 2439 case ISD::ATOMIC_SWAP: 2440 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW); 2441 case ISD::ATOMIC_STORE: 2442 return lowerATOMIC_STORE(Op, DAG); 2443 case ISD::ATOMIC_LOAD: 2444 return lowerATOMIC_LOAD(Op, DAG); 2445 case ISD::ATOMIC_LOAD_ADD: 2446 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); 2447 case ISD::ATOMIC_LOAD_SUB: 2448 return lowerATOMIC_LOAD_SUB(Op, DAG); 2449 case ISD::ATOMIC_LOAD_AND: 2450 return lowerATOMIC_LOAD_OP(Op, DAG, 
SystemZISD::ATOMIC_LOADW_AND); 2451 case ISD::ATOMIC_LOAD_OR: 2452 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); 2453 case ISD::ATOMIC_LOAD_XOR: 2454 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); 2455 case ISD::ATOMIC_LOAD_NAND: 2456 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); 2457 case ISD::ATOMIC_LOAD_MIN: 2458 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); 2459 case ISD::ATOMIC_LOAD_MAX: 2460 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); 2461 case ISD::ATOMIC_LOAD_UMIN: 2462 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); 2463 case ISD::ATOMIC_LOAD_UMAX: 2464 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); 2465 case ISD::ATOMIC_CMP_SWAP: 2466 return lowerATOMIC_CMP_SWAP(Op, DAG); 2467 case ISD::STACKSAVE: 2468 return lowerSTACKSAVE(Op, DAG); 2469 case ISD::STACKRESTORE: 2470 return lowerSTACKRESTORE(Op, DAG); 2471 case ISD::PREFETCH: 2472 return lowerPREFETCH(Op, DAG); 2473 default: 2474 llvm_unreachable("Unexpected node to lower"); 2475 } 2476 } 2477 2478 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { 2479 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME 2480 switch (Opcode) { 2481 OPCODE(RET_FLAG); 2482 OPCODE(CALL); 2483 OPCODE(SIBCALL); 2484 OPCODE(PCREL_WRAPPER); 2485 OPCODE(PCREL_OFFSET); 2486 OPCODE(IABS); 2487 OPCODE(ICMP); 2488 OPCODE(FCMP); 2489 OPCODE(TM); 2490 OPCODE(BR_CCMASK); 2491 OPCODE(SELECT_CCMASK); 2492 OPCODE(ADJDYNALLOC); 2493 OPCODE(EXTRACT_ACCESS); 2494 OPCODE(UMUL_LOHI64); 2495 OPCODE(SDIVREM64); 2496 OPCODE(UDIVREM32); 2497 OPCODE(UDIVREM64); 2498 OPCODE(MVC); 2499 OPCODE(MVC_LOOP); 2500 OPCODE(NC); 2501 OPCODE(NC_LOOP); 2502 OPCODE(OC); 2503 OPCODE(OC_LOOP); 2504 OPCODE(XC); 2505 OPCODE(XC_LOOP); 2506 OPCODE(CLC); 2507 OPCODE(CLC_LOOP); 2508 OPCODE(STRCMP); 2509 OPCODE(STPCPY); 2510 OPCODE(SEARCH_STRING); 2511 OPCODE(IPM); 2512 OPCODE(SERIALIZE); 2513 OPCODE(ATOMIC_SWAPW); 2514 OPCODE(ATOMIC_LOADW_ADD); 2515 OPCODE(ATOMIC_LOADW_SUB); 2516 OPCODE(ATOMIC_LOADW_AND); 2517 OPCODE(ATOMIC_LOADW_OR); 2518 OPCODE(ATOMIC_LOADW_XOR); 2519 OPCODE(ATOMIC_LOADW_NAND); 2520 OPCODE(ATOMIC_LOADW_MIN); 2521 OPCODE(ATOMIC_LOADW_MAX); 2522 OPCODE(ATOMIC_LOADW_UMIN); 2523 OPCODE(ATOMIC_LOADW_UMAX); 2524 OPCODE(ATOMIC_CMP_SWAPW); 2525 OPCODE(PREFETCH); 2526 } 2527 return nullptr; 2528 #undef OPCODE 2529 } 2530 2531 SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N, 2532 DAGCombinerInfo &DCI) const { 2533 SelectionDAG &DAG = DCI.DAG; 2534 unsigned Opcode = N->getOpcode(); 2535 if (Opcode == ISD::SIGN_EXTEND) { 2536 // Convert (sext (ashr (shl X, C1), C2)) to 2537 // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as 2538 // cheap as narrower ones. 
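// For example, (sext i32 -> i64 (ashr (shl X, 24), 25)) becomes
// (ashr (shl (anyext X), 56), 57): both shift amounts grow by the 32 bits
// of extra width.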
2539 SDValue N0 = N->getOperand(0); 2540 EVT VT = N->getValueType(0); 2541 if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) { 2542 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 2543 SDValue Inner = N0.getOperand(0); 2544 if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) { 2545 if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) { 2546 unsigned Extra = (VT.getSizeInBits() - 2547 N0.getValueType().getSizeInBits()); 2548 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra; 2549 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra; 2550 EVT ShiftVT = N0.getOperand(1).getValueType(); 2551 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT, 2552 Inner.getOperand(0)); 2553 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext, 2554 DAG.getConstant(NewShlAmt, ShiftVT)); 2555 return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, 2556 DAG.getConstant(NewSraAmt, ShiftVT)); 2557 } 2558 } 2559 } 2560 } 2561 return SDValue(); 2562 } 2563 2564 //===----------------------------------------------------------------------===// 2565 // Custom insertion 2566 //===----------------------------------------------------------------------===// 2567 2568 // Create a new basic block after MBB. 2569 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { 2570 MachineFunction &MF = *MBB->getParent(); 2571 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); 2572 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB); 2573 return NewMBB; 2574 } 2575 2576 // Split MBB after MI and return the new block (the one that contains 2577 // instructions after MI). 2578 static MachineBasicBlock *splitBlockAfter(MachineInstr *MI, 2579 MachineBasicBlock *MBB) { 2580 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 2581 NewMBB->splice(NewMBB->begin(), MBB, 2582 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 2583 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 2584 return NewMBB; 2585 } 2586 2587 // Split MBB before MI and return the new block (the one that contains MI). 2588 static MachineBasicBlock *splitBlockBefore(MachineInstr *MI, 2589 MachineBasicBlock *MBB) { 2590 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 2591 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end()); 2592 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 2593 return NewMBB; 2594 } 2595 2596 // Force base value Base into a register before MI. Return the register. 2597 static unsigned forceReg(MachineInstr *MI, MachineOperand &Base, 2598 const SystemZInstrInfo *TII) { 2599 if (Base.isReg()) 2600 return Base.getReg(); 2601 2602 MachineBasicBlock *MBB = MI->getParent(); 2603 MachineFunction &MF = *MBB->getParent(); 2604 MachineRegisterInfo &MRI = MF.getRegInfo(); 2605 2606 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); 2607 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg) 2608 .addOperand(Base).addImm(0).addReg(0); 2609 return Reg; 2610 } 2611 2612 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. 
2613 MachineBasicBlock * 2614 SystemZTargetLowering::emitSelect(MachineInstr *MI, 2615 MachineBasicBlock *MBB) const { 2616 const SystemZInstrInfo *TII = static_cast<const SystemZInstrInfo *>( 2617 MBB->getParent()->getTarget().getInstrInfo()); 2618 2619 unsigned DestReg = MI->getOperand(0).getReg(); 2620 unsigned TrueReg = MI->getOperand(1).getReg(); 2621 unsigned FalseReg = MI->getOperand(2).getReg(); 2622 unsigned CCValid = MI->getOperand(3).getImm(); 2623 unsigned CCMask = MI->getOperand(4).getImm(); 2624 DebugLoc DL = MI->getDebugLoc(); 2625 2626 MachineBasicBlock *StartMBB = MBB; 2627 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 2628 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 2629 2630 // StartMBB: 2631 // BRC CCMask, JoinMBB 2632 // # fallthrough to FalseMBB 2633 MBB = StartMBB; 2634 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2635 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 2636 MBB->addSuccessor(JoinMBB); 2637 MBB->addSuccessor(FalseMBB); 2638 2639 // FalseMBB: 2640 // # fallthrough to JoinMBB 2641 MBB = FalseMBB; 2642 MBB->addSuccessor(JoinMBB); 2643 2644 // JoinMBB: 2645 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] 2646 // ... 2647 MBB = JoinMBB; 2648 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg) 2649 .addReg(TrueReg).addMBB(StartMBB) 2650 .addReg(FalseReg).addMBB(FalseMBB); 2651 2652 MI->eraseFromParent(); 2653 return JoinMBB; 2654 } 2655 2656 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. 2657 // StoreOpcode is the store to use and Invert says whether the store should 2658 // happen when the condition is false rather than true. If a STORE ON 2659 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. 2660 MachineBasicBlock * 2661 SystemZTargetLowering::emitCondStore(MachineInstr *MI, 2662 MachineBasicBlock *MBB, 2663 unsigned StoreOpcode, unsigned STOCOpcode, 2664 bool Invert) const { 2665 const SystemZInstrInfo *TII = static_cast<const SystemZInstrInfo *>( 2666 MBB->getParent()->getTarget().getInstrInfo()); 2667 2668 unsigned SrcReg = MI->getOperand(0).getReg(); 2669 MachineOperand Base = MI->getOperand(1); 2670 int64_t Disp = MI->getOperand(2).getImm(); 2671 unsigned IndexReg = MI->getOperand(3).getReg(); 2672 unsigned CCValid = MI->getOperand(4).getImm(); 2673 unsigned CCMask = MI->getOperand(5).getImm(); 2674 DebugLoc DL = MI->getDebugLoc(); 2675 2676 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); 2677 2678 // Use STOCOpcode if possible. We could use different store patterns in 2679 // order to avoid matching the index register, but the performance trade-offs 2680 // might be more complicated in that case. 2681 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) { 2682 if (Invert) 2683 CCMask ^= CCValid; 2684 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) 2685 .addReg(SrcReg).addOperand(Base).addImm(Disp) 2686 .addImm(CCValid).addImm(CCMask); 2687 MI->eraseFromParent(); 2688 return MBB; 2689 } 2690 2691 // Get the condition needed to branch around the store. 
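// The BRC below skips the store, so it must test the opposite of the store
// condition; XOR-ing CCMask with CCValid inverts it within the valid CC set
// (an EQ store condition becomes an NE branch condition, for instance).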
2692 if (!Invert) 2693 CCMask ^= CCValid; 2694 2695 MachineBasicBlock *StartMBB = MBB; 2696 MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB); 2697 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 2698 2699 // StartMBB: 2700 // BRC CCMask, JoinMBB 2701 // # fallthrough to FalseMBB 2702 MBB = StartMBB; 2703 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2704 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 2705 MBB->addSuccessor(JoinMBB); 2706 MBB->addSuccessor(FalseMBB); 2707 2708 // FalseMBB: 2709 // store %SrcReg, %Disp(%Index,%Base) 2710 // # fallthrough to JoinMBB 2711 MBB = FalseMBB; 2712 BuildMI(MBB, DL, TII->get(StoreOpcode)) 2713 .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg); 2714 MBB->addSuccessor(JoinMBB); 2715 2716 MI->eraseFromParent(); 2717 return JoinMBB; 2718 } 2719 2720 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* 2721 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that 2722 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. 2723 // BitSize is the width of the field in bits, or 0 if this is a partword 2724 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize 2725 // is one of the operands. Invert says whether the field should be 2726 // inverted after performing BinOpcode (e.g. for NAND). 2727 MachineBasicBlock * 2728 SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, 2729 MachineBasicBlock *MBB, 2730 unsigned BinOpcode, 2731 unsigned BitSize, 2732 bool Invert) const { 2733 MachineFunction &MF = *MBB->getParent(); 2734 const SystemZInstrInfo *TII = 2735 static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo()); 2736 MachineRegisterInfo &MRI = MF.getRegInfo(); 2737 bool IsSubWord = (BitSize < 32); 2738 2739 // Extract the operands. Base can be a register or a frame index. 2740 // Src2 can be a register or immediate. 2741 unsigned Dest = MI->getOperand(0).getReg(); 2742 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2743 int64_t Disp = MI->getOperand(2).getImm(); 2744 MachineOperand Src2 = earlyUseOperand(MI->getOperand(3)); 2745 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 2746 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 2747 DebugLoc DL = MI->getDebugLoc(); 2748 if (IsSubWord) 2749 BitSize = MI->getOperand(6).getImm(); 2750 2751 // Subword operations use 32-bit registers. 2752 const TargetRegisterClass *RC = (BitSize <= 32 ? 2753 &SystemZ::GR32BitRegClass : 2754 &SystemZ::GR64BitRegClass); 2755 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 2756 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 2757 2758 // Get the right opcodes for the displacement. 2759 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 2760 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 2761 assert(LOpcode && CSOpcode && "Displacement out of range"); 2762 2763 // Create virtual registers for temporary results. 2764 unsigned OrigVal = MRI.createVirtualRegister(RC); 2765 unsigned OldVal = MRI.createVirtualRegister(RC); 2766 unsigned NewVal = (BinOpcode || IsSubWord ? 2767 MRI.createVirtualRegister(RC) : Src2.getReg()); 2768 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 2769 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 2770 2771 // Insert a basic block for the main loop. 
2772 MachineBasicBlock *StartMBB = MBB; 2773 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2774 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2775 2776 // StartMBB: 2777 // ... 2778 // %OrigVal = L Disp(%Base) 2779 // # fall through to LoopMMB 2780 MBB = StartMBB; 2781 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) 2782 .addOperand(Base).addImm(Disp).addReg(0); 2783 MBB->addSuccessor(LoopMBB); 2784 2785 // LoopMBB: 2786 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ] 2787 // %RotatedOldVal = RLL %OldVal, 0(%BitShift) 2788 // %RotatedNewVal = OP %RotatedOldVal, %Src2 2789 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) 2790 // %Dest = CS %OldVal, %NewVal, Disp(%Base) 2791 // JNE LoopMBB 2792 // # fall through to DoneMMB 2793 MBB = LoopMBB; 2794 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 2795 .addReg(OrigVal).addMBB(StartMBB) 2796 .addReg(Dest).addMBB(LoopMBB); 2797 if (IsSubWord) 2798 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) 2799 .addReg(OldVal).addReg(BitShift).addImm(0); 2800 if (Invert) { 2801 // Perform the operation normally and then invert every bit of the field. 2802 unsigned Tmp = MRI.createVirtualRegister(RC); 2803 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp) 2804 .addReg(RotatedOldVal).addOperand(Src2); 2805 if (BitSize < 32) 2806 // XILF with the upper BitSize bits set. 2807 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) 2808 .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize))); 2809 else if (BitSize == 32) 2810 // XILF with every bit set. 2811 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) 2812 .addReg(Tmp).addImm(~uint32_t(0)); 2813 else { 2814 // Use LCGR and add -1 to the result, which is more compact than 2815 // an XILF, XILH pair. 2816 unsigned Tmp2 = MRI.createVirtualRegister(RC); 2817 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp); 2818 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal) 2819 .addReg(Tmp2).addImm(-1); 2820 } 2821 } else if (BinOpcode) 2822 // A simply binary operation. 2823 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) 2824 .addReg(RotatedOldVal).addOperand(Src2); 2825 else if (IsSubWord) 2826 // Use RISBG to rotate Src2 into position and use it to replace the 2827 // field in RotatedOldVal. 2828 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal) 2829 .addReg(RotatedOldVal).addReg(Src2.getReg()) 2830 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize); 2831 if (IsSubWord) 2832 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 2833 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 2834 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 2835 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); 2836 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2837 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 2838 MBB->addSuccessor(LoopMBB); 2839 MBB->addSuccessor(DoneMBB); 2840 2841 MI->eraseFromParent(); 2842 return DoneMBB; 2843 } 2844 2845 // Implement EmitInstrWithCustomInserter for pseudo 2846 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the 2847 // instruction that should be used to compare the current field with the 2848 // minimum or maximum value. KeepOldMask is the BRC condition-code mask 2849 // for when the current field should be kept. BitSize is the width of 2850 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction. 
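// For a signed minimum, for instance, CompareOpcode is a signed compare and
// KeepOldMask is CCMASK_CMP_LE: the loop keeps the current field whenever it
// is already less than or equal to the operand.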
2851 MachineBasicBlock * 2852 SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, 2853 MachineBasicBlock *MBB, 2854 unsigned CompareOpcode, 2855 unsigned KeepOldMask, 2856 unsigned BitSize) const { 2857 MachineFunction &MF = *MBB->getParent(); 2858 const SystemZInstrInfo *TII = 2859 static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo()); 2860 MachineRegisterInfo &MRI = MF.getRegInfo(); 2861 bool IsSubWord = (BitSize < 32); 2862 2863 // Extract the operands. Base can be a register or a frame index. 2864 unsigned Dest = MI->getOperand(0).getReg(); 2865 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2866 int64_t Disp = MI->getOperand(2).getImm(); 2867 unsigned Src2 = MI->getOperand(3).getReg(); 2868 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 2869 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 2870 DebugLoc DL = MI->getDebugLoc(); 2871 if (IsSubWord) 2872 BitSize = MI->getOperand(6).getImm(); 2873 2874 // Subword operations use 32-bit registers. 2875 const TargetRegisterClass *RC = (BitSize <= 32 ? 2876 &SystemZ::GR32BitRegClass : 2877 &SystemZ::GR64BitRegClass); 2878 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 2879 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 2880 2881 // Get the right opcodes for the displacement. 2882 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 2883 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 2884 assert(LOpcode && CSOpcode && "Displacement out of range"); 2885 2886 // Create virtual registers for temporary results. 2887 unsigned OrigVal = MRI.createVirtualRegister(RC); 2888 unsigned OldVal = MRI.createVirtualRegister(RC); 2889 unsigned NewVal = MRI.createVirtualRegister(RC); 2890 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 2891 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2); 2892 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 2893 2894 // Insert 3 basic blocks for the loop. 2895 MachineBasicBlock *StartMBB = MBB; 2896 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); 2897 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2898 MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB); 2899 MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB); 2900 2901 // StartMBB: 2902 // ... 
2903 // %OrigVal = L Disp(%Base) 2904 // # fall through to LoopMMB 2905 MBB = StartMBB; 2906 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) 2907 .addOperand(Base).addImm(Disp).addReg(0); 2908 MBB->addSuccessor(LoopMBB); 2909 2910 // LoopMBB: 2911 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ] 2912 // %RotatedOldVal = RLL %OldVal, 0(%BitShift) 2913 // CompareOpcode %RotatedOldVal, %Src2 2914 // BRC KeepOldMask, UpdateMBB 2915 MBB = LoopMBB; 2916 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 2917 .addReg(OrigVal).addMBB(StartMBB) 2918 .addReg(Dest).addMBB(UpdateMBB); 2919 if (IsSubWord) 2920 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) 2921 .addReg(OldVal).addReg(BitShift).addImm(0); 2922 BuildMI(MBB, DL, TII->get(CompareOpcode)) 2923 .addReg(RotatedOldVal).addReg(Src2); 2924 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2925 .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB); 2926 MBB->addSuccessor(UpdateMBB); 2927 MBB->addSuccessor(UseAltMBB); 2928 2929 // UseAltMBB: 2930 // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0 2931 // # fall through to UpdateMMB 2932 MBB = UseAltMBB; 2933 if (IsSubWord) 2934 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal) 2935 .addReg(RotatedOldVal).addReg(Src2) 2936 .addImm(32).addImm(31 + BitSize).addImm(0); 2937 MBB->addSuccessor(UpdateMBB); 2938 2939 // UpdateMBB: 2940 // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ], 2941 // [ %RotatedAltVal, UseAltMBB ] 2942 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) 2943 // %Dest = CS %OldVal, %NewVal, Disp(%Base) 2944 // JNE LoopMBB 2945 // # fall through to DoneMMB 2946 MBB = UpdateMBB; 2947 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal) 2948 .addReg(RotatedOldVal).addMBB(LoopMBB) 2949 .addReg(RotatedAltVal).addMBB(UseAltMBB); 2950 if (IsSubWord) 2951 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 2952 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 2953 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 2954 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); 2955 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2956 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 2957 MBB->addSuccessor(LoopMBB); 2958 MBB->addSuccessor(DoneMBB); 2959 2960 MI->eraseFromParent(); 2961 return DoneMBB; 2962 } 2963 2964 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW 2965 // instruction MI. 2966 MachineBasicBlock * 2967 SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, 2968 MachineBasicBlock *MBB) const { 2969 MachineFunction &MF = *MBB->getParent(); 2970 const SystemZInstrInfo *TII = 2971 static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo()); 2972 MachineRegisterInfo &MRI = MF.getRegInfo(); 2973 2974 // Extract the operands. Base can be a register or a frame index. 2975 unsigned Dest = MI->getOperand(0).getReg(); 2976 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2977 int64_t Disp = MI->getOperand(2).getImm(); 2978 unsigned OrigCmpVal = MI->getOperand(3).getReg(); 2979 unsigned OrigSwapVal = MI->getOperand(4).getReg(); 2980 unsigned BitShift = MI->getOperand(5).getReg(); 2981 unsigned NegBitShift = MI->getOperand(6).getReg(); 2982 int64_t BitSize = MI->getOperand(7).getImm(); 2983 DebugLoc DL = MI->getDebugLoc(); 2984 2985 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass; 2986 2987 // Get the right opcodes for the displacement. 
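// As in emitAtomicLoadBinary above, getOpcodeForOffset picks the instruction
// form whose displacement field can encode Disp (L vs. LY, CS vs. CSY) and
// returns 0 if no form can, which the assert below rejects.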
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  unsigned OrigOldVal = MRI.createVirtualRegister(RC);
  unsigned OldVal = MRI.createVirtualRegister(RC);
  unsigned CmpVal = MRI.createVirtualRegister(RC);
  unsigned SwapVal = MRI.createVirtualRegister(RC);
  unsigned StoreVal = MRI.createVirtualRegister(RC);
  unsigned RetryOldVal = MRI.createVirtualRegister(RC);
  unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
  unsigned RetrySwapVal = MRI.createVirtualRegister(RC);

  // Insert 2 basic blocks for the loop.
  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
  MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);

  // StartMBB:
  //   ...
  //   %OrigOldVal = L Disp(%Base)
  //   # fall through to LoopMBB
  MBB = StartMBB;
  BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
    .addOperand(Base).addImm(Disp).addReg(0);
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %OldVal  = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
  //   %CmpVal  = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ]
  //   %SwapVal = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
  //   %Dest = RLL %OldVal, BitSize(%BitShift)
  //                      ^^ The low BitSize bits contain the field
  //                         of interest.
  //   %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
  //                      ^^ Replace the upper 32-BitSize bits of the
  //                         comparison value with those that we loaded,
  //                         so that we can use a full word comparison.
  //   CR %Dest, %RetryCmpVal
  //   JNE DoneMBB
  //   # Fall through to SetMBB
  MBB = LoopMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
    .addReg(OrigOldVal).addMBB(StartMBB)
    .addReg(RetryOldVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
    .addReg(OrigCmpVal).addMBB(StartMBB)
    .addReg(RetryCmpVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
    .addReg(OrigSwapVal).addMBB(StartMBB)
    .addReg(RetrySwapVal).addMBB(SetMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
    .addReg(OldVal).addReg(BitShift).addImm(BitSize);
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
    .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::CR))
    .addReg(Dest).addReg(RetryCmpVal);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP)
    .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
  MBB->addSuccessor(DoneMBB);
  MBB->addSuccessor(SetMBB);

  // SetMBB:
  //   %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
  //                      ^^ Replace the upper 32-BitSize bits of the new
  //                         value with those that we loaded.
  //   %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
  //                      ^^ Rotate the new field to its proper position.
  //   %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
  //   JNE LoopMBB
  //   # fall through to DoneMBB
  MBB = SetMBB;
  BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
    .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
  BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
    .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
  BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
    .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  MI->eraseFromParent();
  return DoneMBB;
}

// Emit an extension from a GR32 or GR64 to a GR128.  ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care".  SubReg is subreg_l32 when extending a GR32
// and subreg_l64 when extending a GR64.
MachineBasicBlock *
SystemZTargetLowering::emitExt128(MachineInstr *MI,
                                  MachineBasicBlock *MBB,
                                  bool ClearEven, unsigned SubReg) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
    static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
      .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
      .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
    .addReg(In128).addReg(Src).addImm(SubReg);

  MI->eraseFromParent();
  return MBB;
}

MachineBasicBlock *
SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
    static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand DestBase = earlyUseOperand(MI->getOperand(0));
  uint64_t DestDisp = MI->getOperand(1).getImm();
  MachineOperand SrcBase = earlyUseOperand(MI->getOperand(2));
  uint64_t SrcDisp = MI->getOperand(3).getImm();
  uint64_t Length = MI->getOperand(4).getImm();

  // When generating more than one CLC, all but the last will need to
  // branch to the end when a difference is found.
  MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
                               splitBlockAfter(MI, MBB) : nullptr);

  // Check for the loop form, in which operand 5 is the trip count.
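  // The *Loop forms of these pseudos carry the trip count as operand 5;
  // the *Sequence forms have no such operand and are handled entirely by
  // the straight-line expansion further down.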
  if (MI->getNumExplicitOperands() > 5) {
    bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);

    unsigned StartCountReg = MI->getOperand(5).getReg();
    unsigned StartSrcReg = forceReg(MI, SrcBase, TII);
    unsigned StartDestReg = (HaveSingleBase ? StartSrcReg :
                             forceReg(MI, DestBase, TII));

    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
    unsigned ThisSrcReg = MRI.createVirtualRegister(RC);
    unsigned ThisDestReg = (HaveSingleBase ? ThisSrcReg :
                            MRI.createVirtualRegister(RC));
    unsigned NextSrcReg = MRI.createVirtualRegister(RC);
    unsigned NextDestReg = (HaveSingleBase ? NextSrcReg :
                            MRI.createVirtualRegister(RC));

    RC = &SystemZ::GR64BitRegClass;
    unsigned ThisCountReg = MRI.createVirtualRegister(RC);
    unsigned NextCountReg = MRI.createVirtualRegister(RC);

    MachineBasicBlock *StartMBB = MBB;
    MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
    MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
    MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);

    // StartMBB:
    //   # fall through to LoopMBB
    MBB->addSuccessor(LoopMBB);

    // LoopMBB:
    //   %ThisDestReg = phi [ %StartDestReg, StartMBB ],
    //                      [ %NextDestReg, NextMBB ]
    //   %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
    //                     [ %NextSrcReg, NextMBB ]
    //   %ThisCountReg = phi [ %StartCountReg, StartMBB ],
    //                       [ %NextCountReg, NextMBB ]
    //   ( PFD 2, 768+DestDisp(%ThisDestReg) )
    //   Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
    //   ( JLH EndMBB )
    //
    // The prefetch is used only for MVC.  The JLH is used only for CLC.
    MBB = LoopMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
      .addReg(StartDestReg).addMBB(StartMBB)
      .addReg(NextDestReg).addMBB(NextMBB);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
        .addReg(StartSrcReg).addMBB(StartMBB)
        .addReg(NextSrcReg).addMBB(NextMBB);
    BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
      .addReg(StartCountReg).addMBB(StartMBB)
      .addReg(NextCountReg).addMBB(NextMBB);
    if (Opcode == SystemZ::MVC)
      BuildMI(MBB, DL, TII->get(SystemZ::PFD))
        .addImm(SystemZ::PFD_WRITE)
        .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
    BuildMI(MBB, DL, TII->get(Opcode))
      .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
      .addReg(ThisSrcReg).addImm(SrcDisp);
    if (EndMBB) {
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
    }

    // NextMBB:
    //   %NextDestReg = LA 256(%ThisDestReg)
    //   %NextSrcReg = LA 256(%ThisSrcReg)
    //   %NextCountReg = AGHI %ThisCountReg, -1
    //   CGHI %NextCountReg, 0
    //   JLH LoopMBB
    //   # fall through to DoneMBB
    //
    // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
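    // (BRCTG decrements its count register and branches while the result
    // is nonzero, so the separate decrement, compare and branch can be
    // fused into a single instruction.)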
    MBB = NextMBB;

    BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
      .addReg(ThisDestReg).addImm(256).addReg(0);
    if (!HaveSingleBase)
      BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
        .addReg(ThisSrcReg).addImm(256).addReg(0);
    BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
      .addReg(ThisCountReg).addImm(-1);
    BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
      .addReg(NextCountReg).addImm(0);
    BuildMI(MBB, DL, TII->get(SystemZ::BRC))
      .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
      .addMBB(LoopMBB);
    MBB->addSuccessor(LoopMBB);
    MBB->addSuccessor(DoneMBB);

    DestBase = MachineOperand::CreateReg(NextDestReg, false);
    SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
    Length &= 255;
    MBB = DoneMBB;
  }
  // Handle any remaining bytes with straight-line code.
  while (Length > 0) {
    uint64_t ThisLength = std::min(Length, uint64_t(256));
    // The previous iteration might have created out-of-range displacements.
    // Apply them using LAY if so.
    if (!isUInt<12>(DestDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
        .addOperand(DestBase).addImm(DestDisp).addReg(0);
      DestBase = MachineOperand::CreateReg(Reg, false);
      DestDisp = 0;
    }
    if (!isUInt<12>(SrcDisp)) {
      unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
        .addOperand(SrcBase).addImm(SrcDisp).addReg(0);
      SrcBase = MachineOperand::CreateReg(Reg, false);
      SrcDisp = 0;
    }
    BuildMI(*MBB, MI, DL, TII->get(Opcode))
      .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength)
      .addOperand(SrcBase).addImm(SrcDisp);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    Length -= ThisLength;
    // If there's another CLC to go, branch to the end if a difference
    // was found.
    if (EndMBB && Length > 0) {
      MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
      BuildMI(MBB, DL, TII->get(SystemZ::BRC))
        .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
        .addMBB(EndMBB);
      MBB->addSuccessor(EndMBB);
      MBB->addSuccessor(NextMBB);
      MBB = NextMBB;
    }
  }
  if (EndMBB) {
    MBB->addSuccessor(EndMBB);
    MBB = EndMBB;
    MBB->addLiveIn(SystemZ::CC);
  }

  MI->eraseFromParent();
  return MBB;
}

// Decompose string pseudo-instruction MI into a loop that continually performs
// Opcode until CC != 3.
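// CLST, MVST and SRST set CC 3 when they stop after processing a
// CPU-determined number of bytes without having reached the end of the
// operands, so the loop simply reissues the instruction until it produces
// one of its final condition codes.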
MachineBasicBlock *
SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII =
    static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned End1Reg = MI->getOperand(0).getReg();
  unsigned Start1Reg = MI->getOperand(1).getReg();
  unsigned Start2Reg = MI->getOperand(2).getReg();
  unsigned CharReg = MI->getOperand(3).getReg();

  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  unsigned This1Reg = MRI.createVirtualRegister(RC);
  unsigned This2Reg = MRI.createVirtualRegister(RC);
  unsigned End2Reg = MRI.createVirtualRegister(RC);

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
  MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);

  // StartMBB:
  //   # fall through to LoopMBB
  MBB->addSuccessor(LoopMBB);

  // LoopMBB:
  //   %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
  //   %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
  //   R0L = %CharReg
  //   %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
  //   JO LoopMBB
  //   # fall through to DoneMBB
  //
  // The load of R0L can be hoisted by post-RA LICM.
  MBB = LoopMBB;

  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
    .addReg(Start1Reg).addMBB(StartMBB)
    .addReg(End1Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
    .addReg(Start2Reg).addMBB(StartMBB)
    .addReg(End2Reg).addMBB(LoopMBB);
  BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
  BuildMI(MBB, DL, TII->get(Opcode))
    .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
    .addReg(This1Reg).addReg(This2Reg);
  BuildMI(MBB, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
  MBB->addSuccessor(LoopMBB);
  MBB->addSuccessor(DoneMBB);

  DoneMBB->addLiveIn(SystemZ::CC);

  MI->eraseFromParent();
  return DoneMBB;
}

MachineBasicBlock *SystemZTargetLowering::
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  case SystemZ::Select32Mux:
  case SystemZ::Select32:
  case SystemZ::SelectF32:
  case SystemZ::Select64:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::AEXT128_64:
    return emitExt128(MI, MBB, false, SystemZ::subreg_l64);
  case SystemZ::ZEXT128_32:
    return emitExt128(MI, MBB, true, SystemZ::subreg_l32);
  case SystemZ::ZEXT128_64:
    return emitExt128(MI, MBB, true, SystemZ::subreg_l64);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0, 0);
  case SystemZ::ATOMIC_SWAP_32:
    return emitAtomicLoadBinary(MI, MBB, 0, 32);
  case SystemZ::ATOMIC_SWAP_64:
    return emitAtomicLoadBinary(MI, MBB, 0, 64);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
  case SystemZ::ATOMIC_LOAD_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
  case SystemZ::ATOMIC_LOAD_AHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
  case SystemZ::ATOMIC_LOAD_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
  case SystemZ::ATOMIC_LOAD_AGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
  case SystemZ::ATOMIC_LOAD_AGHI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
  case SystemZ::ATOMIC_LOAD_AGFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
  case SystemZ::ATOMIC_LOAD_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
  case SystemZ::ATOMIC_LOAD_SGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
  case SystemZ::ATOMIC_LOAD_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
  case SystemZ::ATOMIC_LOAD_NILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
  case SystemZ::ATOMIC_LOAD_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
  case SystemZ::ATOMIC_LOAD_NILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
  case SystemZ::ATOMIC_LOAD_NGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
  case SystemZ::ATOMIC_LOAD_NILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
  case SystemZ::ATOMIC_LOAD_NILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
  case SystemZ::ATOMIC_LOAD_NIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
  case SystemZ::ATOMIC_LOAD_NIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
  case SystemZ::ATOMIC_LOAD_NILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
  case SystemZ::ATOMIC_LOAD_NIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
  case SystemZ::ATOMIC_LOAD_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
  case SystemZ::ATOMIC_LOAD_OILL:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
  case SystemZ::ATOMIC_LOAD_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
  case SystemZ::ATOMIC_LOAD_OILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
  case SystemZ::ATOMIC_LOAD_OGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
  case SystemZ::ATOMIC_LOAD_OILL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
  case SystemZ::ATOMIC_LOAD_OILH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
  case SystemZ::ATOMIC_LOAD_OIHL64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
  case SystemZ::ATOMIC_LOAD_OIHH64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
  case SystemZ::ATOMIC_LOAD_OILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
  case SystemZ::ATOMIC_LOAD_OIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
  case SystemZ::ATOMIC_LOAD_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
  case SystemZ::ATOMIC_LOAD_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
  case SystemZ::ATOMIC_LOAD_XGR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
  case SystemZ::ATOMIC_LOAD_XILF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
  case SystemZ::ATOMIC_LOAD_XIHF64:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
  case SystemZ::ATOMIC_LOAD_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
  case SystemZ::ATOMIC_LOAD_NILLi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
  case SystemZ::ATOMIC_LOAD_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
  case SystemZ::ATOMIC_LOAD_NILFi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
  case SystemZ::ATOMIC_LOAD_NGRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
  case SystemZ::ATOMIC_LOAD_NILL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHL64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHH64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
  case SystemZ::ATOMIC_LOAD_NILF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
  case SystemZ::ATOMIC_LOAD_NIHF64i:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_MIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_MIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_MAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_MAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 0);
  case SystemZ::ATOMIC_LOAD_UMIN_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_LE, 32);
  case SystemZ::ATOMIC_LOAD_UMIN_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_LE, 64);

  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 0);
  case SystemZ::ATOMIC_LOAD_UMAX_32:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
                                SystemZ::CCMASK_CMP_GE, 32);
  case SystemZ::ATOMIC_LOAD_UMAX_64:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
                                SystemZ::CCMASK_CMP_GE, 64);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCSequence:
  case SystemZ::MVCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCSequence:
  case SystemZ::NCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCSequence:
  case SystemZ::OCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCSequence:
  case SystemZ::XCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCSequence:
  case SystemZ::CLCLoop:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}