//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "systemz-lower"

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"

using namespace llvm;

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false);
  return Op;
}

SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
  : TargetLowering(tm, new TargetLoweringObjectFileELF()),
    Subtarget(*tm.getSubtargetImpl()), TM(tm) {
  MVT PtrVT = getPointerTy();

  // Set up the register classes.
  addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
  addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
  addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Set up special registers.
  setExceptionPointerRegister(SystemZ::R6D);
  setExceptionSelectorRegister(SystemZ::R7D);
  setStackPointerRegisterToSaveRestore(SystemZ::R15D);

  // TODO: It may be better to default to latency-oriented scheduling, however
  // LLVM's current latency-oriented scheduler can't handle physreg definitions
  // such as SystemZ has with CC, so set this to the register-pressure
  // scheduler, because it can.
  setSchedulingPreference(Sched::RegPressure);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Instructions are strings of 2-byte aligned 2-byte values.
  setMinFunctionAlignment(2);

  // Handle operations that are handled in a similar way for all types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand SETCC(X, Y, COND) into SELECT_CC(X, Y, 1, 0, COND).
      setOperationAction(ISD::SETCC, VT, Expand);

      // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
      setOperationAction(ISD::SELECT, VT, Expand);

      // Lower SELECT_CC and BR_CC into separate comparisons and branches.
      setOperationAction(ISD::SELECT_CC, VT, Custom);
      setOperationAction(ISD::BR_CC, VT, Custom);
    }
  }

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Expand BRCOND into a BR_CC (see above).
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  // Handle integer types.
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // Expand individual DIV and REMs into DIVREMs.
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Custom);
      setOperationAction(ISD::UDIVREM, VT, Custom);

      // Expand ATOMIC_LOAD and ATOMIC_STORE using ATOMIC_CMP_SWAP.
      // FIXME: probably much too conservative.
      setOperationAction(ISD::ATOMIC_LOAD, VT, Expand);
      setOperationAction(ISD::ATOMIC_STORE, VT, Expand);

      // No special instructions for these.
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Use *MUL_LOHI where possible and a wider multiplication otherwise.
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);

      // We have instructions for signed but not unsigned FP conversion.
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }
  }

  // Type legalization will convert 8- and 16-bit atomic operations into
  // forms that operate on i32s (but still keeping the original memory VT).
  // Lower them into full i32 operations.
  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // We have instructions for signed but not unsigned FP conversion.
  // Handle unsigned 32-bit types as signed 64-bit types.
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

  // We have native support for a 64-bit CTLZ, via FLOGR.
  setOperationAction(ISD::CTLZ, MVT::i32, Promote);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);

  // Give LowerOperation the chance to replace 64-bit ORs with subregs.
  setOperationAction(ISD::OR, MVT::i64, Custom);

  // The architecture has 32-bit SMUL_LOHI and UMUL_LOHI (MR and MLR),
  // but they aren't really worth using. There is no 64-bit SMUL_LOHI,
  // but there is a 64-bit UMUL_LOHI: MLGR.
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Custom);

  // FIXME: Can we support these natively?
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);

  // We have native instructions for i8, i16 and i32 extensions, but not i1.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Handle the various types of symbolic address.
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // We need to handle dynamic allocations specially because of the
  // 160-byte area at the bottom of the stack.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // Use custom expanders so that we can force the function to use
  // a frame pointer.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Handle floating-point types.
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    MVT VT = MVT::SimpleValueType(I);
    if (isTypeLegal(VT)) {
      // We can use FI for FRINT.
      setOperationAction(ISD::FRINT, VT, Legal);

      // No special instructions for these.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
    }
  }

  // We have fused multiply-addition for f32 and f64 but not f128.
  setOperationAction(ISD::FMA, MVT::f32, Legal);
  setOperationAction(ISD::FMA, MVT::f64, Legal);
  setOperationAction(ISD::FMA, MVT::f128, Expand);

  // Needed so that we don't try to implement f128 constant loads using
  // a load-and-extend of an f80 constant (in cases where the constant
  // would fit in an f80).
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);

  // Floating-point truncation and stores need to be done separately.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);

  // We have 64-bit FPR<->GPR moves, but need special handling for
  // 32-bit forms.
  setOperationAction(ISD::BITCAST, MVT::i32, Custom);
  setOperationAction(ISD::BITCAST, MVT::f32, Custom);

  // VASTART and VACOPY need to deal with the SystemZ-specific varargs
  // structure, but VAEND is a no-op.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = 0;
  MaxStoresPerMemcpyOptSize = 0;

  // The main memset sequence is a byte store followed by an MVC.
  // Two STC or MV..I stores win over that, but the kind of fused stores
  // generated by target-independent code don't when the byte value is
  // variable.  E.g.  "STC <reg>;MHI <reg>,257;STH <reg>" is not better
  // than "STC;MVC".  Handle the choice in target-specific code instead.
  MaxStoresPerMemset = 0;
  MaxStoresPerMemsetOptSize = 0;
}

bool
SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
  return Imm.isZero() || Imm.isNegZero();
}

bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                          bool *Fast) const {
  // Unaligned accesses should never be slower than the expanded version.
  // We check specifically for aligned accesses in the few cases where
  // they are required.
  if (Fast)
    *Fast = true;
  return true;
}

bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // Punt on globals for now, although they can be used in limited
  // RELATIVE LONG cases.
  if (AM.BaseGV)
    return false;

  // Require a 20-bit signed offset.
  if (!isInt<20>(AM.BaseOffs))
    return false;

  // Indexing is OK but no scale factor can be applied.
  return AM.Scale == 0 || AM.Scale == 1;
}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Address register
    case 'd': // Data register (equivalent to 'r')
    case 'f': // Floating-point register
    case 'r': // General-purpose register
      return C_RegisterClass;

    case 'Q': // Memory with base and unsigned 12-bit displacement
    case 'R': // Likewise, plus an index
    case 'S': // Memory with base and signed 20-bit displacement
    case 'T': // Likewise, plus an index
    case 'm': // Equivalent to 'T'.
      return C_Memory;

    case 'I': // Unsigned 8-bit constant
    case 'J': // Unsigned 12-bit constant
    case 'K': // Signed 16-bit constant
    case 'L': // Signed 20-bit displacement (on all targets we support)
    case 'M': // 0x7fffffff
      return C_Other;

    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (CallOperandVal == NULL)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;

  case 'a': // Address register
  case 'd': // Data register (equivalent to 'r')
  case 'r': // General-purpose register
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_Register;
    break;

  case 'f': // Floating-point register
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;

  case 'I': // Unsigned 8-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'J': // Unsigned 12-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
        weight = CW_Constant;
    break;

  case 'K': // Signed 16-bit constant
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'L': // Signed 20-bit displacement (on all targets we support)
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
        weight = CW_Constant;
    break;

  case 'M': // 0x7fffffff
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
        weight = CW_Constant;
    break;
  }
  return weight;
}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(const std::string &Constraint,
                    const TargetRegisterClass *RC, const unsigned *Map) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
    unsigned Index = atoi(Suffix.c_str());
    if (Index < 16 && Map[Index])
      return std::make_pair(Map[Index], RC);
  }
  return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
}

std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'd': // Data register (equivalent to 'r')
    case 'r': // General-purpose register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

    case 'a': // Address register
      if (VT == MVT::i64)
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

    case 'f': // Floating-point register
      if (VT == MVT::f64)
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      else if (VT == MVT::f128)
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
    }
  }
  if (Constraint[0] == '{') {
    // We need to override the default register parsing for GPRs and FPRs
    // because the interpretation depends on VT.  The internal names of
    // the registers are also different from the external names
    // (F0D and F0S instead of F0, etc.).
    if (Constraint[1] == 'r') {
      if (VT == MVT::i32)
        return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
                                   SystemZMC::GR32Regs);
      if (VT == MVT::i128)
        return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
                                   SystemZMC::GR128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
                                 SystemZMC::GR64Regs);
    }
    if (Constraint[1] == 'f') {
      if (VT == MVT::f32)
        return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
                                   SystemZMC::FP32Regs);
      if (VT == MVT::f128)
        return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
                                   SystemZMC::FP128Regs);
      return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
                                 SystemZMC::FP64Regs);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

void SystemZTargetLowering::
LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                             std::vector<SDValue> &Ops,
                             SelectionDAG &DAG) const {
  // Only support length 1 constraints for now.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement (on all targets we support)
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
                                              Op.getValueType()));
      return;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {
  // If the argument has been promoted from a smaller type, insert an
  // assertion to capture this.
  if (VA.getLocInfo() == CCValAssign::SExt)
    Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));
  else if (VA.getLocInfo() == CCValAssign::ZExt)
    Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
                        DAG.getValueType(VA.getValVT()));

  if (VA.isExtInLoc())
    Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
  else if (VA.getLocInfo() == CCValAssign::Indirect)
    Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
                        MachinePointerInfo(), false, false, false, 0);
  else
    assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
  return Value;
}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
                                   CCValAssign &VA, SDValue Value) {
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::ZExt:
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::AExt:
    return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
  case CCValAssign::Full:
    return Value;
  default:
    llvm_unreachable("Unhandled getLocInfo()");
  }
}

SDValue SystemZTargetLowering::
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     SDLoc DL, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  const SystemZFrameLowering *TFL =
    static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    SDValue ArgValue;
    CCValAssign &VA = ArgLocs[I];
    EVT LocVT = VA.getLocVT();
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      const TargetRegisterClass *RC;
      switch (LocVT.getSimpleVT().SimpleTy) {
      default:
        // Integers smaller than i64 should be promoted to i64.
        llvm_unreachable("Unexpected argument type");
      case MVT::i32:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR32BitRegClass;
        break;
      case MVT::i64:
        NumFixedGPRs += 1;
        RC = &SystemZ::GR64BitRegClass;
        break;
      case MVT::f32:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP32BitRegClass;
        break;
      case MVT::f64:
        NumFixedFPRs += 1;
        RC = &SystemZ::FP64BitRegClass;
        break;
      }

      unsigned VReg = MRI.createVirtualRegister(RC);
      MRI.addLiveIn(VA.getLocReg(), VReg);
      ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Create the frame index object for this incoming parameter.
      int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
                                      VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.  Unpromoted ints and floats are
      // passed as right-justified 8-byte values.
      EVT PtrVT = getPointerTy();
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    // Convert the value of the argument register into the value that's
    // being passed.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
  }

  if (IsVarArg) {
    // Save the number of non-varargs registers for later use by va_start, etc.
    FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
    FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);

    // Likewise the address (in the form of a frame index) of where the
    // first stack vararg would be.  The 1-byte size here is arbitrary.
    int64_t StackSize = CCInfo.getNextStackOffset();
    FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));

    // ...and a similar frame index for the caller-allocated save area
    // that will be used to store the incoming registers.
    int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
    unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
    FuncInfo->setRegSaveFrameIndex(RegSaveIndex);

    // Store the FPR varargs in the reserved frame slots.  (We store the
    // GPRs as part of the prologue.)
    if (NumFixedFPRs < SystemZ::NumArgFPRs) {
      SDValue MemOps[SystemZ::NumArgFPRs];
      for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
        unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
        int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
        SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
        unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
                                     &SystemZ::FP64BitRegClass);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
        MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
                                 MachinePointerInfo::getFixedStack(FI),
                                 false, false, 0);
      }
      // Join the stores, which are independent of one another.
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                          &MemOps[NumFixedFPRs],
                          SystemZ::NumArgFPRs - NumFixedFPRs);
    }
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy();

  // SystemZ target does not yet support tail call optimization.
  isTailCall = false;

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Mark the start of the call.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
                               DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                                         MachinePointerInfo::getFixedStack(FI),
                                         false, false, 0));
      ArgValue = SpillSlot;
    } else
      ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.  Unpromoted ints and
      // floats are passed as right-justified 8-byte values.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
      unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
      if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
        Offset += 4;
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset));

      // Emit the store.
      MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
                                         MachinePointerInfo(),
                                         false, false, 0));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  SDValue Glue;
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
                             RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
    Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
    Ops.push_back(DAG.getRegister(RegsToPass[I].first,
                                  RegsToPass[I].second.getValueType()));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, PtrVT, true),
                             DAG.getConstant(0, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    // Convert the value of the return register into the value that's
    // being returned.
    InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
  }

  return Chain;
}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // Assign locations to each returned value.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

  // Quick exit for void returns
  if (RetLocs.empty())
    return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);

  // Copy the result values into the output registers.
  SDValue Glue;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];
    SDValue RetValue = OutVals[I];

    // Make the return register live on exit.
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Promote the value as required.
    RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);

    // Chain and glue the copies together.
    unsigned Reg = VA.getLocReg();
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Glue.getNode())
    RetOps.push_back(Glue);

  return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
                     RetOps.data(), RetOps.size());
}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {
#define CONV(X) \
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X

  switch (CC) {
  default:
    llvm_unreachable("Invalid integer condition!");

  CONV(EQ);
  CONV(NE);
  CONV(GT);
  CONV(GE);
  CONV(LT);
  CONV(LE);

  case ISD::SETO: return SystemZ::CCMASK_CMP_O;
  case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
  }
#undef CONV
}

// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, bool &IsUnsigned,
                          SDValue &CmpOp0, SDValue &CmpOp1,
                          unsigned &CCMask) {
  if (IsUnsigned)
    return;

  ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(CmpOp1.getNode());
  if (!ConstOp1)
    return;

  int64_t Value = ConstOp1->getSExtValue();
  if ((Value == -1 && CCMask == SystemZ::CCMASK_CMP_GT) ||
      (Value == -1 && CCMask == SystemZ::CCMASK_CMP_LE) ||
      (Value == 1 && CCMask == SystemZ::CCMASK_CMP_LT) ||
      (Value == 1 && CCMask == SystemZ::CCMASK_CMP_GE)) {
    CCMask ^= SystemZ::CCMASK_CMP_EQ;
    CmpOp1 = DAG.getConstant(0, CmpOp1.getValueType());
  }
}

// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
                             SDValue &CmpOp0, SDValue &CmpOp1,
                             unsigned &CCMask) {
  // For us to make any changes, it must be a comparison between a single-use
  // load and a constant.
  if (!CmpOp0.hasOneUse() ||
      CmpOp0.getOpcode() != ISD::LOAD ||
      CmpOp1.getOpcode() != ISD::Constant)
    return;

  // We must have an 8- or 16-bit load.
  LoadSDNode *Load = cast<LoadSDNode>(CmpOp0);
  unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
  if (NumBits != 8 && NumBits != 16)
    return;

  // The load must be an extending one and the constant must be within the
  // range of the unextended value.
  ConstantSDNode *Constant = cast<ConstantSDNode>(CmpOp1);
  uint64_t Value = Constant->getZExtValue();
  uint64_t Mask = (1 << NumBits) - 1;
  if (Load->getExtensionType() == ISD::SEXTLOAD) {
    int64_t SignedValue = Constant->getSExtValue();
    if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask)
      return;
    // Unsigned comparison between two sign-extended values is equivalent
    // to unsigned comparison between two zero-extended values.
    if (IsUnsigned)
      Value &= Mask;
    else if (CCMask == SystemZ::CCMASK_CMP_EQ ||
             CCMask == SystemZ::CCMASK_CMP_NE)
      // Any choice of IsUnsigned is OK for equality comparisons.
      // We could use either CHHSI or CLHHSI for 16-bit comparisons,
      // but since we use CLHHSI for zero extensions, it seems better
      // to be consistent and do the same here.
      Value &= Mask, IsUnsigned = true;
    else if (NumBits == 8) {
      // Try to treat the comparison as unsigned, so that we can use CLI.
      // Adjust CCMask and Value as necessary.
      if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT)
        // Test whether the high bit of the byte is set.
        Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true;
      else if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_GE)
        // Test whether the high bit of the byte is clear.
        Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true;
      else
        // No instruction exists for this combination.
        return;
    }
  } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
    if (Value > Mask)
      return;
    // Signed comparison between two zero-extended values is equivalent
    // to unsigned comparison.
    IsUnsigned = true;
  } else
    return;

  // Make sure that the first operand is an i32 of the right extension type.
  ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
  if (CmpOp0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType)
    CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
                            Load->getChain(), Load->getBasePtr(),
                            Load->getPointerInfo(), Load->getMemoryVT(),
                            Load->isVolatile(), Load->isNonTemporal(),
                            Load->getAlignment());

  // Make sure that the second operand is an i32 with the right value.
  if (CmpOp1.getValueType() != MVT::i32 ||
      Value != Constant->getZExtValue())
    CmpOp1 = DAG.getConstant(Value, MVT::i32);
}

// Return true if a comparison described by CCMask, CmpOp0 and CmpOp1
// is an equality comparison that is better implemented using unsigned
// rather than signed comparison instructions.
static bool preferUnsignedComparison(SelectionDAG &DAG, SDValue CmpOp0,
                                     SDValue CmpOp1, unsigned CCMask) {
  // The test must be for equality or inequality.
  if (CCMask != SystemZ::CCMASK_CMP_EQ && CCMask != SystemZ::CCMASK_CMP_NE)
    return false;

  if (CmpOp1.getOpcode() == ISD::Constant) {
    uint64_t Value = cast<ConstantSDNode>(CmpOp1)->getSExtValue();

    // If we're comparing with memory, prefer unsigned comparisons for
    // values that are in the unsigned 16-bit range but not the signed
    // 16-bit range.  We want to use CLFHSI and CLGHSI.
    if (CmpOp0.hasOneUse() &&
        ISD::isNormalLoad(CmpOp0.getNode()) &&
        (Value >= 32768 && Value < 65536))
      return true;

    // Use unsigned comparisons for values that are in the CLGFI range
    // but not in the CGFI range.
    if (CmpOp0.getValueType() == MVT::i64 && (Value >> 31) == 1)
      return true;

    return false;
  }

  // Prefer CL for zero-extended loads.
  if (CmpOp1.getOpcode() == ISD::ZERO_EXTEND ||
      ISD::isZEXTLoad(CmpOp1.getNode()))
    return true;

  // ...and for "in-register" zero extensions.
  if (CmpOp1.getOpcode() == ISD::AND && CmpOp1.getValueType() == MVT::i64) {
    SDValue Mask = CmpOp1.getOperand(1);
    if (Mask.getOpcode() == ISD::Constant &&
        cast<ConstantSDNode>(Mask)->getZExtValue() == 0xffffffff)
      return true;
  }

  return false;
}

// Return a target node that compares CmpOp0 with CmpOp1 and stores a
// 2-bit result in CC.  Set CCValid to the CCMASK_* of all possible
// 2-bit results and CCMask to the subset of those results that are
// associated with Cond.
static SDValue emitCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
                       ISD::CondCode Cond, unsigned &CCValid,
                       unsigned &CCMask) {
  bool IsUnsigned = false;
  CCMask = CCMaskForCondCode(Cond);
  if (CmpOp0.getValueType().isFloatingPoint())
    CCValid = SystemZ::CCMASK_FCMP;
  else {
    IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO;
    CCValid = SystemZ::CCMASK_ICMP;
    CCMask &= CCValid;
    adjustZeroCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
    if (preferUnsignedComparison(DAG, CmpOp0, CmpOp1, CCMask))
      IsUnsigned = true;
  }

  SDLoc DL(CmpOp0);
  return DAG.getNode((IsUnsigned ? SystemZISD::UCMP : SystemZISD::CMP),
                     DL, MVT::Glue, CmpOp0, CmpOp1);
}

// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair.  Op0 and Op1 are the VT operands to the operation,
// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
// on the extended Op0 and (unextended) Op1.  Store the even register result
// in Even and the odd register result in Odd.
static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
                             unsigned Extend, unsigned Opcode,
                             SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {
  SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
  SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
                               SDValue(In128, 0), Op1);
  bool Is32Bit = is32Bit(VT);
  SDValue SubReg0 = DAG.getTargetConstant(SystemZ::even128(Is32Bit), VT);
  SDValue SubReg1 = DAG.getTargetConstant(SystemZ::odd128(Is32Bit), VT);
  SDNode *Reg0 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                    VT, Result, SubReg0);
  SDNode *Reg1 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                    VT, Result, SubReg1);
  Even = SDValue(Reg0, 0);
  Odd = SDValue(Reg1, 0);
}

SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue CmpOp0 = Op.getOperand(2);
  SDValue CmpOp1 = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  unsigned CCValid, CCMask;
  SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCValid, CCMask);
  return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
                     Chain, DAG.getConstant(CCValid, MVT::i32),
                     DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
}

SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue CmpOp0 = Op.getOperand(0);
  SDValue CmpOp1 = Op.getOperand(1);
  SDValue TrueOp = Op.getOperand(2);
  SDValue FalseOp = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  unsigned CCValid, CCMask;
  SDValue Flags = emitCmp(DAG, CmpOp0, CmpOp1, CC, CCValid, CCMask);

  SmallVector<SDValue, 5> Ops;
  Ops.push_back(TrueOp);
  Ops.push_back(FalseOp);
  Ops.push_back(DAG.getConstant(CCValid, MVT::i32));
  Ops.push_back(DAG.getConstant(CCMask, MVT::i32));
  Ops.push_back(Flags);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
}

SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();
  Reloc::Model RM = TM.getRelocationModel();
  CodeModel::Model CM = TM.getCodeModel();

  SDValue Result;
  if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
    // Make sure that the offset is aligned to a halfword.  If it isn't,
    // create an "anchor" at the previous 12-bit boundary.
    // FIXME check whether there is a better way of handling this.
    if (Offset & 1) {
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
                                          Offset & ~uint64_t(0xfff));
      Offset &= 0xfff;
    } else {
      Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
      Offset = 0;
    }
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  } else {
    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
    Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);
  }

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
                         DAG.getConstant(Offset, PtrVT));

  return Result;
}

SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const GlobalValue *GV = Node->getGlobal();
  EVT PtrVT = getPointerTy();
  TLSModel::Model model = TM.getTLSModel(GV);

  if (model != TLSModel::LocalExec)
    llvm_unreachable("only local-exec TLS mode supported");

  // The high part of the thread pointer is in access register 0.
  SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(0, MVT::i32));
  TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);

  // The low part of the thread pointer is in access register 1.
  SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
                             DAG.getConstant(1, MVT::i32));
  TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);

  // Merge them into a single 64-bit address.
  SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
                                    DAG.getConstant(32, PtrVT));
  SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);

  // Get the offset of GA from the thread pointer.
  SystemZConstantPoolValue *CPV =
    SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);

  // Force the offset into the constant pool and load it from there.
  SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8);
  SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
                               CPAddr, MachinePointerInfo::getConstantPool(),
                               false, false, false, 0);

  // Add the base and offset together.
  return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
}

SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Node);
  const BlockAddress *BA = Node->getBlockAddress();
  int64_t Offset = Node->getOffset();
  EVT PtrVT = getPointerTy();

  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
  Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
  return Result;
}

SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
                                              SelectionDAG &DAG) const {
  SDLoc DL(JT);
  EVT PtrVT = getPointerTy();
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);

  // Use LARL to load the address of the table.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(CP);
  EVT PtrVT = getPointerTy();

  SDValue Result;
  if (CP->isMachineConstantPoolEntry())
    Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                       CP->getAlignment());
  else
    Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                       CP->getAlignment(), CP->getOffset());

  // Use LARL to load the address of the constant pool entry.
  return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
}

SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue In = Op.getOperand(0);
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64);
  SDValue Shift32 = DAG.getConstant(32, MVT::i64);
  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    SDValue In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
    SDValue Shift = DAG.getNode(ISD::SHL, DL, MVT::i64, In64, Shift32);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, Shift);
    SDNode *Out = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                     MVT::f32, Out64, SubReg32);
    return SDValue(Out, 0);
  }
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
    SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
    SDNode *In64 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                      MVT::f64, SDValue(U64, 0), In, SubReg32);
    SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, SDValue(In64, 0));
    SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64, Shift32);
    SDValue Out = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
    return Out;
  }
  llvm_unreachable("Unexpected bitcast combination");
}

SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();
  EVT PtrVT = getPointerTy();

  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);

  // The initial values of each field.
  const unsigned NumFields = 4;
  SDValue Fields[NumFields] = {
    DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT),
    DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
    DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
  };

  // Store each field into its respective slot.
  SDValue MemOps[NumFields];
  unsigned Offset = 0;
  for (unsigned I = 0; I < NumFields; ++I) {
    SDValue FieldAddr = Addr;
    if (Offset != 0)
      FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
                              DAG.getIntPtrConstant(Offset));
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
                             MachinePointerInfo(SV, Offset),
                             false, false, 0);
    Offset += 8;
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields);
}

SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue DstPtr = Op.getOperand(1);
  SDValue SrcPtr = Op.getOperand(2);
  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32),
                       /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}

SDValue SystemZTargetLowering::
lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDLoc DL(Op);

  unsigned SPReg = getStackPointerRegisterToSaveRestore();

  // Get a reference to the stack pointer.
  SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);

  // Get the new stack pointer value.
  SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size);

  // Copy the new stack pointer back.
  Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);

  // The allocated data lives above the 160 bytes allocated for the standard
  // frame, plus any outgoing stack arguments.  We don't know how much that
  // amounts to yet, so emit a special ADJDYNALLOC placeholder.
  SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
  SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);

  SDValue Ops[2] = { Result, Chain };
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  assert(!is32Bit(VT) && "Only support 64-bit UMUL_LOHI");

  // UMUL_LOHI64 returns the low result in the odd register and the high
  // result in the even register.  UMUL_LOHI is defined to return the
  // low half first, so the results are in reverse order.
  SDValue Ops[2];
  lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
                   Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Opcode;

  // We use DSGF for 32-bit division.
1391 if (is32Bit(VT)) { 1392 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0); 1393 Opcode = SystemZISD::SDIVREM32; 1394 } else if (DAG.ComputeNumSignBits(Op1) > 32) { 1395 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1); 1396 Opcode = SystemZISD::SDIVREM32; 1397 } else 1398 Opcode = SystemZISD::SDIVREM64; 1399 1400 // DSG(F) takes a 64-bit dividend, so the even register in the GR128 1401 // input is "don't care". The instruction returns the remainder in 1402 // the even register and the quotient in the odd register. 1403 SDValue Ops[2]; 1404 lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode, 1405 Op0, Op1, Ops[1], Ops[0]); 1406 return DAG.getMergeValues(Ops, 2, DL); 1407 } 1408 1409 SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op, 1410 SelectionDAG &DAG) const { 1411 EVT VT = Op.getValueType(); 1412 SDLoc DL(Op); 1413 1414 // DL(G) uses a double-width dividend, so we need to clear the even 1415 // register in the GR128 input. The instruction returns the remainder 1416 // in the even register and the quotient in the odd register. 1417 SDValue Ops[2]; 1418 if (is32Bit(VT)) 1419 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32, 1420 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 1421 else 1422 lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64, 1423 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]); 1424 return DAG.getMergeValues(Ops, 2, DL); 1425 } 1426 1427 SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const { 1428 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation"); 1429 1430 // Get the known-zero masks for each operand. 1431 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) }; 1432 APInt KnownZero[2], KnownOne[2]; 1433 DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]); 1434 DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]); 1435 1436 // See if the upper 32 bits of one operand and the lower 32 bits of the 1437 // other are known zero. They are the low and high operands respectively. 1438 uint64_t Masks[] = { KnownZero[0].getZExtValue(), 1439 KnownZero[1].getZExtValue() }; 1440 unsigned High, Low; 1441 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff) 1442 High = 1, Low = 0; 1443 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff) 1444 High = 0, Low = 1; 1445 else 1446 return Op; 1447 1448 SDValue LowOp = Ops[Low]; 1449 SDValue HighOp = Ops[High]; 1450 1451 // If the high part is a constant, we're better off using IILH. 1452 if (HighOp.getOpcode() == ISD::Constant) 1453 return Op; 1454 1455 // If the low part is a constant that is outside the range of LHI, 1456 // then we're better off using IILF. 1457 if (LowOp.getOpcode() == ISD::Constant) { 1458 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue()); 1459 if (!isInt<16>(Value)) 1460 return Op; 1461 } 1462 1463 // Check whether the high part is an AND that doesn't change the 1464 // high 32 bits and just masks out low bits. We can skip it if so. 1465 if (HighOp.getOpcode() == ISD::AND && 1466 HighOp.getOperand(1).getOpcode() == ISD::Constant) { 1467 ConstantSDNode *MaskNode = cast<ConstantSDNode>(HighOp.getOperand(1)); 1468 uint64_t Mask = MaskNode->getZExtValue() | Masks[High]; 1469 if ((Mask >> 32) == 0xffffffff) 1470 HighOp = HighOp.getOperand(0); 1471 } 1472 1473 // Take advantage of the fact that all GR32 operations only change the 1474 // low 32 bits by truncating Low to an i32 and inserting it directly 1475 // using a subreg. 
The interesting cases are those where the truncation 1476 // can be folded. 1477 SDLoc DL(Op); 1478 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp); 1479 SDValue SubReg32 = DAG.getTargetConstant(SystemZ::subreg_32bit, MVT::i64); 1480 SDNode *Result = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, 1481 MVT::i64, HighOp, Low32, SubReg32); 1482 return SDValue(Result, 0); 1483 } 1484 1485 // Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first 1486 // two into the fullword ATOMIC_LOADW_* operation given by Opcode. 1487 SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op, 1488 SelectionDAG &DAG, 1489 unsigned Opcode) const { 1490 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode()); 1491 1492 // 32-bit operations need no code outside the main loop. 1493 EVT NarrowVT = Node->getMemoryVT(); 1494 EVT WideVT = MVT::i32; 1495 if (NarrowVT == WideVT) 1496 return Op; 1497 1498 int64_t BitSize = NarrowVT.getSizeInBits(); 1499 SDValue ChainIn = Node->getChain(); 1500 SDValue Addr = Node->getBasePtr(); 1501 SDValue Src2 = Node->getVal(); 1502 MachineMemOperand *MMO = Node->getMemOperand(); 1503 SDLoc DL(Node); 1504 EVT PtrVT = Addr.getValueType(); 1505 1506 // Convert atomic subtracts of constants into additions. 1507 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB) 1508 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) { 1509 Opcode = SystemZISD::ATOMIC_LOADW_ADD; 1510 Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType()); 1511 } 1512 1513 // Get the address of the containing word. 1514 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 1515 DAG.getConstant(-4, PtrVT)); 1516 1517 // Get the number of bits that the word must be rotated left in order 1518 // to bring the field to the top bits of a GR32. 1519 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 1520 DAG.getConstant(3, PtrVT)); 1521 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 1522 1523 // Get the complementing shift amount, for rotating a field in the top 1524 // bits back to its proper position. 1525 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 1526 DAG.getConstant(0, WideVT), BitShift); 1527 1528 // Extend the source operand to 32 bits and prepare it for the inner loop. 1529 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other 1530 // operations require the source to be shifted in advance. (This shift 1531 // can be folded if the source is constant.) For AND and NAND, the lower 1532 // bits must be set, while for other opcodes they should be left clear. 1533 if (Opcode != SystemZISD::ATOMIC_SWAPW) 1534 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2, 1535 DAG.getConstant(32 - BitSize, WideVT)); 1536 if (Opcode == SystemZISD::ATOMIC_LOADW_AND || 1537 Opcode == SystemZISD::ATOMIC_LOADW_NAND) 1538 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2, 1539 DAG.getConstant(uint32_t(-1) >> BitSize, WideVT)); 1540 1541 // Construct the ATOMIC_LOADW_* node. 1542 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 1543 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift, 1544 DAG.getConstant(BitSize, WideVT) }; 1545 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops, 1546 array_lengthof(Ops), 1547 NarrowVT, MMO); 1548 1549 // Rotate the result of the final CS so that the field is in the lower 1550 // bits of a GR32, then truncate it. 
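// As a worked example (illustrative values only): an i8 field at byte
// offset 1 within its aligned word occupies bits 8-15 of that word,
// counting from the most significant bit, and the word returned by the
// ATOMIC_LOADW_* node still holds the field there.  The effective
// rotation below is BitShift + BitSize = 8 + 8 = 16 (only the low bits
// of the shift amount matter), which moves the field into bits 24-31,
// i.e. the low byte, ready for the TRUNCATE.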
1551 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift, 1552 DAG.getConstant(BitSize, WideVT)); 1553 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift); 1554 1555 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) }; 1556 return DAG.getMergeValues(RetOps, 2, DL); 1557 } 1558 1559 // Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower the first two 1560 // into a fullword ATOMIC_CMP_SWAPW operation. 1561 SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op, 1562 SelectionDAG &DAG) const { 1563 AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode()); 1564 1565 // We have native support for 32-bit compare and swap. 1566 EVT NarrowVT = Node->getMemoryVT(); 1567 EVT WideVT = MVT::i32; 1568 if (NarrowVT == WideVT) 1569 return Op; 1570 1571 int64_t BitSize = NarrowVT.getSizeInBits(); 1572 SDValue ChainIn = Node->getOperand(0); 1573 SDValue Addr = Node->getOperand(1); 1574 SDValue CmpVal = Node->getOperand(2); 1575 SDValue SwapVal = Node->getOperand(3); 1576 MachineMemOperand *MMO = Node->getMemOperand(); 1577 SDLoc DL(Node); 1578 EVT PtrVT = Addr.getValueType(); 1579 1580 // Get the address of the containing word. 1581 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr, 1582 DAG.getConstant(-4, PtrVT)); 1583 1584 // Get the number of bits that the word must be rotated left in order 1585 // to bring the field to the top bits of a GR32. 1586 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr, 1587 DAG.getConstant(3, PtrVT)); 1588 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift); 1589 1590 // Get the complementing shift amount, for rotating a field in the top 1591 // bits back to its proper position. 1592 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT, 1593 DAG.getConstant(0, WideVT), BitShift); 1594 1595 // Construct the ATOMIC_CMP_SWAPW node. 
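// The operand order here has to line up with what emitAtomicCmpSwapW
// later reads from the ATOMIC_CMP_SWAPW pseudo: the aligned word
// address, the compare and swap values, the two rotation amounts and the
// field width.  Unlike the ATOMIC_LOADW_* case above, the compare and
// swap values are not pre-shifted; the expansion merges them into the
// loaded word with RISBG instead.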
1596 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other); 1597 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift, 1598 NegBitShift, DAG.getConstant(BitSize, WideVT) }; 1599 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL, 1600 VTList, Ops, array_lengthof(Ops), 1601 NarrowVT, MMO); 1602 return AtomicOp; 1603 } 1604 1605 SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op, 1606 SelectionDAG &DAG) const { 1607 MachineFunction &MF = DAG.getMachineFunction(); 1608 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 1609 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), 1610 SystemZ::R15D, Op.getValueType()); 1611 } 1612 1613 SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op, 1614 SelectionDAG &DAG) const { 1615 MachineFunction &MF = DAG.getMachineFunction(); 1616 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true); 1617 return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), 1618 SystemZ::R15D, Op.getOperand(1)); 1619 } 1620 1621 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, 1622 SelectionDAG &DAG) const { 1623 switch (Op.getOpcode()) { 1624 case ISD::BR_CC: 1625 return lowerBR_CC(Op, DAG); 1626 case ISD::SELECT_CC: 1627 return lowerSELECT_CC(Op, DAG); 1628 case ISD::GlobalAddress: 1629 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG); 1630 case ISD::GlobalTLSAddress: 1631 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG); 1632 case ISD::BlockAddress: 1633 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG); 1634 case ISD::JumpTable: 1635 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG); 1636 case ISD::ConstantPool: 1637 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG); 1638 case ISD::BITCAST: 1639 return lowerBITCAST(Op, DAG); 1640 case ISD::VASTART: 1641 return lowerVASTART(Op, DAG); 1642 case ISD::VACOPY: 1643 return lowerVACOPY(Op, DAG); 1644 case ISD::DYNAMIC_STACKALLOC: 1645 return lowerDYNAMIC_STACKALLOC(Op, DAG); 1646 case ISD::UMUL_LOHI: 1647 return lowerUMUL_LOHI(Op, DAG); 1648 case ISD::SDIVREM: 1649 return lowerSDIVREM(Op, DAG); 1650 case ISD::UDIVREM: 1651 return lowerUDIVREM(Op, DAG); 1652 case ISD::OR: 1653 return lowerOR(Op, DAG); 1654 case ISD::ATOMIC_SWAP: 1655 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_SWAPW); 1656 case ISD::ATOMIC_LOAD_ADD: 1657 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD); 1658 case ISD::ATOMIC_LOAD_SUB: 1659 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB); 1660 case ISD::ATOMIC_LOAD_AND: 1661 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_AND); 1662 case ISD::ATOMIC_LOAD_OR: 1663 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_OR); 1664 case ISD::ATOMIC_LOAD_XOR: 1665 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR); 1666 case ISD::ATOMIC_LOAD_NAND: 1667 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND); 1668 case ISD::ATOMIC_LOAD_MIN: 1669 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN); 1670 case ISD::ATOMIC_LOAD_MAX: 1671 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX); 1672 case ISD::ATOMIC_LOAD_UMIN: 1673 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN); 1674 case ISD::ATOMIC_LOAD_UMAX: 1675 return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX); 1676 case ISD::ATOMIC_CMP_SWAP: 1677 return lowerATOMIC_CMP_SWAP(Op, DAG); 1678 case ISD::STACKSAVE: 1679 return lowerSTACKSAVE(Op, DAG); 1680 case ISD::STACKRESTORE: 1681 return lowerSTACKRESTORE(Op, DAG); 1682 
default: 1683 llvm_unreachable("Unexpected node to lower"); 1684 } 1685 } 1686 1687 const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const { 1688 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME 1689 switch (Opcode) { 1690 OPCODE(RET_FLAG); 1691 OPCODE(CALL); 1692 OPCODE(PCREL_WRAPPER); 1693 OPCODE(CMP); 1694 OPCODE(UCMP); 1695 OPCODE(BR_CCMASK); 1696 OPCODE(SELECT_CCMASK); 1697 OPCODE(ADJDYNALLOC); 1698 OPCODE(EXTRACT_ACCESS); 1699 OPCODE(UMUL_LOHI64); 1700 OPCODE(SDIVREM64); 1701 OPCODE(UDIVREM32); 1702 OPCODE(UDIVREM64); 1703 OPCODE(MVC); 1704 OPCODE(ATOMIC_SWAPW); 1705 OPCODE(ATOMIC_LOADW_ADD); 1706 OPCODE(ATOMIC_LOADW_SUB); 1707 OPCODE(ATOMIC_LOADW_AND); 1708 OPCODE(ATOMIC_LOADW_OR); 1709 OPCODE(ATOMIC_LOADW_XOR); 1710 OPCODE(ATOMIC_LOADW_NAND); 1711 OPCODE(ATOMIC_LOADW_MIN); 1712 OPCODE(ATOMIC_LOADW_MAX); 1713 OPCODE(ATOMIC_LOADW_UMIN); 1714 OPCODE(ATOMIC_LOADW_UMAX); 1715 OPCODE(ATOMIC_CMP_SWAPW); 1716 } 1717 return NULL; 1718 #undef OPCODE 1719 } 1720 1721 //===----------------------------------------------------------------------===// 1722 // Custom insertion 1723 //===----------------------------------------------------------------------===// 1724 1725 // Create a new basic block after MBB. 1726 static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) { 1727 MachineFunction &MF = *MBB->getParent(); 1728 MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock()); 1729 MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB); 1730 return NewMBB; 1731 } 1732 1733 // Split MBB after MI and return the new block (the one that contains 1734 // instructions after MI). 1735 static MachineBasicBlock *splitBlockAfter(MachineInstr *MI, 1736 MachineBasicBlock *MBB) { 1737 MachineBasicBlock *NewMBB = emitBlockAfter(MBB); 1738 NewMBB->splice(NewMBB->begin(), MBB, 1739 llvm::next(MachineBasicBlock::iterator(MI)), 1740 MBB->end()); 1741 NewMBB->transferSuccessorsAndUpdatePHIs(MBB); 1742 return NewMBB; 1743 } 1744 1745 // Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI. 1746 MachineBasicBlock * 1747 SystemZTargetLowering::emitSelect(MachineInstr *MI, 1748 MachineBasicBlock *MBB) const { 1749 const SystemZInstrInfo *TII = TM.getInstrInfo(); 1750 1751 unsigned DestReg = MI->getOperand(0).getReg(); 1752 unsigned TrueReg = MI->getOperand(1).getReg(); 1753 unsigned FalseReg = MI->getOperand(2).getReg(); 1754 unsigned CCValid = MI->getOperand(3).getImm(); 1755 unsigned CCMask = MI->getOperand(4).getImm(); 1756 DebugLoc DL = MI->getDebugLoc(); 1757 1758 MachineBasicBlock *StartMBB = MBB; 1759 MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB); 1760 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 1761 1762 // StartMBB: 1763 // BRC CCMask, JoinMBB 1764 // # fallthrough to FalseMBB 1765 MBB = StartMBB; 1766 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 1767 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 1768 MBB->addSuccessor(JoinMBB); 1769 MBB->addSuccessor(FalseMBB); 1770 1771 // FalseMBB: 1772 // # fallthrough to JoinMBB 1773 MBB = FalseMBB; 1774 MBB->addSuccessor(JoinMBB); 1775 1776 // JoinMBB: 1777 // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ] 1778 // ... 1779 MBB = JoinMBB; 1780 BuildMI(*MBB, MBB->begin(), DL, TII->get(SystemZ::PHI), DestReg) 1781 .addReg(TrueReg).addMBB(StartMBB) 1782 .addReg(FalseReg).addMBB(FalseMBB); 1783 1784 MI->eraseFromParent(); 1785 return JoinMBB; 1786 } 1787 1788 // Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI. 
1789 // StoreOpcode is the store to use and Invert says whether the store should 1790 // happen when the condition is false rather than true. If a STORE ON 1791 // CONDITION is available, STOCOpcode is its opcode, otherwise it is 0. 1792 MachineBasicBlock * 1793 SystemZTargetLowering::emitCondStore(MachineInstr *MI, 1794 MachineBasicBlock *MBB, 1795 unsigned StoreOpcode, unsigned STOCOpcode, 1796 bool Invert) const { 1797 const SystemZInstrInfo *TII = TM.getInstrInfo(); 1798 1799 unsigned SrcReg = MI->getOperand(0).getReg(); 1800 MachineOperand Base = MI->getOperand(1); 1801 int64_t Disp = MI->getOperand(2).getImm(); 1802 unsigned IndexReg = MI->getOperand(3).getReg(); 1803 unsigned CCValid = MI->getOperand(4).getImm(); 1804 unsigned CCMask = MI->getOperand(5).getImm(); 1805 DebugLoc DL = MI->getDebugLoc(); 1806 1807 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); 1808 1809 // Use STOCOpcode if possible. We could use different store patterns in 1810 // order to avoid matching the index register, but the performance trade-offs 1811 // might be more complicated in that case. 1812 if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) { 1813 if (Invert) 1814 CCMask ^= CCValid; 1815 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) 1816 .addReg(SrcReg).addOperand(Base).addImm(Disp) 1817 .addImm(CCValid).addImm(CCMask); 1818 MI->eraseFromParent(); 1819 return MBB; 1820 } 1821 1822 // Get the condition needed to branch around the store. 1823 if (!Invert) 1824 CCMask ^= CCValid; 1825 1826 MachineBasicBlock *StartMBB = MBB; 1827 MachineBasicBlock *JoinMBB = splitBlockAfter(MI, MBB); 1828 MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB); 1829 1830 // StartMBB: 1831 // BRC CCMask, JoinMBB 1832 // # fallthrough to FalseMBB 1833 MBB = StartMBB; 1834 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 1835 .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB); 1836 MBB->addSuccessor(JoinMBB); 1837 MBB->addSuccessor(FalseMBB); 1838 1839 // FalseMBB: 1840 // store %SrcReg, %Disp(%Index,%Base) 1841 // # fallthrough to JoinMBB 1842 MBB = FalseMBB; 1843 BuildMI(MBB, DL, TII->get(StoreOpcode)) 1844 .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg); 1845 MBB->addSuccessor(JoinMBB); 1846 1847 MI->eraseFromParent(); 1848 return JoinMBB; 1849 } 1850 1851 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_* 1852 // or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that 1853 // performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}. 1854 // BitSize is the width of the field in bits, or 0 if this is a partword 1855 // ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize 1856 // is one of the operands. Invert says whether the field should be 1857 // inverted after performing BinOpcode (e.g. for NAND). 1858 MachineBasicBlock * 1859 SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI, 1860 MachineBasicBlock *MBB, 1861 unsigned BinOpcode, 1862 unsigned BitSize, 1863 bool Invert) const { 1864 const SystemZInstrInfo *TII = TM.getInstrInfo(); 1865 MachineFunction &MF = *MBB->getParent(); 1866 MachineRegisterInfo &MRI = MF.getRegInfo(); 1867 bool IsSubWord = (BitSize < 32); 1868 1869 // Extract the operands. Base can be a register or a frame index. 1870 // Src2 can be a register or immediate. 
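// Operand layout of these pseudos: operand 0 is the result, operands 1
// and 2 are the base and displacement of the address (the aligned
// containing word for the partword forms), and operand 3 is the second
// operand of the binary operation.  The partword ATOMIC_LOADW_* and
// ATOMIC_SWAPW forms, which come from the nodes built in
// lowerATOMIC_LOAD, additionally carry the left rotation amount, its
// negation and the field width as operands 4 to 6.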
1871 unsigned Dest = MI->getOperand(0).getReg(); 1872 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 1873 int64_t Disp = MI->getOperand(2).getImm(); 1874 MachineOperand Src2 = earlyUseOperand(MI->getOperand(3)); 1875 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 1876 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 1877 DebugLoc DL = MI->getDebugLoc(); 1878 if (IsSubWord) 1879 BitSize = MI->getOperand(6).getImm(); 1880 1881 // Subword operations use 32-bit registers. 1882 const TargetRegisterClass *RC = (BitSize <= 32 ? 1883 &SystemZ::GR32BitRegClass : 1884 &SystemZ::GR64BitRegClass); 1885 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 1886 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 1887 1888 // Get the right opcodes for the displacement. 1889 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 1890 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 1891 assert(LOpcode && CSOpcode && "Displacement out of range"); 1892 1893 // Create virtual registers for temporary results. 1894 unsigned OrigVal = MRI.createVirtualRegister(RC); 1895 unsigned OldVal = MRI.createVirtualRegister(RC); 1896 unsigned NewVal = (BinOpcode || IsSubWord ? 1897 MRI.createVirtualRegister(RC) : Src2.getReg()); 1898 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 1899 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 1900 1901 // Insert a basic block for the main loop. 1902 MachineBasicBlock *StartMBB = MBB; 1903 MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB); 1904 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 1905 1906 // StartMBB: 1907 // ... 1908 // %OrigVal = L Disp(%Base) 1909 // # fall through to LoopMBB 1910 MBB = StartMBB; 1911 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) 1912 .addOperand(Base).addImm(Disp).addReg(0); 1913 MBB->addSuccessor(LoopMBB); 1914 1915 // LoopMBB: 1916 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ] 1917 // %RotatedOldVal = RLL %OldVal, 0(%BitShift) 1918 // %RotatedNewVal = OP %RotatedOldVal, %Src2 1919 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) 1920 // %Dest = CS %OldVal, %NewVal, Disp(%Base) 1921 // JNE LoopMBB 1922 // # fall through to DoneMBB 1923 MBB = LoopMBB; 1924 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 1925 .addReg(OrigVal).addMBB(StartMBB) 1926 .addReg(Dest).addMBB(LoopMBB); 1927 if (IsSubWord) 1928 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) 1929 .addReg(OldVal).addReg(BitShift).addImm(0); 1930 if (Invert) { 1931 // Perform the operation normally and then invert every bit of the field. 1932 unsigned Tmp = MRI.createVirtualRegister(RC); 1933 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp) 1934 .addReg(RotatedOldVal).addOperand(Src2); 1935 if (BitSize < 32) 1936 // XILF with the upper BitSize bits set. 1937 BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal) 1938 .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize))); 1939 else if (BitSize == 32) 1940 // XILF with every bit set. 1941 BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal) 1942 .addReg(Tmp).addImm(~uint32_t(0)); 1943 else { 1944 // Use LCGR and add -1 to the result, which is more compact than 1945 // an XILF, XILH pair. 1946 unsigned Tmp2 = MRI.createVirtualRegister(RC); 1947 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp); 1948 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal) 1949 .addReg(Tmp2).addImm(-1); 1950 } 1951 } else if (BinOpcode) 1952 // A simple binary operation.
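// For example, an i32 atomicrmw add with a register operand reaches this
// point as ATOMIC_LOAD_AR with BinOpcode SystemZ::AR, so the update is
// just %RotatedNewVal = AR %RotatedOldVal, %Src2; for the full-width
// forms there is no rotation and the "rotated" values are simply the
// unrotated word.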
1953 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) 1954 .addReg(RotatedOldVal).addOperand(Src2); 1955 else if (IsSubWord) 1956 // Use RISBG to rotate Src2 into position and use it to replace the 1957 // field in RotatedOldVal. 1958 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal) 1959 .addReg(RotatedOldVal).addReg(Src2.getReg()) 1960 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize); 1961 if (IsSubWord) 1962 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 1963 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 1964 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 1965 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); 1966 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 1967 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 1968 MBB->addSuccessor(LoopMBB); 1969 MBB->addSuccessor(DoneMBB); 1970 1971 MI->eraseFromParent(); 1972 return DoneMBB; 1973 } 1974 1975 // Implement EmitInstrWithCustomInserter for pseudo 1976 // ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the 1977 // instruction that should be used to compare the current field with the 1978 // minimum or maximum value. KeepOldMask is the BRC condition-code mask 1979 // for when the current field should be kept. BitSize is the width of 1980 // the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction. 1981 MachineBasicBlock * 1982 SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI, 1983 MachineBasicBlock *MBB, 1984 unsigned CompareOpcode, 1985 unsigned KeepOldMask, 1986 unsigned BitSize) const { 1987 const SystemZInstrInfo *TII = TM.getInstrInfo(); 1988 MachineFunction &MF = *MBB->getParent(); 1989 MachineRegisterInfo &MRI = MF.getRegInfo(); 1990 bool IsSubWord = (BitSize < 32); 1991 1992 // Extract the operands. Base can be a register or a frame index. 1993 unsigned Dest = MI->getOperand(0).getReg(); 1994 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 1995 int64_t Disp = MI->getOperand(2).getImm(); 1996 unsigned Src2 = MI->getOperand(3).getReg(); 1997 unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0); 1998 unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0); 1999 DebugLoc DL = MI->getDebugLoc(); 2000 if (IsSubWord) 2001 BitSize = MI->getOperand(6).getImm(); 2002 2003 // Subword operations use 32-bit registers. 2004 const TargetRegisterClass *RC = (BitSize <= 32 ? 2005 &SystemZ::GR32BitRegClass : 2006 &SystemZ::GR64BitRegClass); 2007 unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG; 2008 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG; 2009 2010 // Get the right opcodes for the displacement. 2011 LOpcode = TII->getOpcodeForOffset(LOpcode, Disp); 2012 CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp); 2013 assert(LOpcode && CSOpcode && "Displacement out of range"); 2014 2015 // Create virtual registers for temporary results. 2016 unsigned OrigVal = MRI.createVirtualRegister(RC); 2017 unsigned OldVal = MRI.createVirtualRegister(RC); 2018 unsigned NewVal = MRI.createVirtualRegister(RC); 2019 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); 2020 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2); 2021 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); 2022 2023 // Insert 3 basic blocks for the loop. 
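// Control flow, roughly: StartMBB falls through to LoopMBB, which either
// branches straight to UpdateMBB (when the value already in memory wins
// the comparison and should be kept) or falls through to UseAltMBB to
// build the replacement field from Src2.  UpdateMBB then performs the CS
// and branches back to LoopMBB if the word changed underneath us,
// otherwise it falls through to DoneMBB.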
2024 MachineBasicBlock *StartMBB = MBB; 2025 MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB); 2026 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2027 MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB); 2028 MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB); 2029 2030 // StartMBB: 2031 // ... 2032 // %OrigVal = L Disp(%Base) 2033 // # fall through to LoopMBB 2034 MBB = StartMBB; 2035 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal) 2036 .addOperand(Base).addImm(Disp).addReg(0); 2037 MBB->addSuccessor(LoopMBB); 2038 2039 // LoopMBB: 2040 // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ] 2041 // %RotatedOldVal = RLL %OldVal, 0(%BitShift) 2042 // CompareOpcode %RotatedOldVal, %Src2 2043 // BRC KeepOldMask, UpdateMBB 2044 MBB = LoopMBB; 2045 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 2046 .addReg(OrigVal).addMBB(StartMBB) 2047 .addReg(Dest).addMBB(UpdateMBB); 2048 if (IsSubWord) 2049 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) 2050 .addReg(OldVal).addReg(BitShift).addImm(0); 2051 BuildMI(MBB, DL, TII->get(CompareOpcode)) 2052 .addReg(RotatedOldVal).addReg(Src2); 2053 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2054 .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB); 2055 MBB->addSuccessor(UpdateMBB); 2056 MBB->addSuccessor(UseAltMBB); 2057 2058 // UseAltMBB: 2059 // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0 2060 // # fall through to UpdateMBB 2061 MBB = UseAltMBB; 2062 if (IsSubWord) 2063 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal) 2064 .addReg(RotatedOldVal).addReg(Src2) 2065 .addImm(32).addImm(31 + BitSize).addImm(0); 2066 MBB->addSuccessor(UpdateMBB); 2067 2068 // UpdateMBB: 2069 // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ], 2070 // [ %RotatedAltVal, UseAltMBB ] 2071 // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift) 2072 // %Dest = CS %OldVal, %NewVal, Disp(%Base) 2073 // JNE LoopMBB 2074 // # fall through to DoneMBB 2075 MBB = UpdateMBB; 2076 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal) 2077 .addReg(RotatedOldVal).addMBB(LoopMBB) 2078 .addReg(RotatedAltVal).addMBB(UseAltMBB); 2079 if (IsSubWord) 2080 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) 2081 .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0); 2082 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) 2083 .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp); 2084 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2085 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 2086 MBB->addSuccessor(LoopMBB); 2087 MBB->addSuccessor(DoneMBB); 2088 2089 MI->eraseFromParent(); 2090 return DoneMBB; 2091 } 2092 2093 // Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW 2094 // instruction MI. 2095 MachineBasicBlock * 2096 SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI, 2097 MachineBasicBlock *MBB) const { 2098 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2099 MachineFunction &MF = *MBB->getParent(); 2100 MachineRegisterInfo &MRI = MF.getRegInfo(); 2101 2102 // Extract the operands. Base can be a register or a frame index.
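// Operand layout of ATOMIC_CMP_SWAPW, as built in lowerATOMIC_CMP_SWAP:
// operand 0 is the result (the loaded word, rotated so that the field
// ends up in the low bits), operands 1 and 2 are the base and
// displacement of the aligned word address, operands 3 and 4 are the
// compare and swap values, operands 5 and 6 are the rotation amounts and
// operand 7 is the field width in bits.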
2103 unsigned Dest = MI->getOperand(0).getReg(); 2104 MachineOperand Base = earlyUseOperand(MI->getOperand(1)); 2105 int64_t Disp = MI->getOperand(2).getImm(); 2106 unsigned OrigCmpVal = MI->getOperand(3).getReg(); 2107 unsigned OrigSwapVal = MI->getOperand(4).getReg(); 2108 unsigned BitShift = MI->getOperand(5).getReg(); 2109 unsigned NegBitShift = MI->getOperand(6).getReg(); 2110 int64_t BitSize = MI->getOperand(7).getImm(); 2111 DebugLoc DL = MI->getDebugLoc(); 2112 2113 const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass; 2114 2115 // Get the right opcodes for the displacement. 2116 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp); 2117 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp); 2118 assert(LOpcode && CSOpcode && "Displacement out of range"); 2119 2120 // Create virtual registers for temporary results. 2121 unsigned OrigOldVal = MRI.createVirtualRegister(RC); 2122 unsigned OldVal = MRI.createVirtualRegister(RC); 2123 unsigned CmpVal = MRI.createVirtualRegister(RC); 2124 unsigned SwapVal = MRI.createVirtualRegister(RC); 2125 unsigned StoreVal = MRI.createVirtualRegister(RC); 2126 unsigned RetryOldVal = MRI.createVirtualRegister(RC); 2127 unsigned RetryCmpVal = MRI.createVirtualRegister(RC); 2128 unsigned RetrySwapVal = MRI.createVirtualRegister(RC); 2129 2130 // Insert 2 basic blocks for the loop. 2131 MachineBasicBlock *StartMBB = MBB; 2132 MachineBasicBlock *DoneMBB = splitBlockAfter(MI, MBB); 2133 MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB); 2134 MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB); 2135 2136 // StartMBB: 2137 // ... 2138 // %OrigOldVal = L Disp(%Base) 2139 // # fall through to LoopMBB 2140 MBB = StartMBB; 2141 BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal) 2142 .addOperand(Base).addImm(Disp).addReg(0); 2143 MBB->addSuccessor(LoopMBB); 2144 2145 // LoopMBB: 2146 // %OldVal = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ] 2147 // %CmpVal = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ] 2148 // %SwapVal = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ] 2149 // %Dest = RLL %OldVal, BitSize(%BitShift) 2150 // ^^ The low BitSize bits contain the field 2151 // of interest. 2152 // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0 2153 // ^^ Replace the upper 32-BitSize bits of the 2154 // comparison value with those that we loaded, 2155 // so that we can use a full word comparison.
2156 // CR %Dest, %RetryCmpVal 2157 // JNE DoneMBB 2158 // # Fall through to SetMBB 2159 MBB = LoopMBB; 2160 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) 2161 .addReg(OrigOldVal).addMBB(StartMBB) 2162 .addReg(RetryOldVal).addMBB(SetMBB); 2163 BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal) 2164 .addReg(OrigCmpVal).addMBB(StartMBB) 2165 .addReg(RetryCmpVal).addMBB(SetMBB); 2166 BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal) 2167 .addReg(OrigSwapVal).addMBB(StartMBB) 2168 .addReg(RetrySwapVal).addMBB(SetMBB); 2169 BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest) 2170 .addReg(OldVal).addReg(BitShift).addImm(BitSize); 2171 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal) 2172 .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0); 2173 BuildMI(MBB, DL, TII->get(SystemZ::CR)) 2174 .addReg(Dest).addReg(RetryCmpVal); 2175 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2176 .addImm(SystemZ::CCMASK_ICMP) 2177 .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB); 2178 MBB->addSuccessor(DoneMBB); 2179 MBB->addSuccessor(SetMBB); 2180 2181 // SetMBB: 2182 // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0 2183 // ^^ Replace the upper 32-BitSize bits of the new 2184 // value with those that we loaded. 2185 // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift) 2186 // ^^ Rotate the new field to its proper position. 2187 // %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base) 2188 // JNE LoopMBB 2189 // # fall through to DoneMBB 2190 MBB = SetMBB; 2191 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal) 2192 .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0); 2193 BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal) 2194 .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize); 2195 BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal) 2196 .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp); 2197 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) 2198 .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB); 2199 MBB->addSuccessor(LoopMBB); 2200 MBB->addSuccessor(DoneMBB); 2201 2202 MI->eraseFromParent(); 2203 return DoneMBB; 2204 } 2205 2206 // Emit an extension from a GR32 or GR64 to a GR128. ClearEven is true 2207 // if the high register of the GR128 value must be cleared or false if 2208 // it's "don't care". SubReg is subreg_low32 when extending a GR32 2209 // and subreg_low when extending a GR64.
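// For example, the ZEXT128_32 form used by the 32-bit UDIVREM path
// expands into roughly:
//
//   %In128    = IMPLICIT_DEF
//   %Zero64   = LLILL 0
//   %NewIn128 = INSERT_SUBREG %In128, %Zero64, subreg_high
//   %Dest     = INSERT_SUBREG %NewIn128, %Src, subreg_low32
//
// i.e. the even (high) half of the register pair is cleared and the
// source value lands in the low 32 bits of the odd half.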
2210 MachineBasicBlock * 2211 SystemZTargetLowering::emitExt128(MachineInstr *MI, 2212 MachineBasicBlock *MBB, 2213 bool ClearEven, unsigned SubReg) const { 2214 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2215 MachineFunction &MF = *MBB->getParent(); 2216 MachineRegisterInfo &MRI = MF.getRegInfo(); 2217 DebugLoc DL = MI->getDebugLoc(); 2218 2219 unsigned Dest = MI->getOperand(0).getReg(); 2220 unsigned Src = MI->getOperand(1).getReg(); 2221 unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); 2222 2223 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128); 2224 if (ClearEven) { 2225 unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass); 2226 unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass); 2227 2228 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64) 2229 .addImm(0); 2230 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128) 2231 .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_high); 2232 In128 = NewIn128; 2233 } 2234 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) 2235 .addReg(In128).addReg(Src).addImm(SubReg); 2236 2237 MI->eraseFromParent(); 2238 return MBB; 2239 } 2240 2241 MachineBasicBlock * 2242 SystemZTargetLowering::emitMVCWrapper(MachineInstr *MI, 2243 MachineBasicBlock *MBB) const { 2244 const SystemZInstrInfo *TII = TM.getInstrInfo(); 2245 DebugLoc DL = MI->getDebugLoc(); 2246 2247 MachineOperand DestBase = MI->getOperand(0); 2248 uint64_t DestDisp = MI->getOperand(1).getImm(); 2249 MachineOperand SrcBase = MI->getOperand(2); 2250 uint64_t SrcDisp = MI->getOperand(3).getImm(); 2251 uint64_t Length = MI->getOperand(4).getImm(); 2252 2253 BuildMI(*MBB, MI, DL, TII->get(SystemZ::MVC)) 2254 .addOperand(DestBase).addImm(DestDisp).addImm(Length) 2255 .addOperand(SrcBase).addImm(SrcDisp); 2256 2257 MI->eraseFromParent(); 2258 return MBB; 2259 } 2260 2261 MachineBasicBlock *SystemZTargetLowering:: 2262 EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const { 2263 switch (MI->getOpcode()) { 2264 case SystemZ::Select32: 2265 case SystemZ::SelectF32: 2266 case SystemZ::Select64: 2267 case SystemZ::SelectF64: 2268 case SystemZ::SelectF128: 2269 return emitSelect(MI, MBB); 2270 2271 case SystemZ::CondStore8_32: 2272 return emitCondStore(MI, MBB, SystemZ::STC32, 0, false); 2273 case SystemZ::CondStore8_32Inv: 2274 return emitCondStore(MI, MBB, SystemZ::STC32, 0, true); 2275 case SystemZ::CondStore16_32: 2276 return emitCondStore(MI, MBB, SystemZ::STH32, 0, false); 2277 case SystemZ::CondStore16_32Inv: 2278 return emitCondStore(MI, MBB, SystemZ::STH32, 0, true); 2279 case SystemZ::CondStore32_32: 2280 return emitCondStore(MI, MBB, SystemZ::ST32, SystemZ::STOC32, false); 2281 case SystemZ::CondStore32_32Inv: 2282 return emitCondStore(MI, MBB, SystemZ::ST32, SystemZ::STOC32, true); 2283 case SystemZ::CondStore8: 2284 return emitCondStore(MI, MBB, SystemZ::STC, 0, false); 2285 case SystemZ::CondStore8Inv: 2286 return emitCondStore(MI, MBB, SystemZ::STC, 0, true); 2287 case SystemZ::CondStore16: 2288 return emitCondStore(MI, MBB, SystemZ::STH, 0, false); 2289 case SystemZ::CondStore16Inv: 2290 return emitCondStore(MI, MBB, SystemZ::STH, 0, true); 2291 case SystemZ::CondStore32: 2292 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false); 2293 case SystemZ::CondStore32Inv: 2294 return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true); 2295 case SystemZ::CondStore64: 2296 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, 
false); 2297 case SystemZ::CondStore64Inv: 2298 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true); 2299 case SystemZ::CondStoreF32: 2300 return emitCondStore(MI, MBB, SystemZ::STE, 0, false); 2301 case SystemZ::CondStoreF32Inv: 2302 return emitCondStore(MI, MBB, SystemZ::STE, 0, true); 2303 case SystemZ::CondStoreF64: 2304 return emitCondStore(MI, MBB, SystemZ::STD, 0, false); 2305 case SystemZ::CondStoreF64Inv: 2306 return emitCondStore(MI, MBB, SystemZ::STD, 0, true); 2307 2308 case SystemZ::AEXT128_64: 2309 return emitExt128(MI, MBB, false, SystemZ::subreg_low); 2310 case SystemZ::ZEXT128_32: 2311 return emitExt128(MI, MBB, true, SystemZ::subreg_low32); 2312 case SystemZ::ZEXT128_64: 2313 return emitExt128(MI, MBB, true, SystemZ::subreg_low); 2314 2315 case SystemZ::ATOMIC_SWAPW: 2316 return emitAtomicLoadBinary(MI, MBB, 0, 0); 2317 case SystemZ::ATOMIC_SWAP_32: 2318 return emitAtomicLoadBinary(MI, MBB, 0, 32); 2319 case SystemZ::ATOMIC_SWAP_64: 2320 return emitAtomicLoadBinary(MI, MBB, 0, 64); 2321 2322 case SystemZ::ATOMIC_LOADW_AR: 2323 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0); 2324 case SystemZ::ATOMIC_LOADW_AFI: 2325 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0); 2326 case SystemZ::ATOMIC_LOAD_AR: 2327 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32); 2328 case SystemZ::ATOMIC_LOAD_AHI: 2329 return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32); 2330 case SystemZ::ATOMIC_LOAD_AFI: 2331 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32); 2332 case SystemZ::ATOMIC_LOAD_AGR: 2333 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64); 2334 case SystemZ::ATOMIC_LOAD_AGHI: 2335 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64); 2336 case SystemZ::ATOMIC_LOAD_AGFI: 2337 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64); 2338 2339 case SystemZ::ATOMIC_LOADW_SR: 2340 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0); 2341 case SystemZ::ATOMIC_LOAD_SR: 2342 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32); 2343 case SystemZ::ATOMIC_LOAD_SGR: 2344 return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64); 2345 2346 case SystemZ::ATOMIC_LOADW_NR: 2347 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0); 2348 case SystemZ::ATOMIC_LOADW_NILH: 2349 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0); 2350 case SystemZ::ATOMIC_LOAD_NR: 2351 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32); 2352 case SystemZ::ATOMIC_LOAD_NILL32: 2353 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32); 2354 case SystemZ::ATOMIC_LOAD_NILH32: 2355 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32); 2356 case SystemZ::ATOMIC_LOAD_NILF32: 2357 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32); 2358 case SystemZ::ATOMIC_LOAD_NGR: 2359 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64); 2360 case SystemZ::ATOMIC_LOAD_NILL: 2361 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64); 2362 case SystemZ::ATOMIC_LOAD_NILH: 2363 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64); 2364 case SystemZ::ATOMIC_LOAD_NIHL: 2365 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64); 2366 case SystemZ::ATOMIC_LOAD_NIHH: 2367 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64); 2368 case SystemZ::ATOMIC_LOAD_NILF: 2369 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64); 2370 case SystemZ::ATOMIC_LOAD_NIHF: 2371 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64); 2372 2373 case SystemZ::ATOMIC_LOADW_OR: 2374 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0); 2375 case SystemZ::ATOMIC_LOADW_OILH: 2376 return 
emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 0); 2377 case SystemZ::ATOMIC_LOAD_OR: 2378 return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32); 2379 case SystemZ::ATOMIC_LOAD_OILL32: 2380 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL32, 32); 2381 case SystemZ::ATOMIC_LOAD_OILH32: 2382 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 32); 2383 case SystemZ::ATOMIC_LOAD_OILF32: 2384 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF32, 32); 2385 case SystemZ::ATOMIC_LOAD_OGR: 2386 return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64); 2387 case SystemZ::ATOMIC_LOAD_OILL: 2388 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 64); 2389 case SystemZ::ATOMIC_LOAD_OILH: 2390 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 64); 2391 case SystemZ::ATOMIC_LOAD_OIHL: 2392 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL, 64); 2393 case SystemZ::ATOMIC_LOAD_OIHH: 2394 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH, 64); 2395 case SystemZ::ATOMIC_LOAD_OILF: 2396 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 64); 2397 case SystemZ::ATOMIC_LOAD_OIHF: 2398 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF, 64); 2399 2400 case SystemZ::ATOMIC_LOADW_XR: 2401 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0); 2402 case SystemZ::ATOMIC_LOADW_XILF: 2403 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 0); 2404 case SystemZ::ATOMIC_LOAD_XR: 2405 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32); 2406 case SystemZ::ATOMIC_LOAD_XILF32: 2407 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 32); 2408 case SystemZ::ATOMIC_LOAD_XGR: 2409 return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64); 2410 case SystemZ::ATOMIC_LOAD_XILF: 2411 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 64); 2412 case SystemZ::ATOMIC_LOAD_XIHF: 2413 return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF, 64); 2414 2415 case SystemZ::ATOMIC_LOADW_NRi: 2416 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true); 2417 case SystemZ::ATOMIC_LOADW_NILHi: 2418 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0, true); 2419 case SystemZ::ATOMIC_LOAD_NRi: 2420 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true); 2421 case SystemZ::ATOMIC_LOAD_NILL32i: 2422 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32, true); 2423 case SystemZ::ATOMIC_LOAD_NILH32i: 2424 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32, true); 2425 case SystemZ::ATOMIC_LOAD_NILF32i: 2426 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32, true); 2427 case SystemZ::ATOMIC_LOAD_NGRi: 2428 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true); 2429 case SystemZ::ATOMIC_LOAD_NILLi: 2430 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64, true); 2431 case SystemZ::ATOMIC_LOAD_NILHi: 2432 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64, true); 2433 case SystemZ::ATOMIC_LOAD_NIHLi: 2434 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64, true); 2435 case SystemZ::ATOMIC_LOAD_NIHHi: 2436 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64, true); 2437 case SystemZ::ATOMIC_LOAD_NILFi: 2438 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64, true); 2439 case SystemZ::ATOMIC_LOAD_NIHFi: 2440 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64, true); 2441 2442 case SystemZ::ATOMIC_LOADW_MIN: 2443 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 2444 SystemZ::CCMASK_CMP_LE, 0); 2445 case SystemZ::ATOMIC_LOAD_MIN_32: 2446 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 2447 SystemZ::CCMASK_CMP_LE, 32); 2448 case SystemZ::ATOMIC_LOAD_MIN_64: 2449 return 
emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR, 2450 SystemZ::CCMASK_CMP_LE, 64); 2451 2452 case SystemZ::ATOMIC_LOADW_MAX: 2453 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 2454 SystemZ::CCMASK_CMP_GE, 0); 2455 case SystemZ::ATOMIC_LOAD_MAX_32: 2456 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, 2457 SystemZ::CCMASK_CMP_GE, 32); 2458 case SystemZ::ATOMIC_LOAD_MAX_64: 2459 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR, 2460 SystemZ::CCMASK_CMP_GE, 64); 2461 2462 case SystemZ::ATOMIC_LOADW_UMIN: 2463 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, 2464 SystemZ::CCMASK_CMP_LE, 0); 2465 case SystemZ::ATOMIC_LOAD_UMIN_32: 2466 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, 2467 SystemZ::CCMASK_CMP_LE, 32); 2468 case SystemZ::ATOMIC_LOAD_UMIN_64: 2469 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR, 2470 SystemZ::CCMASK_CMP_LE, 64); 2471 2472 case SystemZ::ATOMIC_LOADW_UMAX: 2473 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, 2474 SystemZ::CCMASK_CMP_GE, 0); 2475 case SystemZ::ATOMIC_LOAD_UMAX_32: 2476 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, 2477 SystemZ::CCMASK_CMP_GE, 32); 2478 case SystemZ::ATOMIC_LOAD_UMAX_64: 2479 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR, 2480 SystemZ::CCMASK_CMP_GE, 64); 2481 2482 case SystemZ::ATOMIC_CMP_SWAPW: 2483 return emitAtomicCmpSwapW(MI, MBB); 2484 case SystemZ::MVCWrapper: 2485 return emitMVCWrapper(MI, MBB); 2486 default: 2487 llvm_unreachable("Unexpected instr type to insert"); 2488 } 2489 } 2490
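// Putting the atomic pieces above together (a sketch, not the exact
// output): for IR like
//
//   %old = atomicrmw add i16* %ptr, i16 %b seq_cst
//
// type legalization widens the operation to work on i32 while keeping
// the i16 memory VT, lowerATOMIC_LOAD wraps it in an ATOMIC_LOADW_ADD
// node whose operands carry the aligned word address, the pre-shifted
// addend and the rotation amounts, instruction selection turns that node
// into an ATOMIC_LOADW_AR pseudo, and emitAtomicLoadBinary finally
// expands the pseudo into an L + (RLL, AR, RLL, CS, JNE) loop, after
// which the rotate-and-truncate emitted in lowerATOMIC_LOAD recovers the
// old field value in the low bits of the result.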