//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SystemZInstrInfo.h"
#include "SystemZInstrBuilder.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"

// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned int Count) {
  return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
}

// Reg should be a 32-bit GPR. Return true if it is a high register rather
// than a low register.
static bool isHighReg(unsigned int Reg) {
  if (SystemZ::GRH32BitRegClass.contains(Reg))
    return true;
  assert(SystemZ::GR32BitRegClass.contains(Reg) && "Invalid GRX32");
  return false;
}

// Pin the vtable to this file.
void SystemZInstrInfo::anchor() {}

SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
  : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
    RI(), STI(sti) {
}

// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions. Use the original instruction for one
  // of them (arbitrarily the second here) and create a clone for the other.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Set the opcodes.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}

// Split ADJDYNALLOC instruction MI.
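// The replacement is an LA or LAY whose displacement is the pseudo's
// immediate plus the maximum call frame size and SystemZMC::CallFrameSize.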
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo *MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);

  uint64_t Offset = (MFFrame->getMaxCallFrameSize() +
                     SystemZMC::CallFrameSize +
                     OffsetMO.getImm());
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}

// MI is an RI-style pseudo instruction. Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32. ConvertHigh is true if LowOpcode takes a signed operand
// and HighOpcode takes an unsigned 32-bit operand. In those cases,
// MI has the same kind of operand as LowOpcode, so needs to be converted
// if HighOpcode is used.
void SystemZInstrInfo::expandRIPseudo(MachineInstr *MI, unsigned LowOpcode,
                                      unsigned HighOpcode,
                                      bool ConvertHigh) const {
  unsigned Reg = MI->getOperand(0).getReg();
  bool IsHigh = isHighReg(Reg);
  MI->setDesc(get(IsHigh ? HighOpcode : LowOpcode));
  if (IsHigh && ConvertHigh)
    MI->getOperand(1).setImm(uint32_t(MI->getOperand(1).getImm()));
}

// MI is a three-operand RIE-style pseudo instruction. Replace it with
// LowOpcode3 if the registers are both low GR32s, otherwise use a move
// followed by HighOpcode or LowOpcode, depending on whether the target
// is a high or low GR32.
void SystemZInstrInfo::expandRIEPseudo(MachineInstr *MI, unsigned LowOpcode,
                                       unsigned LowOpcodeK,
                                       unsigned HighOpcode) const {
  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned SrcReg = MI->getOperand(1).getReg();
  bool DestIsHigh = isHighReg(DestReg);
  bool SrcIsHigh = isHighReg(SrcReg);
  if (!DestIsHigh && !SrcIsHigh)
    MI->setDesc(get(LowOpcodeK));
  else {
    emitGRX32Move(*MI->getParent(), MI, MI->getDebugLoc(),
                  DestReg, SrcReg, SystemZ::LR, 32,
                  MI->getOperand(1).isKill());
    MI->setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
    MI->getOperand(1).setReg(DestReg);
  }
}

// MI is an RXY-style pseudo instruction. Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.
void SystemZInstrInfo::expandRXYPseudo(MachineInstr *MI, unsigned LowOpcode,
                                       unsigned HighOpcode) const {
  unsigned Reg = MI->getOperand(0).getReg();
  unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? HighOpcode : LowOpcode,
                                       MI->getOperand(2).getImm());
  MI->setDesc(get(Opcode));
}

// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another. Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
void SystemZInstrInfo::expandZExtPseudo(MachineInstr *MI, unsigned LowOpcode,
                                        unsigned Size) const {
  emitGRX32Move(*MI->getParent(), MI, MI->getDebugLoc(),
                MI->getOperand(0).getReg(), MI->getOperand(1).getReg(),
                LowOpcode, Size, MI->getOperand(1).isKill());
  MI->eraseFromParent();
}

// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
// DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg
// are low registers, otherwise use RISB[LH]G. Size is the number of bits
// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
// KillSrc is true if this move is the last use of SrcReg.
void SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     DebugLoc DL, unsigned DestReg,
                                     unsigned SrcReg, unsigned LowLowOpcode,
                                     unsigned Size, bool KillSrc) const {
  unsigned Opcode;
  bool DestIsHigh = isHighReg(DestReg);
  bool SrcIsHigh = isHighReg(SrcReg);
  if (DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBHH;
  else if (DestIsHigh && !SrcIsHigh)
    Opcode = SystemZ::RISBHL;
  else if (!DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBLH;
  else {
    BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(DestReg, RegState::Undef)
    .addReg(SrcReg, getKillRegState(KillSrc))
    .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
}

// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static int isSimpleMove(const MachineInstr *MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  if ((MCID.TSFlags & Flag) &&
      MI->getOperand(1).isFI() &&
      MI->getOperand(2).getImm() == 0 &&
      MI->getOperand(3).getReg() == 0) {
    FrameIndex = MI->getOperand(1).getIndex();
    return MI->getOperand(0).getReg();
  }
  return 0;
}

unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}

unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}

bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo();
  if (MI->getOpcode() != SystemZ::MVC ||
      !MI->getOperand(0).isFI() ||
      MI->getOperand(1).getImm() != 0 ||
      !MI->getOperand(3).isFI() ||
      MI->getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI->getOperand(2).getImm();
  unsigned FI1 = MI->getOperand(0).getIndex();
  unsigned FI2 = MI->getOperand(3).getIndex();
  if (MFI->getObjectSize(FI1) != Length ||
      MFI->getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}

bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(I));
    if (!Branch.Target->isMBB())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.Target->getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (std::next(I) != MBB.end())
        std::next(I)->eraseFromParent();

      Cond.clear();
      FBB = nullptr;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
        TBB = nullptr;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.Target->getMBB();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.Target->getMBB();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.Target->getMBB())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCCValid = Cond[0].getImm();
    unsigned OldCCMask = Cond[1].getImm();
    if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does. Should be easy on Z!
    return false;
  }

  return false;
}

unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    if (!I->isBranch())
      break;
    if (!getBranchInfo(I).Target->isMBB())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

bool SystemZInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 2 && "Invalid condition");
  Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
  return false;
}

unsigned
SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  // In this function we output 32-bit branches, which should always
  // have enough range. They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");

  if (Cond.empty()) {
    // Unconditional branch?
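    // An empty condition list from AnalyzeBranch means a single
    // unconditional jump to TBB, so FBB must be null here.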
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CCValid = Cond[0].getImm();
  unsigned CCMask = Cond[1].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way conditional branch. Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

bool SystemZInstrInfo::analyzeCompare(const MachineInstr *MI,
                                      unsigned &SrcReg, unsigned &SrcReg2,
                                      int &Mask, int &Value) const {
  assert(MI->isCompare() && "Caller should have checked for a comparison");

  if (MI->getNumExplicitOperands() == 2 &&
      MI->getOperand(0).isReg() &&
      MI->getOperand(1).isImm()) {
    SrcReg = MI->getOperand(0).getReg();
    SrcReg2 = 0;
    Value = MI->getOperand(1).getImm();
    Mask = ~0;
    return true;
  }

  return false;
}

// If Reg is a virtual register, return its definition, otherwise return null.
static MachineInstr *getDef(unsigned Reg,
                            const MachineRegisterInfo *MRI) {
  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return nullptr;
  return MRI->getUniqueVRegDef(Reg);
}

// Return true if MI is a shift of type Opcode by Imm bits.
static bool isShift(MachineInstr *MI, int Opcode, int64_t Imm) {
  return (MI->getOpcode() == Opcode &&
          !MI->getOperand(2).getReg() &&
          MI->getOperand(3).getImm() == Imm);
}

// If the destination of MI has no uses, delete it as dead.
static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
  if (MRI->use_nodbg_empty(MI->getOperand(0).getReg()))
    MI->eraseFromParent();
}

// Compare compares SrcReg against zero. Check whether SrcReg contains
// the result of an IPM sequence whose input CC survives until Compare,
// and whether Compare is therefore redundant. Delete it and return
// true if so.
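//
// The recognized sequence is an IPM followed by an SRL by SystemZ::IPM_CC
// bits and an RLL by 31, optionally sign-extended by LGFR, with the IPM in
// the same block as Compare and nothing clobbering CC in between.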
static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
                                  const MachineRegisterInfo *MRI,
                                  const TargetRegisterInfo *TRI) {
  MachineInstr *LGFR = nullptr;
  MachineInstr *RLL = getDef(SrcReg, MRI);
  if (RLL && RLL->getOpcode() == SystemZ::LGFR) {
    LGFR = RLL;
    RLL = getDef(LGFR->getOperand(1).getReg(), MRI);
  }
  if (!RLL || !isShift(RLL, SystemZ::RLL, 31))
    return false;

  MachineInstr *SRL = getDef(RLL->getOperand(1).getReg(), MRI);
  if (!SRL || !isShift(SRL, SystemZ::SRL, SystemZ::IPM_CC))
    return false;

  MachineInstr *IPM = getDef(SRL->getOperand(1).getReg(), MRI);
  if (!IPM || IPM->getOpcode() != SystemZ::IPM)
    return false;

  // Check that there are no assignments to CC between the IPM and Compare.
  if (IPM->getParent() != Compare->getParent())
    return false;
  MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare;
  for (++MBBI; MBBI != MBBE; ++MBBI) {
    MachineInstr *MI = MBBI;
    if (MI->modifiesRegister(SystemZ::CC, TRI))
      return false;
  }

  Compare->eraseFromParent();
  if (LGFR)
    eraseIfDead(LGFR, MRI);
  eraseIfDead(RLL, MRI);
  eraseIfDead(SRL, MRI);
  eraseIfDead(IPM, MRI);

  return true;
}

bool
SystemZInstrInfo::optimizeCompareInstr(MachineInstr *Compare,
                                       unsigned SrcReg, unsigned SrcReg2,
                                       int Mask, int Value,
                                       const MachineRegisterInfo *MRI) const {
  assert(!SrcReg2 && "Only optimizing constant comparisons so far");
  bool IsLogical = (Compare->getDesc().TSFlags & SystemZII::IsLogical) != 0;
  if (Value == 0 &&
      !IsLogical &&
      removeIPMBasedCompare(Compare, SrcReg, MRI, &RI))
    return true;
  return false;
}

// If Opcode is a move that has a conditional variant, return that variant,
// otherwise return 0.
static unsigned getConditionalMove(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::LR: return SystemZ::LOCR;
  case SystemZ::LGR: return SystemZ::LOCGR;
  default: return 0;
  }
}

bool SystemZInstrInfo::isPredicable(MachineInstr *MI) const {
  unsigned Opcode = MI->getOpcode();
  if (STI.hasLoadStoreOnCond() &&
      getConditionalMove(Opcode))
    return true;
  return false;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    const BranchProbability &Probability) const {
  // For now only convert single instructions.
  return NumCycles == 1;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned NumCyclesT, unsigned ExtraPredCyclesT,
                    MachineBasicBlock &FMBB,
                    unsigned NumCyclesF, unsigned ExtraPredCyclesF,
                    const BranchProbability &Probability) const {
  // For now avoid converting mutually-exclusive cases.
  return false;
}

bool SystemZInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();
  assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
  unsigned Opcode = MI->getOpcode();
  if (STI.hasLoadStoreOnCond()) {
    if (unsigned CondOpcode = getConditionalMove(Opcode)) {
      MI->setDesc(get(CondOpcode));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
        .addImm(CCValid).addImm(CCMask)
        .addReg(SystemZ::CC, RegState::Implicit);
      return true;
    }
  }
  return false;
}

void
SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI, DebugLoc DL,
                              unsigned DestReg, unsigned SrcReg,
                              bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves. This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
                RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
                RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
    return;
  }

  if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
    emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc);
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void
SystemZInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIdx,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                    .addReg(SrcReg, getKillRegState(isKill)), FrameIdx);
}

void
SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned DestReg, int FrameIdx,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}

// Return true if MI is a simple load or store with a 12-bit displacement
// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  return ((MCID.TSFlags & Flag) &&
          isUInt<12>(MI->getOperand(2).getImm()) &&
          MI->getOperand(3).getReg() == 0);
}

namespace {
struct LogicOp {
  LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {}
  LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
    : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}

  operator bool() const { return RegSize; }

  unsigned RegSize, ImmLSB, ImmSize;
};
} // end anonymous namespace

static LogicOp interpretAndImmediate(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::NILMux: return LogicOp(32, 0, 16);
  case SystemZ::NIHMux: return LogicOp(32, 16, 16);
  case SystemZ::NILL64: return LogicOp(64, 0, 16);
  case SystemZ::NILH64: return LogicOp(64, 16, 16);
  case SystemZ::NIHL64: return LogicOp(64, 32, 16);
  case SystemZ::NIHH64: return LogicOp(64, 48, 16);
  case SystemZ::NIFMux: return LogicOp(32, 0, 32);
  case SystemZ::NILF64: return LogicOp(64, 0, 32);
  case SystemZ::NIHF64: return LogicOp(64, 32, 32);
  default: return LogicOp();
  }
}

// Used to return from convertToThreeAddress after replacing two-address
// instruction OldMI with three-address instruction NewMI.
static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
                                                 MachineInstr *NewMI,
                                                 LiveVariables *LV) {
  if (LV) {
    unsigned NumOps = OldMI->getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = OldMI->getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), OldMI, NewMI);
    }
  }
  return NewMI;
}

MachineInstr *
SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  unsigned Opcode = MI->getOpcode();
  unsigned NumOps = MI->getNumOperands();

  // Try to convert something like SLL into SLLK, if supported.
  // We prefer to keep the two-operand form where possible both
  // because it tends to be shorter and because some instructions
  // have memory forms that can be used during spilling.
  if (STI.hasDistinctOps()) {
    MachineOperand &Dest = MI->getOperand(0);
    MachineOperand &Src = MI->getOperand(1);
    unsigned DestReg = Dest.getReg();
    unsigned SrcReg = Src.getReg();
    // AHIMux is only really a three-operand instruction when both operands
    // are low registers. Try to constrain both operands to be low if
    // possible.
    if (Opcode == SystemZ::AHIMux &&
        TargetRegisterInfo::isVirtualRegister(DestReg) &&
        TargetRegisterInfo::isVirtualRegister(SrcReg) &&
        MRI.getRegClass(DestReg)->contains(SystemZ::R1L) &&
        MRI.getRegClass(SrcReg)->contains(SystemZ::R1L)) {
      MRI.constrainRegClass(DestReg, &SystemZ::GR32BitRegClass);
      MRI.constrainRegClass(SrcReg, &SystemZ::GR32BitRegClass);
    }
    int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode);
    if (ThreeOperandOpcode >= 0) {
      MachineInstrBuilder MIB =
        BuildMI(*MBB, MBBI, MI->getDebugLoc(), get(ThreeOperandOpcode))
        .addOperand(Dest);
      // Keep the kill state, but drop the tied flag.
      MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
      // Keep the remaining operands as-is.
      for (unsigned I = 2; I < NumOps; ++I)
        MIB.addOperand(MI->getOperand(I));
      return finishConvertToThreeAddress(MI, MIB, LV);
    }
  }

  // Try to convert an AND into an RISBG-type instruction.
  if (LogicOp And = interpretAndImmediate(Opcode)) {
    uint64_t Imm = MI->getOperand(2).getImm() << And.ImmLSB;
    // AND IMMEDIATE leaves the other bits of the register unchanged.
    Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
    unsigned Start, End;
    if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
      unsigned NewOpcode;
      if (And.RegSize == 64)
        NewOpcode = SystemZ::RISBG;
      else {
        NewOpcode = SystemZ::RISBMux;
        Start &= 31;
        End &= 31;
      }
      MachineOperand &Dest = MI->getOperand(0);
      MachineOperand &Src = MI->getOperand(1);
      MachineInstrBuilder MIB =
        BuildMI(*MBB, MI, MI->getDebugLoc(), get(NewOpcode))
        .addOperand(Dest).addReg(0)
        .addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg())
        .addImm(Start).addImm(End + 128).addImm(0);
      return finishConvertToThreeAddress(MI, MIB, LV);
    }
  }
  return nullptr;
}

MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        int FrameIndex) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Size = MFI->getObjectSize(FrameIndex);
  unsigned Opcode = MI->getOpcode();

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    if ((Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
        isInt<8>(MI->getOperand(2).getImm()) &&
        !MI->getOperand(3).getReg()) {
      // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
      return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::AGSI))
        .addFrameIndex(FrameIndex).addImm(0)
        .addImm(MI->getOperand(2).getImm());
    }
    return nullptr;
  }

  // All other cases require a single operand.
  if (Ops.size() != 1)
    return nullptr;

  unsigned OpNum = Ops[0];
  assert(Size == MF.getRegInfo()
         .getRegClass(MI->getOperand(OpNum).getReg())->getSize() &&
         "Invalid size combination");

  if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) &&
      OpNum == 0 &&
      isInt<8>(MI->getOperand(2).getImm())) {
    // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
    Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
    return BuildMI(MF, MI->getDebugLoc(), get(Opcode))
      .addFrameIndex(FrameIndex).addImm(0)
      .addImm(MI->getOperand(2).getImm());
  }

  if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
    bool Op0IsGPR = (Opcode == SystemZ::LGDR);
    bool Op1IsGPR = (Opcode == SystemZ::LDGR);
    // If we're spilling the destination of an LDGR or LGDR, store the
    // source register instead.
    if (OpNum == 0) {
      unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
      return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode))
        .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex)
        .addImm(0).addReg(0);
    }
    // If we're spilling the source of an LDGR or LGDR, load the
    // destination register instead.
    if (OpNum == 1) {
      unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
      unsigned Dest = MI->getOperand(0).getReg();
      return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest)
        .addFrameIndex(FrameIndex).addImm(0).addReg(0);
    }
  }

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled. Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy. This means that we cannot use it if the
  // load or store is volatile. We also wouldn't be able to use MVC if
  // the two memories partially overlap, but that case cannot occur here,
  // because we know that one of the memories is a full frame index.
  //
  // For performance reasons, we also want to avoid using MVC if the addresses
  // might be equal. We don't worry about that case here, because spill slot
  // coloring happens later, and because we have special code to remove
  // MVCs that turn out to be redundant.
  if (OpNum == 0 && MI->hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI->memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile()) {
      // Handle conversion of loads.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addFrameIndex(FrameIndex).addImm(0).addImm(Size)
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addMemOperand(MMO);
      }
      // Handle conversion of stores.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addImm(Size).addFrameIndex(FrameIndex).addImm(0)
          .addMemOperand(MMO);
      }
    }
  }

  // If the spilled operand is the final one, try to change <INSN>R
  // into <INSN>.
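  // The memory operand replaces the last register operand; because the
  // access may be narrower than the spill slot, the displacement is biased
  // by Size - AccessBytes so that the last bytes of the slot are used.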
  int MemOpcode = SystemZ::getMemOpcode(Opcode);
  if (MemOpcode >= 0) {
    unsigned NumOps = MI->getNumExplicitOperands();
    if (OpNum == NumOps - 1) {
      const MCInstrDesc &MemDesc = get(MemOpcode);
      uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
      assert(AccessBytes != 0 && "Size of access should be known");
      assert(AccessBytes <= Size && "Access outside the frame index");
      uint64_t Offset = Size - AccessBytes;
      MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
      for (unsigned I = 0; I < OpNum; ++I)
        MIB.addOperand(MI->getOperand(I));
      MIB.addFrameIndex(FrameIndex).addImm(Offset);
      if (MemDesc.TSFlags & SystemZII::HasIndex)
        MIB.addReg(0);
      return MIB;
    }
  }

  return nullptr;
}

MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr* LoadMI) const {
  return nullptr;
}

bool
SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::L128:
    splitMove(MI, SystemZ::LG);
    return true;

  case SystemZ::ST128:
    splitMove(MI, SystemZ::STG);
    return true;

  case SystemZ::LX:
    splitMove(MI, SystemZ::LD);
    return true;

  case SystemZ::STX:
    splitMove(MI, SystemZ::STD);
    return true;

  case SystemZ::LBMux:
    expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
    return true;

  case SystemZ::LHMux:
    expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
    return true;

  case SystemZ::LLCRMux:
    expandZExtPseudo(MI, SystemZ::LLCR, 8);
    return true;

  case SystemZ::LLHRMux:
    expandZExtPseudo(MI, SystemZ::LLHR, 16);
    return true;

  case SystemZ::LLCMux:
    expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
    return true;

  case SystemZ::LLHMux:
    expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
    return true;

  case SystemZ::LMux:
    expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
    return true;

  case SystemZ::STCMux:
    expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
    return true;

  case SystemZ::STHMux:
    expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
    return true;

  case SystemZ::STMux:
    expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
    return true;

  case SystemZ::LHIMux:
    expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
    return true;

  case SystemZ::IIFMux:
    expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
    return true;

  case SystemZ::IILMux:
    expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
    return true;

  case SystemZ::IIHMux:
    expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
    return true;

  case SystemZ::NIFMux:
    expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
    return true;

  case SystemZ::NILMux:
    expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
    return true;

  case SystemZ::NIHMux:
    expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
    return true;

  case SystemZ::OIFMux:
    expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
    return true;

  case SystemZ::OILMux:
    expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
    return true;

  case SystemZ::OIHMux:
    expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
    return true;

  case SystemZ::XIFMux:
    expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
    return true;

  case SystemZ::TMLMux:
    expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
    return true;

  case SystemZ::TMHMux:
    expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
    return true;

  case SystemZ::AHIMux:
    expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
    return true;

  case SystemZ::AHIMuxK:
    expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
    return true;

  case SystemZ::AFIMux:
    expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
    return true;

  case SystemZ::CFIMux:
    expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
    return true;

  case SystemZ::CLFIMux:
    expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
    return true;

  case SystemZ::CMux:
    expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
    return true;

  case SystemZ::CLMux:
    expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
    return true;

  case SystemZ::RISBMux: {
    bool DestIsHigh = isHighReg(MI->getOperand(0).getReg());
    bool SrcIsHigh = isHighReg(MI->getOperand(2).getReg());
    if (SrcIsHigh == DestIsHigh)
      MI->setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
    else {
      MI->setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
      MI->getOperand(5).setImm(MI->getOperand(5).getImm() ^ 32);
    }
    return true;
  }

  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;

  default:
    return false;
  }
}

uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const {
  if (MI->getOpcode() == TargetOpcode::INLINEASM) {
    const MachineFunction *MF = MI->getParent()->getParent();
    const char *AsmStr = MI->getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  return MI->getDesc().getSize();
}

SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::BR:
  case SystemZ::J:
  case SystemZ::JG:
    return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
                             SystemZ::CCMASK_ANY, &MI->getOperand(0));

  case SystemZ::BRC:
  case SystemZ::BRCL:
    return SystemZII::Branch(SystemZII::BranchNormal,
                             MI->getOperand(0).getImm(),
                             MI->getOperand(1).getImm(), &MI->getOperand(2));

  case SystemZ::BRCT:
    return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));

  case SystemZ::BRCTG:
    return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));

  case SystemZ::CIJ:
  case SystemZ::CRJ:
    return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  case SystemZ::CLIJ:
  case SystemZ::CLRJ:
    return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  case SystemZ::CGIJ:
  case SystemZ::CGRJ:
    return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  case SystemZ::CLGIJ:
  case SystemZ::CLGRJ:
    return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
                             MI->getOperand(2).getImm(), &MI->getOperand(3));

  default:
    llvm_unreachable("Unrecognized branch opcode");
  }
}

void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
                                           unsigned &LoadOpcode,
                                           unsigned &StoreOpcode) const {
  if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
    LoadOpcode = SystemZ::L;
    StoreOpcode = SystemZ::ST;
  } else if (RC == &SystemZ::GRH32BitRegClass) {
    LoadOpcode = SystemZ::LFH;
    StoreOpcode = SystemZ::STFH;
  } else if (RC == &SystemZ::GRX32BitRegClass) {
    LoadOpcode = SystemZ::LMux;
    StoreOpcode = SystemZ::STMux;
  } else if (RC == &SystemZ::GR64BitRegClass ||
             RC == &SystemZ::ADDR64BitRegClass) {
    LoadOpcode = SystemZ::LG;
    StoreOpcode = SystemZ::STG;
  } else if (RC == &SystemZ::GR128BitRegClass ||
             RC == &SystemZ::ADDR128BitRegClass) {
    LoadOpcode = SystemZ::L128;
    StoreOpcode = SystemZ::ST128;
  } else if (RC == &SystemZ::FP32BitRegClass) {
    LoadOpcode = SystemZ::LE;
    StoreOpcode = SystemZ::STE;
  } else if (RC == &SystemZ::FP64BitRegClass) {
    LoadOpcode = SystemZ::LD;
    StoreOpcode = SystemZ::STD;
  } else if (RC == &SystemZ::FP128BitRegClass) {
    LoadOpcode = SystemZ::LX;
    StoreOpcode = SystemZ::STX;
  } else
    llvm_unreachable("Unsupported regclass to load or store");
}

unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
                                              int64_t Offset) const {
  const MCInstrDesc &MCID = get(Opcode);
  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
    // Get the instruction to use for unsigned 12-bit displacements.
    int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
    if (Disp12Opcode >= 0)
      return Disp12Opcode;

    // All address-related instructions can use unsigned 12-bit
    // displacements.
    return Opcode;
  }
  if (isInt<20>(Offset) && isInt<20>(Offset2)) {
    // Get the instruction to use for signed 20-bit displacements.
    int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
    if (Disp20Opcode >= 0)
      return Disp20Opcode;

    // Check whether Opcode allows signed 20-bit displacements.
    if (MCID.TSFlags & SystemZII::Has20BitOffset)
      return Opcode;
  }
  return 0;
}

unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
  switch (Opcode) {
  case SystemZ::L: return SystemZ::LT;
  case SystemZ::LY: return SystemZ::LT;
  case SystemZ::LG: return SystemZ::LTG;
  case SystemZ::LGF: return SystemZ::LTGF;
  case SystemZ::LR: return SystemZ::LTR;
  case SystemZ::LGFR: return SystemZ::LTGFR;
  case SystemZ::LGR: return SystemZ::LTGR;
  case SystemZ::LER: return SystemZ::LTEBR;
  case SystemZ::LDR: return SystemZ::LTDBR;
  case SystemZ::LXR: return SystemZ::LTXBR;
  default: return 0;
  }
}

// Return true if Mask matches the regexp 0*1+0*, given that zero masks
// have already been filtered out. Store the first set bit in LSB and
// the number of set bits in Length if so.
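// For example, Mask = 0x0ff0 gives LSB = 4 and Length = 8.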
static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) {
  unsigned First = findFirstSet(Mask);
  uint64_t Top = (Mask >> First) + 1;
  if ((Top & -Top) == Top) {
    LSB = First;
    Length = findFirstSet(Top);
    return true;
  }
  return false;
}

bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
                                   unsigned &Start, unsigned &End) const {
  // Reject trivial all-zero masks.
  if (Mask == 0)
    return false;

  // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
  // the msb and End specifies the index of the lsb.
  unsigned LSB, Length;
  if (isStringOfOnes(Mask, LSB, Length)) {
    Start = 63 - (LSB + Length - 1);
    End = 63 - LSB;
    return true;
  }

  // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
  // of the low 1s and End specifies the lsb of the high 1s.
  if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) {
    assert(LSB > 0 && "Bottom bit must be set");
    assert(LSB + Length < BitSize && "Top bit must be set");
    Start = 63 - (LSB - 1);
    End = 63 - (LSB + Length);
    return true;
  }

  return false;
}

unsigned SystemZInstrInfo::getCompareAndBranch(unsigned Opcode,
                                               const MachineInstr *MI) const {
  switch (Opcode) {
  case SystemZ::CR:
    return SystemZ::CRJ;
  case SystemZ::CGR:
    return SystemZ::CGRJ;
  case SystemZ::CHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CIJ : 0;
  case SystemZ::CGHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CGIJ : 0;
  case SystemZ::CLR:
    return SystemZ::CLRJ;
  case SystemZ::CLGR:
    return SystemZ::CLGRJ;
  case SystemZ::CLFI:
    return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLIJ : 0;
  case SystemZ::CLGFI:
    return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLGIJ : 0;
  default:
    return 0;
  }
}

void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     unsigned Reg, uint64_t Value) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  unsigned Opcode;
  if (isInt<16>(Value))
    Opcode = SystemZ::LGHI;
  else if (SystemZ::isImmLL(Value))
    Opcode = SystemZ::LLILL;
  else if (SystemZ::isImmLH(Value)) {
    Opcode = SystemZ::LLILH;
    Value >>= 16;
  } else {
    assert(isInt<32>(Value) && "Huge values not handled yet");
    Opcode = SystemZ::LGFI;
  }
  BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
}