//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARM.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/STLExtras.h"

#define GET_INSTRINFO_CTOR
#include "ARMGenInstrInfo.inc"

using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

static cl::opt<bool>
WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true),
           cl::desc("Widen ARM vmovs to vmovd when possible"));

/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
  uint16_t MLxOpc;     // MLA / MLS opcode
  uint16_t MulOpc;     // Expanded multiplication opcode
  uint16_t AddSubOpc;  // Expanded add / sub opcode
  bool NegAcc;         // True if the acc is negated before the add / sub.
  bool HasLane;        // True if instruction has an extra "lane" operand.
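  // The constructor below builds MLxEntryMap, mapping each MLxOpc to its row
  // in ARM_MLxTable, and collects the Mul/AddSub opcodes into
  // MLxHazardOpcodes for the hazard recognizer.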
};

static const ARM_MLxEntry ARM_MLxTable[] = {
  // MLxOpc,         MulOpc,        AddSubOpc,   NegAcc, HasLane
  // fp scalar ops
  { ARM::VMLAS,      ARM::VMULS,    ARM::VADDS,  false,  false },
  { ARM::VMLSS,      ARM::VMULS,    ARM::VSUBS,  false,  false },
  { ARM::VMLAD,      ARM::VMULD,    ARM::VADDD,  false,  false },
  { ARM::VMLSD,      ARM::VMULD,    ARM::VSUBD,  false,  false },
  { ARM::VNMLAS,     ARM::VNMULS,   ARM::VSUBS,  true,   false },
  { ARM::VNMLSS,     ARM::VMULS,    ARM::VSUBS,  true,   false },
  { ARM::VNMLAD,     ARM::VNMULD,   ARM::VSUBD,  true,   false },
  { ARM::VNMLSD,     ARM::VMULD,    ARM::VSUBD,  true,   false },

  // fp SIMD ops
  { ARM::VMLAfd,     ARM::VMULfd,   ARM::VADDfd, false,  false },
  { ARM::VMLSfd,     ARM::VMULfd,   ARM::VSUBfd, false,  false },
  { ARM::VMLAfq,     ARM::VMULfq,   ARM::VADDfq, false,  false },
  { ARM::VMLSfq,     ARM::VMULfq,   ARM::VSUBfq, false,  false },
  { ARM::VMLAslfd,   ARM::VMULslfd, ARM::VADDfd, false,  true  },
  { ARM::VMLSslfd,   ARM::VMULslfd, ARM::VSUBfd, false,  true  },
  { ARM::VMLAslfq,   ARM::VMULslfq, ARM::VADDfq, false,  true  },
  { ARM::VMLSslfq,   ARM::VMULslfq, ARM::VSUBfq, false,  true  },
};

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    Subtarget(STI) {
  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
    if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
      assert(false && "Duplicated entries?");
    MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
    MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
  }
}

// Use a ScoreboardHazardRecognizer for prepass ARM scheduling.
// TargetInstrInfoImpl currently defaults to no prepass hazard recognizer.
ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  if (usePreRAHazardRecognizer()) {
    const InstrItineraryData *II = TM->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
  }
  return TargetInstrInfoImpl::CreateTargetHazardRecognizer(TM, DAG);
}

ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  if (Subtarget.isThumb2() || Subtarget.hasVFP2())
    return (ScheduleHazardRecognizer *)
      new ARMHazardRecognizer(II, *this, getRegisterInfo(), Subtarget, DAG);
  return TargetInstrInfoImpl::CreateTargetPostRAHazardRecognizer(II, DAG);
}

MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  uint64_t TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
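  // For example (illustrative), a pre-indexed "ldr r0, [r1, #4]!" becomes
  // "add r1, r1, #4" followed by the un-indexed "ldr r0, [r1]".
  // getUnindexedOpcode() returns 0 when no un-indexed form exists.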
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MCID.getNumOperands();
  bool isLoad = !MI->mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default: llvm_unreachable("Unknown indexed op!");
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3: {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // The immediate is 8 bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
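  // For each virtual register operand of the original instruction, move its
  // kill/dead markers onto whichever of the two new instructions (UpdateMI or
  // MemMI) now carries the corresponding def or last use.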
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true;  // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now that the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them
  // for correctness of Thumb constant islands.
  if ((isJumpTableBranchOpcode(SecondLastOpc) ||
       isIndirectBranchOpcode(SecondLastOpc)) &&
      isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}


unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc   = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
  bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) { // Unconditional branch?
      if (isThumb)
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).addImm(ARMCC::AL).addReg(0);
      else
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    } else
      BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
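  // Emit the conditional branch to TBB first, then the unconditional branch
  // to FBB. Note that Thumb unconditional branches still carry an AL
  // predicate operand.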
  BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  if (isThumb)
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).addImm(ARMCC::AL).addReg(0);
  else
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
  if (MI->isBundle()) {
    MachineBasicBlock::const_instr_iterator I = MI;
    MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      int PIdx = I->findFirstPredOperandIdx();
      if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
        return true;
    }
    return false;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
}

bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if ((MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) ||
        (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
  if (!MI->isPredicable())
    return false;

  if ((MI->getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
    ARMFunctionInfo *AFI =
      MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
    return AFI->isThumb2Function();
  }
  return true;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
LLVM_ATTRIBUTE_NOINLINE
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI);
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  assert(JTI < JT.size());
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.getSize())
    return MCID.getSize();

  // If this machine instr is an inline asm, measure it.
  if (MI->getOpcode() == ARM::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
  if (MI->isLabel())
    return 0;
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::PROLOG_LABEL:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::DBG_VALUE:
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case ARM::MOVi16_ga_pcrel:
  case ARM::MOVTi16_ga_pcrel:
  case ARM::t2MOVi16_ga_pcrel:
  case ARM::t2MOVTi16_ga_pcrel:
    return 4;
  case ARM::MOVi32imm:
  case ARM::t2MOVi32imm:
    return 8;
  case ARM::CONSTPOOL_ENTRY:
    // If this machine instr is a constant pool entry, its size is recorded as
    // operand #2.
    return MI->getOperand(2).getImm();
  case ARM::Int_eh_sjlj_longjmp:
    return 16;
  case ARM::tInt_eh_sjlj_longjmp:
    return 10;
  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
    return 20;
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return 12;
  case ARM::BR_JTr:
  case ARM::BR_JTm:
  case ARM::BR_JTadd:
  case ARM::tBR_JTr:
  case ARM::t2BR_JT:
  case ARM::t2TBB_JT:
  case ARM::t2TBH_JT: {
    // These are jumptable branches, i.e. a branch followed by an inlined
    // jumptable. The size is 4 + 4 * number of entries. For TBB, each
    // entry is one byte; for TBH, each entry is two bytes.
    unsigned EntrySize = (Opc == ARM::t2TBB_JT)
      ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
    unsigned NumOps = MCID.getNumOperands();
    MachineOperand JTOP =
      MI->getOperand(NumOps - (MI->isPredicable() ? 3 : 2));
    unsigned JTI = JTOP.getIndex();
    const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
    assert(MJTI != 0);
    const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
    assert(JTI < JT.size());
    // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
    // aligned. The assembler / linker may add 2 byte padding just before
    // the JT entries. The size does not include this padding; the
    // constant islands pass does separate bookkeeping for it.
    // FIXME: If we know the size of the function is less than (1 << 16) * 2
    // bytes, we can use 16-bit entries instead. Then there won't be an
    // alignment issue.
    unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
    unsigned NumEntries = getNumJTEntries(JT, JTI);
    if (Opc == ARM::t2TBB_JT && (NumEntries & 1))
      // Make sure the instruction that follows TBB is 2-byte aligned.
      // FIXME: Constant island pass should insert an "ALIGN" instruction
      // instead.
      ++NumEntries;
    return NumEntries * EntrySize + InstSize;
  }
  default:
    // Otherwise, pseudo-instruction sizes are zero.
    return 0;
  }
}

unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr *MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI;
  MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += GetInstSizeInBytes(&*I);
  }
  return Size;
}

void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc  = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
                                .addReg(SrcReg, getKillRegState(KillSrc))));
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc  = ARM::SPRRegClass.contains(SrcReg);

  unsigned Opc = 0;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VORRq;

  if (Opc) {
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::VORRq)
      MIB.addReg(SrcReg, getKillRegState(KillSrc));
    AddDefaultPred(MIB);
    return;
  }

  // Handle register classes that require multiple instructions.
  unsigned BeginIdx = 0;
  unsigned SubRegs = 0;
  int Spacing = 1;

  // Use VORRq when possible.
  if (ARM::QQPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VORRq, BeginIdx = ARM::qsub_0, SubRegs = 2;
  else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VORRq, BeginIdx = ARM::qsub_0, SubRegs = 4;
  // Fall back to VMOVD.
  else if (ARM::DPairRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 2;
  else if (ARM::DTripleRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 3;
  else if (ARM::DQuadRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4;

  else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 2, Spacing = 2;
  else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 3, Spacing = 2;
  else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4, Spacing = 2;

  assert(Opc && "Impossible reg-to-reg copy");

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstrBuilder Mov;

  // Copy register tuples backward when the first Dest reg overlaps with
  // SrcReg.
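  // Illustrative example: copying Q0_Q1 into Q1_Q2 must copy Q2 from Q1
  // before Q1 is overwritten by Q0, so the loop below walks the tuple from
  // the highest sub-register down.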
  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
    BeginIdx = BeginIdx + ((SubRegs-1)*Spacing);
    Spacing = -Spacing;
  }
#ifndef NDEBUG
  SmallSet<unsigned, 4> DstRegs;
#endif
  for (unsigned i = 0; i != SubRegs; ++i) {
    unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i*Spacing);
    unsigned Src = TRI->getSubReg(SrcReg,  BeginIdx + i*Spacing);
    assert(Dst && Src && "Bad sub-register");
#ifndef NDEBUG
    assert(!DstRegs.count(Src) && "destructive vector copy");
    DstRegs.insert(Dst);
#endif
    Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst)
      .addReg(Src);
    // VORR takes two source operands.
    if (Opc == ARM::VORRq)
      Mov.addReg(Src);
    Mov = AddDefaultPred(Mov);
  }
  // Add implicit super-register defs and kills to the last instruction.
  Mov->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    Mov->addRegisterKilled(SrcReg, TRI);
}

static const
MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB,
                             unsigned Reg, unsigned SubIdx, unsigned State,
                             const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                            MachineMemOperand::MOStore,
                            MFI.getObjectSize(FI),
                            Align);

  switch (RC->getSize()) {
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC)) {
      // Use aligned spills if the stack can be realigned.
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQIA))
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addFrameIndex(FI)
                       .addMemOperand(MMO));
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      // Use aligned spills if the stack can be realigned.
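      // VST1d64TPseudo needs a 128-bit aligned slot; otherwise fall back to
      // VSTMDIA and spill the three D sub-registers individually.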
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64TPseudo))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        // FIXME: It's possible to only store part of the QQ register if the
        // spilled def has a sub-register index.
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
      AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown reg class!");
  }
}

unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STRrs:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
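    // A register-offset store only refers to a stack slot when the offset
    // register is 0 and the shift immediate is 0; that is what the operand
    // checks below verify.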
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::STRi12:
  case ARM::t2STRi12:
  case ARM::tSTRspi:
  case ARM::VSTRD:
  case ARM::VSTRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VST1q64:
  case ARM::VST1d64TPseudo:
  case ARM::VST1d64QPseudo:
    if (MI->getOperand(0).isFI() &&
        MI->getOperand(2).getSubReg() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(2).getReg();
    }
    break;
  case ARM::VSTMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  const MachineMemOperand *Dummy;
  return MI->mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex);
}

void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                            MachineMemOperand::MOLoad,
                            MFI.getObjectSize(FI),
                            Align);

  switch (RC->getSize()) {
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));

    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
                       .addFrameIndex(FI)
                       .addMemOperand(MMO));
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                         .addFrameIndex(FI)
                         .addMemOperand(MMO));
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0,
                      RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        if (TargetRegisterInfo::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
        if (TargetRegisterInfo::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
      if (TargetRegisterInfo::isPhysicalRegister(DestReg))
        MIB.addReg(DestReg, RegState::ImplicitDefine);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown reg class!");
  }
}

unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDRrs:
  case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
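    // Same operand pattern as isStoreToStackSlot above: frame-index base,
    // zero offset register, zero shift immediate.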
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::LDRi12:
  case ARM::t2LDRi12:
  case ARM::tLDRspi:
  case ARM::VLDRD:
  case ARM::VLDRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLD1q64:
  case ARM::VLD1d64TPseudo:
  case ARM::VLD1d64QPseudo:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLDMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  const MachineMemOperand *Dummy;
  return MI->mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
}

bool
ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  // This hook gets to expand COPY instructions before they become
  // copyPhysReg() calls. Look for VMOVS instructions that can legally be
  // widened to VMOVD. We prefer the VMOVD when possible because it may be
  // changed into a VORR that can go down the NEON pipeline.
  if (!WidenVMOVS || !MI->isCopy())
    return false;

  // Look for a copy between even S-registers. That is where we keep floats
  // when using NEON v2f32 instructions for f32 arithmetic.
  unsigned DstRegS = MI->getOperand(0).getReg();
  unsigned SrcRegS = MI->getOperand(1).getReg();
  if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  if (!DstRegD || !SrcRegD)
    return false;

  // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only
  // legal if the COPY already defines the full DstRegD, and it isn't a
  // sub-register insertion.
  if (!MI->definesRegister(DstRegD, TRI) || MI->readsRegister(DstRegD, TRI))
    return false;

  // A dead copy shouldn't show up here, but reject it just in case.
  if (MI->getOperand(0).isDead())
    return false;

  // All clear, widen the COPY.
  DEBUG(dbgs() << "widening: " << *MI);

  // Get rid of the old <imp-def> of DstRegD. Leave it if it defines a Q-reg
  // or some other super-register.
  int ImpDefIdx = MI->findRegisterDefOperandIdx(DstRegD);
  if (ImpDefIdx != -1)
    MI->RemoveOperand(ImpDefIdx);

  // Change the opcode and operands.
  MI->setDesc(get(ARM::VMOVD));
  MI->getOperand(0).setReg(DstRegD);
  MI->getOperand(1).setReg(SrcRegD);
  AddDefaultPred(MachineInstrBuilder(MI));

  // We are now reading SrcRegD instead of SrcRegS. This may upset the
  // register scavenger and machine verifier, so we need to indicate that we
  // are reading an undefined value from SrcRegD, but a proper value from
  // SrcRegS.
  MI->getOperand(1).setIsUndef();
  MachineInstrBuilder(MI).addReg(SrcRegS, RegState::Implicit);

  // SrcRegD may actually contain an unrelated value in the ssub_1
  // sub-register. Don't kill it. Only kill the ssub_0 sub-register.
  if (MI->getOperand(1).isKill()) {
    MI->getOperand(1).setIsKill(false);
    MI->addRegisterKilled(SrcRegS, TRI, true);
  }

  DEBUG(dbgs() << "replaced by: " << *MI);
  return true;
}

MachineInstr*
ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                           int FrameIx, uint64_t Offset,
                                           const MDNode *MDPtr,
                                           DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM::DBG_VALUE))
    .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
  return &*MIB;
}

/// Create a copy of a const pool value. Update CPI to the new index and
/// return the label UID.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
  MachineConstantPool *MCP = MF.getConstantPool();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");
  ARMConstantPoolValue *ACPV =
    static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);

  unsigned PCLabelId = AFI->createPICLabelUId();
  ARMConstantPoolValue *NewCPV = 0;
  // FIXME: The below assumes PIC relocation model and that the function
  // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
  // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
  // instructions, so that's probably OK, but is PIC always correct when
  // we get here?
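  // Clone the constant pool value with the fresh PC label so the
  // rematerialized load gets its own PIC label; each ARMConstantPoolValue
  // kind has its own Create() entry point.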
  if (ACPV->isGlobalValue())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId,
             ARMCP::CPValue, 4);
  else if (ACPV->isExtSymbol())
    NewCPV = ARMConstantPoolSymbol::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
  else if (ACPV->isBlockAddress())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
             ARMCP::CPBlockAddress, 4);
  else if (ACPV->isLSDA())
    NewCPV = ARMConstantPoolConstant::Create(MF.getFunction(), PCLabelId,
                                             ARMCP::CPLSDA, 4);
  else if (ACPV->isMachineBasicBlock())
    NewCPV = ARMConstantPoolMBB::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
  else
    llvm_unreachable("Unexpected ARM constantpool value type!!");
  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
  return PCLabelId;
}

void ARMBaseInstrInfo::
reMaterialize(MachineBasicBlock &MBB,
              MachineBasicBlock::iterator I,
              unsigned DestReg, unsigned SubIdx,
              const MachineInstr *Orig,
              const TargetRegisterInfo &TRI) const {
  unsigned Opcode = Orig->getOpcode();
  switch (Opcode) {
  default: {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
    MBB.insert(I, MI);
    break;
  }
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    MachineFunction &MF = *MBB.getParent();
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
                                      DestReg)
      .addConstantPoolIndex(CPI).addImm(PCLabelId);
    MIB->setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
    break;
  }
  }
}

MachineInstr *
ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
  MachineInstr *MI = TargetInstrInfoImpl::duplicate(Orig, MF);
  switch(Orig->getOpcode()) {
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    Orig->getOperand(1).setIndex(CPI);
    Orig->getOperand(2).setImm(PCLabelId);
    break;
  }
  }
  return MI;
}

bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
                                        const MachineInstr *MI1,
                                        const MachineRegisterInfo *MRI) const {
  int Opcode = MI0->getOpcode();
  if (Opcode == ARM::t2LDRpci ||
      Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci ||
      Opcode == ARM::tLDRpci_pic ||
      Opcode == ARM::MOV_ga_dyn ||
      Opcode == ARM::MOV_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel_ldr ||
      Opcode == ARM::t2MOV_ga_dyn ||
      Opcode == ARM::t2MOV_ga_pcrel) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    const MachineOperand &MO0 = MI0->getOperand(1);
    const MachineOperand &MO1 = MI1->getOperand(1);
    if (MO0.getOffset() != MO1.getOffset())
      return false;

    if (Opcode == ARM::MOV_ga_dyn ||
        Opcode == ARM::MOV_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel_ldr ||
        Opcode == ARM::t2MOV_ga_dyn ||
        Opcode == ARM::t2MOV_ga_pcrel)
      // Ignore the PC labels.
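      // Two _ga pseudos produce the same value iff they reference the same
      // global; the PC label operands are intentionally not compared.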
      return MO0.getGlobal() == MO1.getGlobal();

    const MachineFunction *MF = MI0->getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
    bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
    if (isARMCP0 && isARMCP1) {
      ARMConstantPoolValue *ACPV0 =
        static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
      ARMConstantPoolValue *ACPV1 =
        static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
      return ACPV0->hasSameValue(ACPV1);
    } else if (!isARMCP0 && !isARMCP1) {
      return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
    }
    return false;
  } else if (Opcode == ARM::PICLDR) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    unsigned Addr0 = MI0->getOperand(1).getReg();
    unsigned Addr1 = MI1->getOperand(1).getReg();
    if (Addr0 != Addr1) {
      if (!MRI ||
          !TargetRegisterInfo::isVirtualRegister(Addr0) ||
          !TargetRegisterInfo::isVirtualRegister(Addr1))
        return false;

      // This assumes SSA form.
      MachineInstr *Def0 = MRI->getVRegDef(Addr0);
      MachineInstr *Def1 = MRI->getVRegDef(Addr1);
      // Check if the loaded value, e.g. a constantpool of a global address,
      // is the same.
      if (!produceSameValue(Def0, Def1, MRI))
        return false;
    }

    for (unsigned i = 3, e = MI0->getNumOperands(); i != e; ++i) {
      // %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg
      const MachineOperand &MO0 = MI0->getOperand(i);
      const MachineOperand &MO1 = MI1->getOperand(i);
      if (!MO0.isIdenticalTo(MO1))
        return false;
    }
    return true;
  }

  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only difference
/// between the two addresses is the offset. It also returns the offsets by
/// reference.
bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                               int64_t &Offset1,
                                               int64_t &Offset2) const {
  // Don't worry about Thumb: just ARM and Thumb2.
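  // The opcode whitelists below only cover ARM and Thumb2 load forms.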
  if (Subtarget.isThumb1Only()) return false;

  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;

  switch (Load1->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  switch (Load2->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  // Check if base addresses and chain operands match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(4) != Load2->getOperand(4))
    return false;

  // Index should be Reg0.
  if (Load1->getOperand(3) != Load2->getOperand(3))
    return false;

  // Determine the offsets.
  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
      isa<ConstantSDNode>(Load2->getOperand(1))) {
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
    return true;
  }

  return false;
}

/// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
/// be scheduled together. On some targets if two loads are loading from
/// addresses in the same cache line, it's better if they are scheduled
/// together. This function takes two integers that represent the load offsets
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                               int64_t Offset1, int64_t Offset2,
                                               unsigned NumLoads) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  assert(Offset2 > Offset1);

  if ((Offset2 - Offset1) / 8 > 64)
    return false;

  if (Load1->getMachineOpcode() != Load2->getMachineOpcode())
    return false;  // FIXME: overly conservative?

  // Four loads in a row should be sufficient.
  if (NumLoads >= 3)
    return false;

  return true;
}

bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  // Debug info is never a scheduling boundary. It's necessary to be explicit
  // due to the special treatment of IT instructions below, otherwise a
  // dbg_value followed by an IT will result in the IT instruction being
  // considered a scheduling hazard, which is wrong. It should be the actual
  // instruction preceding the dbg_value instruction(s), just like it is
  // when debug info is not present.
  if (MI->isDebugValue())
    return false;

  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isLabel())
    return true;

  // Treat the start of the IT block as a scheduling boundary, but schedule
  // t2IT along with all instructions following it.
  // FIXME: This is a big hammer. But the alternative is to add all potential
  // true and anti dependencies to IT block instructions as implicit operands
  // to the t2IT instruction. The added compile time and complexity does not
  // seem worth it.
  MachineBasicBlock::const_iterator I = MI;
  // Make sure to skip any dbg_value instructions.
  while (++I != MBB->end() && I->isDebugValue())
    ;
  if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  // Calls don't actually change the stack pointer, even if they have imp-defs.
  // No ARM calling conventions change the stack pointer. (X86 calling
  // conventions sometimes do).
  if (!MI->isCall() && MI->definesRegister(ARM::SP))
    return true;

  return false;
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    const BranchProbability &Probability) const {
  if (!NumCycles)
    return false;

  // Attempt to estimate the relative costs of predication versus branching.
  unsigned UnpredCost = Probability.getNumerator() * NumCycles;
  UnpredCost /= Probability.getDenominator();
  UnpredCost += 1; // The branch itself
  UnpredCost += Subtarget.getMispredictionPenalty() / 10;

  return (NumCycles + ExtraPredCycles) <= UnpredCost;
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned TCycles, unsigned TExtra,
                    MachineBasicBlock &FMBB,
                    unsigned FCycles, unsigned FExtra,
                    const BranchProbability &Probability) const {
  if (!TCycles || !FCycles)
    return false;

  // Attempt to estimate the relative costs of predication versus branching.
  unsigned TUnpredCost = Probability.getNumerator() * TCycles;
  TUnpredCost /= Probability.getDenominator();

  uint32_t Comp = Probability.getDenominator() - Probability.getNumerator();
  unsigned FUnpredCost = Comp * FCycles;
  FUnpredCost /= Probability.getDenominator();

  unsigned UnpredCost = TUnpredCost + FUnpredCost;
  UnpredCost += 1; // The branch itself
  UnpredCost += Subtarget.getMispredictionPenalty() / 10;

  return (TCycles + FCycles + TExtra + FExtra) <= UnpredCost;
}

/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
ARMCC::CondCodes
llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }

  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
}


int llvm::getMatchingCondBranchOpcode(int Opc) {
  if (Opc == ARM::B)
    return ARM::Bcc;
  if (Opc == ARM::tB)
    return ARM::tBcc;
  if (Opc == ARM::t2B)
    return ARM::t2Bcc;

  llvm_unreachable("Unknown unconditional branch opcode!");
}

/// commuteInstruction - Handle commutable instructions.
MachineInstr *
ARMBaseInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
  switch (MI->getOpcode()) {
  case ARM::MOVCCr:
  case ARM::t2MOVCCr: {
    // MOVCC can be commuted by inverting the condition.
    unsigned PredReg = 0;
    ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg);
    // MOVCC AL can't be inverted. Shouldn't happen.
    if (CC == ARMCC::AL || PredReg != ARM::CPSR)
      return NULL;
    MI = TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
    if (!MI)
      return NULL;
    // After swapping the MOVCC operands, also invert the condition.
    MI->getOperand(MI->findFirstPredOperandIdx())
      .setImm(ARMCC::getOppositeCondition(CC));
    return MI;
  }
  }
  return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
}

/// Identify instructions that can be folded into a MOVCC instruction, and
/// return the defining instruction.
static MachineInstr *canFoldIntoMOVCC(unsigned Reg,
                                      const MachineRegisterInfo &MRI,
                                      const TargetInstrInfo *TII) {
  if (!TargetRegisterInfo::isVirtualRegister(Reg))
    return 0;
  if (!MRI.hasOneNonDBGUse(Reg))
    return 0;
  MachineInstr *MI = MRI.getVRegDef(Reg);
  if (!MI)
    return 0;
  // MI is folded into the MOVCC by predicating it.
  if (!MI->isPredicable())
    return 0;
  // Check if MI has any non-dead defs or physreg uses. This also detects
  // predicated instructions which will be reading CPSR.
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    // Reject frame index operands, PEI can't handle the predicated pseudos.
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return 0;
    if (!MO.isReg())
      continue;
    // MI can't have any tied operands, that would conflict with predication.
    if (MO.isTied())
      return 0;
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      return 0;
    if (MO.isDef() && !MO.isDead())
      return 0;
  }
  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(TII, /* AliasAnalysis = */ 0, DontMoveAcrossStores))
    return 0;
  return MI;
}

bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr *MI,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     unsigned &TrueOp, unsigned &FalseOp,
                                     bool &Optimizable) const {
  assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) &&
         "Unknown select instruction");
  // MOVCC operands:
  // 0: Def.
  // 1: True use.
  // 2: False use.
  // 3: Condition code.
  // 4: CPSR use.
  TrueOp = 1;
  FalseOp = 2;
  Cond.push_back(MI->getOperand(3));
  Cond.push_back(MI->getOperand(4));
  // We can always fold a def.
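  // optimizeSelect() below performs the actual folding.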
  Optimizable = true;
  return false;
}

MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
                                               bool PreferFalse) const {
  assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) &&
         "Unknown select instruction");
  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI = canFoldIntoMOVCC(MI->getOperand(2).getReg(), MRI, this);
  bool Invert = !DefMI;
  if (!DefMI)
    DefMI = canFoldIntoMOVCC(MI->getOperand(1).getReg(), MRI, this);
  if (!DefMI)
    return 0;

  // Create a new predicated version of DefMI.
  // Rfalse is the first use.
  MachineInstrBuilder NewMI = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
                                      DefMI->getDesc(),
                                      MI->getOperand(0).getReg());

  // Copy all the DefMI operands, excluding its (null) predicate.
  const MCInstrDesc &DefDesc = DefMI->getDesc();
  for (unsigned i = 1, e = DefDesc.getNumOperands();
       i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
    NewMI.addOperand(DefMI->getOperand(i));

  unsigned CondCode = MI->getOperand(3).getImm();
  if (Invert)
    NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode)));
  else
    NewMI.addImm(CondCode);
  NewMI.addOperand(MI->getOperand(4));

  // DefMI is not the -S version that sets CPSR, so add an optional %noreg.
  if (NewMI->hasOptionalDef())
    AddDefaultCC(NewMI);

  // The output register value when the predicate is false is an implicit
  // register operand tied to the first def.
  // The tie makes the register allocator ensure the FalseReg is allocated the
  // same register as operand 0.
  MachineOperand FalseReg = MI->getOperand(Invert ? 2 : 1);
  FalseReg.setImplicit();
  NewMI->addOperand(FalseReg);
  NewMI->tieOperands(0, NewMI->getNumOperands() - 1);

  // The caller will erase MI, but not DefMI.
  DefMI->eraseFromParent();
  return NewMI;
}

/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the
/// instruction is encoded with an 'S' bit is determined by the optional CPSR
/// def operand.
///
/// This will go away once we can teach tblgen how to set the optional CPSR def
/// operand itself.
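///
/// For example (illustrative), a flag-setting add selected as the ARM::ADDSri
/// pseudo is mapped by convertAddSubFlagsOpcode() below to ARM::ADDri, with
/// the S bit expressed through the optional CPSR def operand.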
struct AddSubFlagsOpcodePair {
  uint16_t PseudoOpc;
  uint16_t MachineOpc;
};

static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
  {ARM::ADDSri, ARM::ADDri},
  {ARM::ADDSrr, ARM::ADDrr},
  {ARM::ADDSrsi, ARM::ADDrsi},
  {ARM::ADDSrsr, ARM::ADDrsr},

  {ARM::SUBSri, ARM::SUBri},
  {ARM::SUBSrr, ARM::SUBrr},
  {ARM::SUBSrsi, ARM::SUBrsi},
  {ARM::SUBSrsr, ARM::SUBrsr},

  {ARM::RSBSri, ARM::RSBri},
  {ARM::RSBSrsi, ARM::RSBrsi},
  {ARM::RSBSrsr, ARM::RSBrsr},

  {ARM::t2ADDSri, ARM::t2ADDri},
  {ARM::t2ADDSrr, ARM::t2ADDrr},
  {ARM::t2ADDSrs, ARM::t2ADDrs},

  {ARM::t2SUBSri, ARM::t2SUBri},
  {ARM::t2SUBSrr, ARM::t2SUBrr},
  {ARM::t2SUBSrs, ARM::t2SUBrs},

  {ARM::t2RSBSri, ARM::t2RSBri},
  {ARM::t2RSBSrs, ARM::t2RSBrs},
};

unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
  for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i)
    if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc)
      return AddSubFlagsOpcodeMap[i].MachineOpc;
  return 0;
}

void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                                   unsigned DestReg, unsigned BaseReg, int NumBytes,
                                   ARMCC::CondCodes Pred, unsigned PredReg,
                                   const ARMBaseInstrInfo &TII, unsigned MIFlags) {
  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0)
      .setMIFlags(MIFlags);
    BaseReg = DestReg;
  }
}

bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
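    // Illustrative example (not from the original source): Offset = 0x1004 is
    // not a valid SO immediate (an 8-bit value rotated right by an even
    // amount), but it splits into the encodable chunks 0x4 and 0x1000. One
    // chunk is folded into this instruction below; the rest stays in Offset
    // for the caller to materialize.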
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode_i12: {
      ImmIdx = FrameRegIdx + 1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
      return false;
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold the address computation if the opcode has offset bits.
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp.
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        // FIXME: When addrmode2 goes away, this will simplify (like the
        // T2 version), as the LDR.i12 versions don't need the encoding
        // tricks for the offset value.
        if (isSub) {
          if (AddrMode == ARMII::AddrMode_i12)
            ImmedOffset = -ImmedOffset;
          else
            ImmedOffset |= 1 << NumBits;
        }
        ImmOp.ChangeToImmediate(ImmedOffset);
        Offset = 0;
        return true;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub) {
        if (AddrMode == ARMII::AddrMode_i12)
          ImmedOffset = -ImmedOffset;
        else
          ImmedOffset |= 1 << NumBits;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2 if it has two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
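///
/// For example (illustrative), CMPri %r0, 42 yields SrcReg = r0, SrcReg2 = 0,
/// CmpMask = ~0 and CmpValue = 42.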
bool ARMBaseInstrInfo::
analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
               int &CmpMask, int &CmpValue) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::CMPri:
  case ARM::t2CMPri:
    SrcReg = MI->getOperand(0).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = MI->getOperand(1).getImm();
    return true;
  case ARM::CMPrr:
  case ARM::t2CMPrr:
    SrcReg = MI->getOperand(0).getReg();
    SrcReg2 = MI->getOperand(1).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case ARM::TSTri:
  case ARM::t2TSTri:
    SrcReg = MI->getOperand(0).getReg();
    SrcReg2 = 0;
    CmpMask = MI->getOperand(1).getImm();
    CmpValue = 0;
    return true;
  }

  return false;
}

/// isSuitableForMask - Identify a suitable 'and' instruction that
/// operates on the given source register and applies the same mask
/// as a 'tst' instruction. Provide a limited look-through for copies.
/// When successful, MI will hold the found instruction.
static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
                              int CmpMask, bool CommonUse) {
  switch (MI->getOpcode()) {
  case ARM::ANDri:
  case ARM::t2ANDri:
    if (CmpMask != MI->getOperand(2).getImm())
      return false;
    if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())
      return true;
    break;
  case ARM::COPY: {
    // Walk down one instruction which is potentially an 'and'.
    const MachineInstr &Copy = *MI;
    MachineBasicBlock::iterator AND(
      llvm::next(MachineBasicBlock::iterator(MI)));
    if (AND == MI->getParent()->end()) return false;
    MI = AND;
    return isSuitableForMask(MI, Copy.getOperand(0).getReg(),
                             CmpMask, true);
  }
  }

  return false;
}

/// getSwappedCondition - Assume the flags are set by MI(a,b), return
/// the condition code if we modify the instructions such that flags are
/// set by MI(b,a).
inline static ARMCC::CondCodes getSwappedCondition(ARMCC::CondCodes CC) {
  switch (CC) {
  default: return ARMCC::AL;
  case ARMCC::EQ: return ARMCC::EQ;
  case ARMCC::NE: return ARMCC::NE;
  case ARMCC::HS: return ARMCC::LS;
  case ARMCC::LO: return ARMCC::HI;
  case ARMCC::HI: return ARMCC::LO;
  case ARMCC::LS: return ARMCC::HS;
  case ARMCC::GE: return ARMCC::LE;
  case ARMCC::LT: return ARMCC::GT;
  case ARMCC::GT: return ARMCC::LT;
  case ARMCC::LE: return ARMCC::GE;
  }
}

/// isRedundantFlagInstr - Check whether the first instruction, whose only
/// purpose is to update flags, can be made redundant.
/// CMPrr can be made redundant by SUBrr if the operands are the same.
/// CMPri can be made redundant by SUBri if the operands are the same.
/// This function can be extended later on.
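/// For example (illustrative), SUBrr %r3, %r1, %r2 computes the same flags as
/// CMPrr %r1, %r2; the swapped form CMPrr %r2, %r1 is also matched here, and
/// the caller compensates with getSwappedCondition().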
inline static bool isRedundantFlagInstr(MachineInstr *CmpI, unsigned SrcReg,
                                        unsigned SrcReg2, int ImmValue,
                                        MachineInstr *OI) {
  if ((CmpI->getOpcode() == ARM::CMPrr ||
       CmpI->getOpcode() == ARM::t2CMPrr) &&
      (OI->getOpcode() == ARM::SUBrr ||
       OI->getOpcode() == ARM::t2SUBrr) &&
      ((OI->getOperand(1).getReg() == SrcReg &&
        OI->getOperand(2).getReg() == SrcReg2) ||
       (OI->getOperand(1).getReg() == SrcReg2 &&
        OI->getOperand(2).getReg() == SrcReg)))
    return true;

  if ((CmpI->getOpcode() == ARM::CMPri ||
       CmpI->getOpcode() == ARM::t2CMPri) &&
      (OI->getOpcode() == ARM::SUBri ||
       OI->getOpcode() == ARM::t2SUBri) &&
      OI->getOperand(1).getReg() == SrcReg &&
      OI->getOperand(2).getImm() == ImmValue)
    return true;
  return false;
}

/// optimizeCompareInstr - Convert the instruction supplying the argument to the
/// comparison into one that sets the zero bit in the flags register;
/// remove a redundant Compare instruction if an earlier instruction can set the
/// flags in the same way as Compare.
/// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two
/// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the
/// condition code of instructions which use the flags.
bool ARMBaseInstrInfo::
optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
                     int CmpMask, int CmpValue,
                     const MachineRegisterInfo *MRI) const {
  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI) return false;

  // Masked compares sometimes use the same register as the corresponding 'and'.
  if (CmpMask != ~0) {
    if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(MI)) {
      MI = 0;
      for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg),
           UE = MRI->use_end(); UI != UE; ++UI) {
        if (UI->getParent() != CmpInstr->getParent()) continue;
        MachineInstr *PotentialAND = &*UI;
        if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) ||
            isPredicated(PotentialAND))
          continue;
        MI = PotentialAND;
        break;
      }
      if (!MI) return false;
    }
  }

  // Get ready to iterate backward from CmpInstr.
  MachineBasicBlock::iterator I = CmpInstr, E = MI,
                              B = CmpInstr->getParent()->begin();

  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B) return false;

  // There are two possible candidates which can be changed to set CPSR:
  // One is MI, the other is a SUB instruction.
  // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
  // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue).
  MachineInstr *Sub = NULL;
  if (SrcReg2 != 0)
    // MI is not a candidate for CMPrr.
    MI = NULL;
  else if (MI->getParent() != CmpInstr->getParent() || CmpValue != 0) {
    // Conservatively refuse to convert an instruction which isn't in the same
    // BB as the comparison.
    // For CMPri, we need to check Sub, thus we can't return here.
    if (CmpInstr->getOpcode() == ARM::CMPri ||
        CmpInstr->getOpcode() == ARM::t2CMPri)
      MI = NULL;
    else
      return false;
  }

  // Check that CPSR isn't set between the comparison instruction and the one we
  // want to change. At the same time, search for Sub.
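  // The loop below walks backward from CmpInstr toward MI; any intervening
  // read or write of CPSR makes the transformation unsafe.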
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  --I;
  for (; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(ARM::CPSR, TRI) ||
        Instr.readsRegister(ARM::CPSR, TRI))
      // This instruction modifies or uses CPSR after the one we want to
      // change. We can't do this transformation.
      return false;

    // Check whether CmpInstr can be made redundant by the current instruction.
    if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpValue, &*I)) {
      Sub = &*I;
      break;
    }

    if (I == B)
      // The 'and' is below the comparison instruction.
      return false;
  }

  // Return false if no candidates exist.
  if (!MI && !Sub)
    return false;

  // The single candidate is called MI.
  if (!MI) MI = Sub;

  // We can't use a predicated instruction - it doesn't always write the flags.
  if (isPredicated(MI))
    return false;

  switch (MI->getOpcode()) {
  default: break;
  case ARM::RSBrr:
  case ARM::RSBri:
  case ARM::RSCrr:
  case ARM::RSCri:
  case ARM::ADDrr:
  case ARM::ADDri:
  case ARM::ADCrr:
  case ARM::ADCri:
  case ARM::SUBrr:
  case ARM::SUBri:
  case ARM::SBCrr:
  case ARM::SBCri:
  case ARM::t2RSBri:
  case ARM::t2ADDrr:
  case ARM::t2ADDri:
  case ARM::t2ADCrr:
  case ARM::t2ADCri:
  case ARM::t2SUBrr:
  case ARM::t2SUBri:
  case ARM::t2SBCrr:
  case ARM::t2SBCri:
  case ARM::ANDrr:
  case ARM::ANDri:
  case ARM::t2ANDrr:
  case ARM::t2ANDri:
  case ARM::ORRrr:
  case ARM::ORRri:
  case ARM::t2ORRrr:
  case ARM::t2ORRri:
  case ARM::EORrr:
  case ARM::EORri:
  case ARM::t2EORrr:
  case ARM::t2EORri: {
    // Scan forward for uses of CPSR.
    // When checking against MI: if the condition code of a use requires
    // checking the V (overflow) bit, this transformation is not safe.
    // It is safe to remove CmpInstr if CPSR is redefined or killed.
    // If we are done with the basic block, we need to check whether CPSR is
    // live-out.
    SmallVector<std::pair<MachineOperand*, ARMCC::CondCodes>, 4>
        OperandsToUpdate;
    bool isSafe = false;
    I = CmpInstr;
    E = CmpInstr->getParent()->end();
    while (!isSafe && ++I != E) {
      const MachineInstr &Instr = *I;
      for (unsigned IO = 0, EO = Instr.getNumOperands();
           !isSafe && IO != EO; ++IO) {
        const MachineOperand &MO = Instr.getOperand(IO);
        if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) {
          isSafe = true;
          break;
        }
        if (!MO.isReg() || MO.getReg() != ARM::CPSR)
          continue;
        if (MO.isDef()) {
          isSafe = true;
          break;
        }
        // The condition code operand is the operand immediately before CPSR.
        ARMCC::CondCodes CC = (ARMCC::CondCodes)Instr.getOperand(IO-1).getImm();
        if (Sub) {
          ARMCC::CondCodes NewCC = getSwappedCondition(CC);
          if (NewCC == ARMCC::AL)
            return false;
          // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based
          // on CMP needs to be updated to be based on SUB.
          // Push the condition code operands to OperandsToUpdate.
          // If it is safe to remove CmpInstr, the condition code of these
          // operands will be modified.
          if (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
              Sub->getOperand(2).getReg() == SrcReg)
            OperandsToUpdate.push_back(std::make_pair(&((*I).getOperand(IO-1)),
                                                      NewCC));
        }
        else
          switch (CC) {
          default:
            // CPSR can be used multiple times, we should continue.
            break;
          case ARMCC::VS:
          case ARMCC::VC:
          case ARMCC::GE:
          case ARMCC::LT:
          case ARMCC::GT:
          case ARMCC::LE:
            return false;
          }
      }
    }

    // If CPSR is not killed nor re-defined, we should check whether it is
    // live-out. If it is live-out, do not optimize.
    if (!isSafe) {
      MachineBasicBlock *MBB = CmpInstr->getParent();
      for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
        if ((*SI)->isLiveIn(ARM::CPSR))
          return false;
    }

    // Toggle the optional operand to CPSR.
    MI->getOperand(5).setReg(ARM::CPSR);
    MI->getOperand(5).setIsDef(true);
    assert(!isPredicated(MI) && "Can't use flags from predicated instruction");
    CmpInstr->eraseFromParent();

    // Modify the condition code of operands in OperandsToUpdate.
    // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
    // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
    for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++)
      OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second);
    return true;
  }
  }

  return false;
}

bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
                                     MachineInstr *DefMI, unsigned Reg,
                                     MachineRegisterInfo *MRI) const {
  // Fold large immediates into add, sub, or, xor.
  unsigned DefOpc = DefMI->getOpcode();
  if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
    return false;
  if (!DefMI->getOperand(1).isImm())
    // Could be t2MOVi32imm <ga:xx>
    return false;

  if (!MRI->hasOneNonDBGUse(Reg))
    return false;

  const MCInstrDesc &DefMCID = DefMI->getDesc();
  if (DefMCID.hasOptionalDef()) {
    unsigned NumOps = DefMCID.getNumOperands();
    const MachineOperand &MO = DefMI->getOperand(NumOps-1);
    if (MO.getReg() == ARM::CPSR && !MO.isDead())
      // If DefMI defines CPSR and it is not dead, it's obviously not safe
      // to delete DefMI.
      return false;
  }

  const MCInstrDesc &UseMCID = UseMI->getDesc();
  if (UseMCID.hasOptionalDef()) {
    unsigned NumOps = UseMCID.getNumOperands();
    if (UseMI->getOperand(NumOps-1).getReg() == ARM::CPSR)
      // If the instruction sets the flag, do not attempt this optimization
      // since it may change the semantics of the code.
      return false;
  }

  unsigned UseOpc = UseMI->getOpcode();
  unsigned NewUseOpc = 0;
  uint32_t ImmVal = (uint32_t)DefMI->getOperand(1).getImm();
  uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
  bool Commute = false;
  switch (UseOpc) {
  default: return false;
  case ARM::SUBrr:
  case ARM::ADDrr:
  case ARM::ORRrr:
  case ARM::EORrr:
  case ARM::t2SUBrr:
  case ARM::t2ADDrr:
  case ARM::t2ORRrr:
  case ARM::t2EORrr: {
    Commute = UseMI->getOperand(2).getReg() != Reg;
    switch (UseOpc) {
    default: break;
    case ARM::SUBrr: {
      if (Commute)
        return false;
      ImmVal = -ImmVal;
      NewUseOpc = ARM::SUBri;
      // Fallthrough
    }
    case ARM::ADDrr:
    case ARM::ORRrr:
    case ARM::EORrr: {
      if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
        return false;
      SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
      SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
      switch (UseOpc) {
      default: break;
      case ARM::ADDrr: NewUseOpc = ARM::ADDri; break;
      case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
      case ARM::EORrr: NewUseOpc = ARM::EORri; break;
      }
      break;
    }
    case ARM::t2SUBrr: {
      if (Commute)
        return false;
      ImmVal = -ImmVal;
      NewUseOpc = ARM::t2SUBri;
      // Fallthrough
    }
    case ARM::t2ADDrr:
    case ARM::t2ORRrr:
    case ARM::t2EORrr: {
      if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
        return false;
      SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
      SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
      switch (UseOpc) {
      default: break;
      case ARM::t2ADDrr: NewUseOpc = ARM::t2ADDri; break;
      case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
      case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
      }
      break;
    }
    }
  }
  }

  unsigned OpIdx = Commute ? 2 : 1;
  unsigned Reg1 = UseMI->getOperand(OpIdx).getReg();
  bool isKill = UseMI->getOperand(OpIdx).isKill();
  unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg));
  AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(),
                                      UseMI, UseMI->getDebugLoc(),
                                      get(NewUseOpc), NewReg)
                              .addReg(Reg1, getKillRegState(isKill))
                              .addImm(SOImmValV1)));
  UseMI->setDesc(get(NewUseOpc));
  UseMI->getOperand(1).setReg(NewReg);
  UseMI->getOperand(1).setIsKill();
  UseMI->getOperand(2).ChangeToImmediate(SOImmValV2);
  DefMI->eraseFromParent();
  return true;
}

unsigned
ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                 const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  const MCInstrDesc &Desc = MI->getDesc();
  unsigned Class = Desc.getSchedClass();
  int ItinUOps = ItinData->getNumMicroOps(Class);
  if (ItinUOps >= 0)
    return ItinUOps;

  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected multi-uops instruction!");
  case ARM::VLDMQIA:
  case ARM::VSTMQIA:
    return 2;

  // The number of uOps for load / store multiple is determined by the number
  // of registers.
  //
  // On Cortex-A8, each pair of register loads / stores can be scheduled on the
  // same cycle. The scheduling for the first load / store must be done
  // separately by assuming the address is not 64-bit aligned.
  //
  // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
  // is not 64-bit aligned, the AGU takes an extra cycle. For VFP / NEON
  // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD: {
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands();
    return (NumRegs / 2) + (NumRegs % 2) + 1;
  }

  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::tPUSH:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1;
    if (Subtarget.isCortexA8()) {
      if (NumRegs < 4)
        return 2;
      // 4 registers would be issued: 2, 2.
      // 5 registers would be issued: 2, 2, 1.
      int A8UOps = (NumRegs / 2);
      if (NumRegs % 2)
        ++A8UOps;
      return A8UOps;
    } else if (Subtarget.isCortexA9()) {
      int A9UOps = (NumRegs / 2);
      // If there is an odd number of registers, or if the address is not
      // 64-bit aligned, it takes an extra AGU (Address Generation Unit) cycle.
      if ((NumRegs % 2) ||
          !MI->hasOneMemOperand() ||
          (*MI->memoperands_begin())->getAlignment() < 8)
        ++A9UOps;
      return A9UOps;
    } else {
      // Assume the worst.
      return NumRegs;
    }
  }
  }
}

int
ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
                                  const MCInstrDesc &DefMCID,
                                  unsigned DefClass,
                                  unsigned DefIdx, unsigned DefAlign) const {
  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    // Def is the address writeback.
    return ItinData->getOperandCycle(DefClass, DefIdx);

  int DefCycle;
  if (Subtarget.isCortexA8()) {
    // (regno / 2) + (regno % 2) + 1
    DefCycle = RegNo / 2 + 1;
    if (RegNo % 2)
      ++DefCycle;
  } else if (Subtarget.isCortexA9()) {
    DefCycle = RegNo;
    bool isSLoad = false;

    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::VLDMSIA:
    case ARM::VLDMSIA_UPD:
    case ARM::VLDMSDB_UPD:
      isSLoad = true;
      break;
    }

    // If there is an odd number of 'S' registers, or if the address is not
    // 64-bit aligned, it takes an extra cycle.
    if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
      ++DefCycle;
  } else {
    // Assume the worst.
    DefCycle = RegNo + 2;
  }

  return DefCycle;
}

int
ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
                                 const MCInstrDesc &DefMCID,
                                 unsigned DefClass,
                                 unsigned DefIdx, unsigned DefAlign) const {
  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    // Def is the address writeback.
    return ItinData->getOperandCycle(DefClass, DefIdx);

  int DefCycle;
  if (Subtarget.isCortexA8()) {
    // 4 registers would be issued: 1, 2, 1.
    // 5 registers would be issued: 1, 2, 2.
    DefCycle = RegNo / 2;
    if (DefCycle < 1)
      DefCycle = 1;
    // Result latency is issue cycle + 2: E2.
    DefCycle += 2;
  } else if (Subtarget.isCortexA9()) {
    DefCycle = (RegNo / 2);
    // If there is an odd number of registers, or if the address is not
    // 64-bit aligned, it takes an extra AGU (Address Generation Unit) cycle.
    if ((RegNo % 2) || DefAlign < 8)
      ++DefCycle;
    // Result latency is AGU cycles + 2.
    DefCycle += 2;
  } else {
    // Assume the worst.
    DefCycle = RegNo + 2;
  }

  return DefCycle;
}

int
ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
                                  const MCInstrDesc &UseMCID,
                                  unsigned UseClass,
                                  unsigned UseIdx, unsigned UseAlign) const {
  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    return ItinData->getOperandCycle(UseClass, UseIdx);

  int UseCycle;
  if (Subtarget.isCortexA8()) {
    // (regno / 2) + (regno % 2) + 1
    UseCycle = RegNo / 2 + 1;
    if (RegNo % 2)
      ++UseCycle;
  } else if (Subtarget.isCortexA9()) {
    UseCycle = RegNo;
    bool isSStore = false;

    switch (UseMCID.getOpcode()) {
    default: break;
    case ARM::VSTMSIA:
    case ARM::VSTMSIA_UPD:
    case ARM::VSTMSDB_UPD:
      isSStore = true;
      break;
    }

    // If there is an odd number of 'S' registers, or if the address is not
    // 64-bit aligned, it takes an extra cycle.
    if ((isSStore && (RegNo % 2)) || UseAlign < 8)
      ++UseCycle;
  } else {
    // Assume the worst.
    UseCycle = RegNo + 2;
  }

  return UseCycle;
}

int
ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
                                 const MCInstrDesc &UseMCID,
                                 unsigned UseClass,
                                 unsigned UseIdx, unsigned UseAlign) const {
  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    return ItinData->getOperandCycle(UseClass, UseIdx);

  int UseCycle;
  if (Subtarget.isCortexA8()) {
    UseCycle = RegNo / 2;
    if (UseCycle < 2)
      UseCycle = 2;
    // Read in E3.
    UseCycle += 2;
  } else if (Subtarget.isCortexA9()) {
    UseCycle = (RegNo / 2);
    // If there is an odd number of registers, or if the address is not
    // 64-bit aligned, it takes an extra AGU (Address Generation Unit) cycle.
    if ((RegNo % 2) || UseAlign < 8)
      ++UseCycle;
  } else {
    // Assume the worst.
    UseCycle = 1;
  }
  return UseCycle;
}

int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const MCInstrDesc &DefMCID,
                                    unsigned DefIdx, unsigned DefAlign,
                                    const MCInstrDesc &UseMCID,
                                    unsigned UseIdx, unsigned UseAlign) const {
  unsigned DefClass = DefMCID.getSchedClass();
  unsigned UseClass = UseMCID.getSchedClass();

  if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
    return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);

  // This may be a def / use of a variable_ops instruction, in which case the
  // operand latency might be determinable dynamically. Let the target try to
  // figure it out.
  int DefCycle = -1;
  bool LdmBypass = false;
  switch (DefMCID.getOpcode()) {
  default:
    DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
    break;

  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
    DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
    break;

  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tPUSH:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
    LdmBypass = 1;
    DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
    break;
  }

  if (DefCycle == -1)
    // We can't seem to determine the result latency of the def, assume it's 2.
    DefCycle = 2;

  int UseCycle = -1;
  switch (UseMCID.getOpcode()) {
  default:
    UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
    break;

  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD:
    UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
    break;

  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD:
    UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
    break;
  }

  if (UseCycle == -1)
    // Assume it's read in the first stage.
    UseCycle = 1;

  UseCycle = DefCycle - UseCycle + 1;
  if (UseCycle > 0) {
    if (LdmBypass) {
      // It's a variable_ops instruction so we can't use DefIdx here. Just use
      // the first def operand.
      if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1,
                                          UseClass, UseIdx))
        --UseCycle;
    } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
                                               UseClass, UseIdx)) {
      --UseCycle;
    }
  }

  return UseCycle;
}

static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
                                           const MachineInstr *MI, unsigned Reg,
                                           unsigned &DefIdx, unsigned &Dist) {
  Dist = 0;

  MachineBasicBlock::const_iterator I = MI; ++I;
  MachineBasicBlock::const_instr_iterator II =
    llvm::prior(I.getInstrIterator());
  assert(II->isInsideBundle() && "Empty bundle?");

  int Idx = -1;
  while (II->isInsideBundle()) {
    Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI);
    if (Idx != -1)
      break;
    --II;
    ++Dist;
  }

  assert(Idx != -1 && "Cannot find bundled definition!");
  DefIdx = Idx;
  return II;
}

static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
                                           const MachineInstr *MI, unsigned Reg,
                                           unsigned &UseIdx, unsigned &Dist) {
  Dist = 0;

  MachineBasicBlock::const_instr_iterator II = MI; ++II;
  assert(II->isInsideBundle() && "Empty bundle?");
  MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();

  // FIXME: This doesn't properly handle multiple uses.
  int Idx = -1;
  while (II != E && II->isInsideBundle()) {
    Idx = II->findRegisterUseOperandIdx(Reg, false, TRI);
    if (Idx != -1)
      break;
    if (II->getOpcode() != ARM::t2IT)
      ++Dist;
    ++II;
  }

  if (Idx == -1) {
    Dist = 0;
    return 0;
  }

  UseIdx = Idx;
  return II;
}

/// Return the number of cycles to add to (or subtract from) the static
/// itinerary based on the def opcode and alignment. The caller will ensure that
/// the adjusted latency is at least one cycle.
static int adjustDefLatency(const ARMSubtarget &Subtarget,
                            const MachineInstr *DefMI,
                            const MCInstrDesc *DefMCID, unsigned DefAlign) {
  int Adjust = 0;
  if (Subtarget.isCortexA8() || Subtarget.isCortexA9()) {
    // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
    // variants are one cycle cheaper.
    switch (DefMCID->getOpcode()) {
    default: break;
    case ARM::LDRrs:
    case ARM::LDRBrs: {
      unsigned ShOpVal = DefMI->getOperand(3).getImm();
      unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
      if (ShImm == 0 ||
          (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
        --Adjust;
      break;
    }
    case ARM::t2LDRs:
    case ARM::t2LDRBs:
    case ARM::t2LDRHs:
    case ARM::t2LDRSHs: {
      // Thumb2 mode: lsl only.
      unsigned ShAmt = DefMI->getOperand(3).getImm();
      if (ShAmt == 0 || ShAmt == 2)
        --Adjust;
      break;
    }
    }
  }

  if (DefAlign < 8 && Subtarget.isCortexA9()) {
    switch (DefMCID->getOpcode()) {
    default: break;
    case ARM::VLD1q8:
    case ARM::VLD1q16:
    case ARM::VLD1q32:
    case ARM::VLD1q64:
    case ARM::VLD1q8wb_fixed:
    case ARM::VLD1q16wb_fixed:
    case ARM::VLD1q32wb_fixed:
    case ARM::VLD1q64wb_fixed:
    case ARM::VLD1q8wb_register:
    case ARM::VLD1q16wb_register:
    case ARM::VLD1q32wb_register:
    case ARM::VLD1q64wb_register:
    case ARM::VLD2d8:
    case ARM::VLD2d16:
    case ARM::VLD2d32:
    case ARM::VLD2q8:
    case ARM::VLD2q16:
    case ARM::VLD2q32:
    case ARM::VLD2d8wb_fixed:
    case ARM::VLD2d16wb_fixed:
    case ARM::VLD2d32wb_fixed:
    case ARM::VLD2q8wb_fixed:
    case ARM::VLD2q16wb_fixed:
    case ARM::VLD2q32wb_fixed:
    case ARM::VLD2d8wb_register:
    case ARM::VLD2d16wb_register:
    case ARM::VLD2d32wb_register:
    case ARM::VLD2q8wb_register:
    case ARM::VLD2q16wb_register:
    case ARM::VLD2q32wb_register:
    case ARM::VLD3d8:
    case ARM::VLD3d16:
    case ARM::VLD3d32:
    case ARM::VLD1d64T:
    case ARM::VLD3d8_UPD:
    case ARM::VLD3d16_UPD:
    case ARM::VLD3d32_UPD:
    case ARM::VLD1d64Twb_fixed:
    case ARM::VLD1d64Twb_register:
    case ARM::VLD3q8_UPD:
    case ARM::VLD3q16_UPD:
    case ARM::VLD3q32_UPD:
    case ARM::VLD4d8:
    case ARM::VLD4d16:
    case ARM::VLD4d32:
    case ARM::VLD1d64Q:
    case ARM::VLD4d8_UPD:
    case ARM::VLD4d16_UPD:
    case ARM::VLD4d32_UPD:
    case ARM::VLD1d64Qwb_fixed:
    case ARM::VLD1d64Qwb_register:
    case ARM::VLD4q8_UPD:
    case ARM::VLD4q16_UPD:
    case ARM::VLD4q32_UPD:
    case ARM::VLD1DUPq8:
    case ARM::VLD1DUPq16:
    case ARM::VLD1DUPq32:
    case ARM::VLD1DUPq8wb_fixed:
    case ARM::VLD1DUPq16wb_fixed:
    case ARM::VLD1DUPq32wb_fixed:
    case ARM::VLD1DUPq8wb_register:
    case ARM::VLD1DUPq16wb_register:
    case ARM::VLD1DUPq32wb_register:
    case ARM::VLD2DUPd8:
    case ARM::VLD2DUPd16:
    case ARM::VLD2DUPd32:
    case ARM::VLD2DUPd8wb_fixed:
    case ARM::VLD2DUPd16wb_fixed:
    case ARM::VLD2DUPd32wb_fixed:
    case ARM::VLD2DUPd8wb_register:
    case ARM::VLD2DUPd16wb_register:
    case ARM::VLD2DUPd32wb_register:
    case ARM::VLD4DUPd8:
    case ARM::VLD4DUPd16:
    case ARM::VLD4DUPd32:
    case ARM::VLD4DUPd8_UPD:
    case ARM::VLD4DUPd16_UPD:
    case ARM::VLD4DUPd32_UPD:
    case ARM::VLD1LNd8:
    case ARM::VLD1LNd16:
    case ARM::VLD1LNd32:
    case ARM::VLD1LNd8_UPD:
    case ARM::VLD1LNd16_UPD:
    case ARM::VLD1LNd32_UPD:
    case ARM::VLD2LNd8:
    case ARM::VLD2LNd16:
    case ARM::VLD2LNd32:
    case ARM::VLD2LNq16:
    case ARM::VLD2LNq32:
    case ARM::VLD2LNd8_UPD:
    case ARM::VLD2LNd16_UPD:
    case ARM::VLD2LNd32_UPD:
    case ARM::VLD2LNq16_UPD:
    case ARM::VLD2LNq32_UPD:
    case ARM::VLD4LNd8:
    case ARM::VLD4LNd16:
    case ARM::VLD4LNd32:
    case ARM::VLD4LNq16:
    case ARM::VLD4LNq32:
    case ARM::VLD4LNd8_UPD:
    case ARM::VLD4LNd16_UPD:
    case ARM::VLD4LNd32_UPD:
    case ARM::VLD4LNq16_UPD:
    case ARM::VLD4LNq32_UPD:
      // If the address is not 64-bit aligned, the latencies of these
      // instructions increase by one.
      ++Adjust;
      break;
    }
  }
  return Adjust;
}



int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const MachineInstr *DefMI, unsigned DefIdx,
                                    const MachineInstr *UseMI,
                                    unsigned UseIdx) const {
  // No operand latency. The caller may fall back to getInstrLatency.
  if (!ItinData || ItinData->isEmpty())
    return -1;

  const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
  unsigned Reg = DefMO.getReg();
  const MCInstrDesc *DefMCID = &DefMI->getDesc();
  const MCInstrDesc *UseMCID = &UseMI->getDesc();

  unsigned DefAdj = 0;
  if (DefMI->isBundle()) {
    DefMI = getBundledDefMI(&getRegisterInfo(), DefMI, Reg, DefIdx, DefAdj);
    DefMCID = &DefMI->getDesc();
  }
  if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
      DefMI->isRegSequence() || DefMI->isImplicitDef()) {
    return 1;
  }

  unsigned UseAdj = 0;
  if (UseMI->isBundle()) {
    unsigned NewUseIdx;
    const MachineInstr *NewUseMI = getBundledUseMI(&getRegisterInfo(), UseMI,
                                                   Reg, NewUseIdx, UseAdj);
    if (!NewUseMI)
      return -1;

    UseMI = NewUseMI;
    UseIdx = NewUseIdx;
    UseMCID = &UseMI->getDesc();
  }

  if (Reg == ARM::CPSR) {
    if (DefMI->getOpcode() == ARM::FMSTAT) {
      // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
      return Subtarget.isCortexA9() ? 1 : 20;
    }

    // CPSR set and branch can be paired in the same cycle.
    if (UseMI->isBranch())
      return 0;

    // Otherwise it takes the instruction latency (generally one).
    unsigned Latency = getInstrLatency(ItinData, DefMI);

    // For Thumb2 and -Os, prefer scheduling CPSR setting instruction close to
    // its uses. Instructions which are otherwise scheduled between them may
    // incur a code size penalty (not able to use the CPSR setting 16-bit
    // instructions).
    if (Latency > 0 && Subtarget.isThumb2()) {
      const MachineFunction *MF = DefMI->getParent()->getParent();
      if (MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize))
        --Latency;
    }
    return Latency;
  }

  if (DefMO.isImplicit() || UseMI->getOperand(UseIdx).isImplicit())
    return -1;

  unsigned DefAlign = DefMI->hasOneMemOperand()
    ? (*DefMI->memoperands_begin())->getAlignment() : 0;
  unsigned UseAlign = UseMI->hasOneMemOperand()
    ? (*UseMI->memoperands_begin())->getAlignment() : 0;

  // Get the itinerary's latency if possible, and handle variable_ops.
  int Latency = getOperandLatency(ItinData, *DefMCID, DefIdx, DefAlign,
                                  *UseMCID, UseIdx, UseAlign);
  // Unable to find operand latency. The caller may resort to getInstrLatency.
  if (Latency < 0)
    return Latency;

  // Adjust for IT block position.
  int Adj = DefAdj + UseAdj;

  // Adjust for dynamic def-side opcode variants not captured by the itinerary.
  Adj += adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign);
  if (Adj >= 0 || (int)Latency > -Adj) {
    return Latency + Adj;
  }
  // Return the itinerary latency, which may be zero but not less than zero.
  return Latency;
}

int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    SDNode *DefNode, unsigned DefIdx,
                                    SDNode *UseNode, unsigned UseIdx) const {
  if (!DefNode->isMachineOpcode())
    return 1;

  const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode());

  if (isZeroCost(DefMCID.Opcode))
    return 0;

  if (!ItinData || ItinData->isEmpty())
    return DefMCID.mayLoad() ? 3 : 1;

  if (!UseNode->isMachineOpcode()) {
    int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
    if (Subtarget.isCortexA9())
      return Latency <= 2 ? 1 : Latency - 1;
    else
      return Latency <= 3 ? 1 : Latency - 2;
  }

  const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
  const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode);
  unsigned DefAlign = !DefMN->memoperands_empty()
    ? (*DefMN->memoperands_begin())->getAlignment() : 0;
  const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode);
  unsigned UseAlign = !UseMN->memoperands_empty()
    ? (*UseMN->memoperands_begin())->getAlignment() : 0;
  int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
                                  UseMCID, UseIdx, UseAlign);

  if (Latency > 1 &&
      (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
    // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
    // variants are one cycle cheaper.
    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::LDRrs:
    case ARM::LDRBrs: {
      unsigned ShOpVal =
        cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
      unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
      if (ShImm == 0 ||
          (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
        --Latency;
      break;
    }
    case ARM::t2LDRs:
    case ARM::t2LDRBs:
    case ARM::t2LDRHs:
    case ARM::t2LDRSHs: {
      // Thumb2 mode: lsl only.
      unsigned ShAmt =
        cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
      if (ShAmt == 0 || ShAmt == 2)
        --Latency;
      break;
    }
    }
  }

  if (DefAlign < 8 && Subtarget.isCortexA9())
    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::VLD1q8:
    case ARM::VLD1q16:
    case ARM::VLD1q32:
    case ARM::VLD1q64:
    case ARM::VLD1q8wb_register:
    case ARM::VLD1q16wb_register:
    case ARM::VLD1q32wb_register:
    case ARM::VLD1q64wb_register:
    case ARM::VLD1q8wb_fixed:
    case ARM::VLD1q16wb_fixed:
    case ARM::VLD1q32wb_fixed:
    case ARM::VLD1q64wb_fixed:
    case ARM::VLD2d8:
    case ARM::VLD2d16:
    case ARM::VLD2d32:
    case ARM::VLD2q8Pseudo:
    case ARM::VLD2q16Pseudo:
    case ARM::VLD2q32Pseudo:
    case ARM::VLD2d8wb_fixed:
    case ARM::VLD2d16wb_fixed:
    case ARM::VLD2d32wb_fixed:
    case ARM::VLD2q8PseudoWB_fixed:
    case ARM::VLD2q16PseudoWB_fixed:
    case ARM::VLD2q32PseudoWB_fixed:
    case ARM::VLD2d8wb_register:
    case ARM::VLD2d16wb_register:
    case ARM::VLD2d32wb_register:
    case ARM::VLD2q8PseudoWB_register:
    case ARM::VLD2q16PseudoWB_register:
    case ARM::VLD2q32PseudoWB_register:
    case ARM::VLD3d8Pseudo:
    case ARM::VLD3d16Pseudo:
    case ARM::VLD3d32Pseudo:
    case ARM::VLD1d64TPseudo:
    case ARM::VLD3d8Pseudo_UPD:
    case ARM::VLD3d16Pseudo_UPD:
    case ARM::VLD3d32Pseudo_UPD:
    case ARM::VLD3q8Pseudo_UPD:
    case ARM::VLD3q16Pseudo_UPD:
    case ARM::VLD3q32Pseudo_UPD:
    case ARM::VLD3q8oddPseudo:
    case ARM::VLD3q16oddPseudo:
    case ARM::VLD3q32oddPseudo:
    case ARM::VLD3q8oddPseudo_UPD:
    case ARM::VLD3q16oddPseudo_UPD:
    case ARM::VLD3q32oddPseudo_UPD:
    case ARM::VLD4d8Pseudo:
    case ARM::VLD4d16Pseudo:
    case ARM::VLD4d32Pseudo:
    case ARM::VLD1d64QPseudo:
    case ARM::VLD4d8Pseudo_UPD:
    case ARM::VLD4d16Pseudo_UPD:
    case ARM::VLD4d32Pseudo_UPD:
    case ARM::VLD4q8Pseudo_UPD:
    case ARM::VLD4q16Pseudo_UPD:
    case ARM::VLD4q32Pseudo_UPD:
    case ARM::VLD4q8oddPseudo:
    case ARM::VLD4q16oddPseudo:
    case ARM::VLD4q32oddPseudo:
    case ARM::VLD4q8oddPseudo_UPD:
    case ARM::VLD4q16oddPseudo_UPD:
    case ARM::VLD4q32oddPseudo_UPD:
    case ARM::VLD1DUPq8:
    case ARM::VLD1DUPq16:
    case ARM::VLD1DUPq32:
    case ARM::VLD1DUPq8wb_fixed:
    case ARM::VLD1DUPq16wb_fixed:
    case ARM::VLD1DUPq32wb_fixed:
    case ARM::VLD1DUPq8wb_register:
    case ARM::VLD1DUPq16wb_register:
    case ARM::VLD1DUPq32wb_register:
    case ARM::VLD2DUPd8:
    case ARM::VLD2DUPd16:
    case ARM::VLD2DUPd32:
    case ARM::VLD2DUPd8wb_fixed:
    case ARM::VLD2DUPd16wb_fixed:
    case ARM::VLD2DUPd32wb_fixed:
    case ARM::VLD2DUPd8wb_register:
    case ARM::VLD2DUPd16wb_register:
    case ARM::VLD2DUPd32wb_register:
    case ARM::VLD4DUPd8Pseudo:
    case ARM::VLD4DUPd16Pseudo:
    case ARM::VLD4DUPd32Pseudo:
    case ARM::VLD4DUPd8Pseudo_UPD:
    case ARM::VLD4DUPd16Pseudo_UPD:
    case ARM::VLD4DUPd32Pseudo_UPD:
    case ARM::VLD1LNq8Pseudo:
    case ARM::VLD1LNq16Pseudo:
    case ARM::VLD1LNq32Pseudo:
    case ARM::VLD1LNq8Pseudo_UPD:
    case ARM::VLD1LNq16Pseudo_UPD:
    case ARM::VLD1LNq32Pseudo_UPD:
    case ARM::VLD2LNd8Pseudo:
    case ARM::VLD2LNd16Pseudo:
    case ARM::VLD2LNd32Pseudo:
    case ARM::VLD2LNq16Pseudo:
    case ARM::VLD2LNq32Pseudo:
    case ARM::VLD2LNd8Pseudo_UPD:
    case ARM::VLD2LNd16Pseudo_UPD:
    case ARM::VLD2LNd32Pseudo_UPD:
    case ARM::VLD2LNq16Pseudo_UPD:
    case ARM::VLD2LNq32Pseudo_UPD:
    case ARM::VLD4LNd8Pseudo:
    case ARM::VLD4LNd16Pseudo:
    case ARM::VLD4LNd32Pseudo:
    case ARM::VLD4LNq16Pseudo:
    case ARM::VLD4LNq32Pseudo:
    case ARM::VLD4LNd8Pseudo_UPD:
    case ARM::VLD4LNd16Pseudo_UPD:
    case ARM::VLD4LNd32Pseudo_UPD:
    case ARM::VLD4LNq16Pseudo_UPD:
    case ARM::VLD4LNq32Pseudo_UPD:
      // If the address is not 64-bit aligned, the latencies of these
      // instructions increase by one.
      ++Latency;
      break;
    }

  return Latency;
}

unsigned
ARMBaseInstrInfo::getOutputLatency(const InstrItineraryData *ItinData,
                                   const MachineInstr *DefMI, unsigned DefIdx,
                                   const MachineInstr *DepMI) const {
  unsigned Reg = DefMI->getOperand(DefIdx).getReg();
  if (DepMI->readsRegister(Reg, &getRegisterInfo()) || !isPredicated(DepMI))
    return 1;

  // If the second MI is predicated, then there is an implicit use dependency.
  return getInstrLatency(ItinData, DefMI);
}

unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                           const MachineInstr *MI,
                                           unsigned *PredCost) const {
  if (MI->isCopyLike() || MI->isInsertSubreg() ||
      MI->isRegSequence() || MI->isImplicitDef())
    return 1;

  // An instruction scheduler typically runs on unbundled instructions;
  // however, other passes may query the latency of a bundled instruction.
  if (MI->isBundle()) {
    unsigned Latency = 0;
    MachineBasicBlock::const_instr_iterator I = MI;
    MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      if (I->getOpcode() != ARM::t2IT)
        Latency += getInstrLatency(ItinData, I, PredCost);
    }
    return Latency;
  }

  const MCInstrDesc &MCID = MI->getDesc();
  if (PredCost && (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR))) {
    // When predicated, CPSR is an additional source operand for CPSR-updating
    // instructions; this apparently increases their latencies.
    *PredCost = 1;
  }
  // Be sure to call getStageLatency for an empty itinerary in case it has a
  // valid MinLatency property.
  if (!ItinData)
    return MI->mayLoad() ? 3 : 1;

  unsigned Class = MCID.getSchedClass();

  // For instructions with variable uops, use uops as latency.
  if (!ItinData->isEmpty() && ItinData->getNumMicroOps(Class) < 0)
    return getNumMicroOps(ItinData, MI);

  // For the common case, fall back on the itinerary's latency.
  unsigned Latency = ItinData->getStageLatency(Class);

  // Adjust for dynamic def-side opcode variants not captured by the itinerary.
  unsigned DefAlign = MI->hasOneMemOperand()
    ? (*MI->memoperands_begin())->getAlignment() : 0;
  int Adj = adjustDefLatency(Subtarget, MI, &MCID, DefAlign);
  if (Adj >= 0 || (int)Latency > -Adj) {
    return Latency + Adj;
  }
  return Latency;
}

int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      SDNode *Node) const {
  if (!Node->isMachineOpcode())
    return 1;

  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Opcode = Node->getMachineOpcode();
  switch (Opcode) {
  default:
    return ItinData->getStageLatency(get(Opcode).getSchedClass());
  case ARM::VLDMQIA:
  case ARM::VSTMQIA:
    return 2;
  }
}

bool ARMBaseInstrInfo::
hasHighOperandLatency(const InstrItineraryData *ItinData,
                      const MachineRegisterInfo *MRI,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
  unsigned UDomain = UseMI->getDesc().TSFlags & ARMII::DomainMask;
  if (Subtarget.isCortexA8() &&
      (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
    // CortexA8 VFP instructions are not pipelined.
    return true;

  // Hoist VFP / NEON instructions with 4 or higher latency.
  int Latency = computeOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx,
                                      /*FindMin=*/false);
  if (Latency < 0)
    Latency = getInstrLatency(ItinData, DefMI);
  if (Latency <= 3)
    return false;
  return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
         UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
}

bool ARMBaseInstrInfo::
hasLowDefLatency(const InstrItineraryData *ItinData,
                 const MachineInstr *DefMI, unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
  if (DDomain == ARMII::DomainGeneral) {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
    return (DefCycle != -1 && DefCycle <= 2);
  }
  return false;
}

bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr *MI,
                                         StringRef &ErrInfo) const {
  if (convertAddSubFlagsOpcode(MI->getOpcode())) {
    ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG";
    return false;
  }
  return true;
}

bool
ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
                                     unsigned &AddSubOpc,
                                     bool &NegAcc, bool &HasLane) const {
  DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode);
  if (I == MLxEntryMap.end())
    return false;

  const ARM_MLxEntry &Entry = ARM_MLxTable[I->second];
  MulOpc = Entry.MulOpc;
  AddSubOpc = Entry.AddSubOpc;
  NegAcc = Entry.NegAcc;
  HasLane = Entry.HasLane;
  return true;
}

//===----------------------------------------------------------------------===//
// Execution domains.
//===----------------------------------------------------------------------===//
//
// Some instructions go down the NEON pipeline, some go down the VFP pipeline,
// and some can go down both. The vmov instructions go down the VFP pipeline,
// but they can be changed to vorr equivalents that are executed by the NEON
// pipeline.
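//
// For example (illustrative), a 'vmov d0, d1' (ARM::VMOVD) can be rewritten
// as 'vorr d0, d1, d1' (ARM::VORRd) so it executes in the NEON domain;
// setExecutionDomain() below performs exactly this rewrite.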
//
// We use the following execution domain numbering:
//
enum ARMExeDomain {
  ExeGeneric = 0,
  ExeVFP = 1,
  ExeNEON = 2
};
//
// Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h
//
std::pair<uint16_t, uint16_t>
ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const {
  // VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON
  // if they are not predicated.
  if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI))
    return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));

  // Cortex-A9 is particularly picky about mixing the two and wants these
  // converted.
  if (Subtarget.isCortexA9() && !isPredicated(MI) &&
      (MI->getOpcode() == ARM::VMOVRS ||
       MI->getOpcode() == ARM::VMOVSR ||
       MI->getOpcode() == ARM::VMOVS))
    return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));

  // No other instructions can be swizzled, so just determine their domain.
  unsigned Domain = MI->getDesc().TSFlags & ARMII::DomainMask;

  if (Domain & ARMII::DomainNEON)
    return std::make_pair(ExeNEON, 0);

  // Certain instructions can go either way on Cortex-A8.
  // Treat them as NEON instructions.
  if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8())
    return std::make_pair(ExeNEON, 0);

  if (Domain & ARMII::DomainVFP)
    return std::make_pair(ExeVFP, 0);

  return std::make_pair(ExeGeneric, 0);
}

static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI,
                                            unsigned SReg, unsigned &Lane) {
  unsigned DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
  Lane = 0;

  if (DReg != ARM::NoRegister)
    return DReg;

  Lane = 1;
  DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);

  assert(DReg && "S-register with no D super-register?");
  return DReg;
}


void
ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
  unsigned DstReg, SrcReg, DReg;
  unsigned Lane;
  MachineInstrBuilder MIB(MI);
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("cannot handle opcode!");
    break;
  case ARM::VMOVD:
    if (Domain != ExeNEON)
      break;

    // Zap the predicate operands.
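    // All explicit operands, including the trailing (AL, %noreg) predicate,
    // are removed below; AddDefaultPred re-adds an always-execute predicate
    // for the VORRd.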
    assert(!isPredicated(MI) && "Cannot predicate a VORRd");

    // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits)
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();

    for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
      MI->RemoveOperand(i-1);

    // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits)
    MI->setDesc(get(ARM::VORRd));
    AddDefaultPred(MIB.addReg(DstReg, RegState::Define)
                      .addReg(SrcReg)
                      .addReg(SrcReg));
    break;
  case ARM::VMOVRS:
    if (Domain != ExeNEON)
      break;
    assert(!isPredicated(MI) && "Cannot predicate a VGETLN");

    // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits)
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();

    for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
      MI->RemoveOperand(i-1);

    DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);

    // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps)
    // Note that DSrc has been widened and the other lane may be undef, which
    // contaminates the entire register.
    MI->setDesc(get(ARM::VGETLNi32));
    AddDefaultPred(MIB.addReg(DstReg, RegState::Define)
                      .addReg(DReg, RegState::Undef)
                      .addImm(Lane));

    // The old source should be an implicit use, otherwise we might think it
    // was dead before here.
    MIB.addReg(SrcReg, RegState::Implicit);
    break;
  case ARM::VMOVSR:
    if (Domain != ExeNEON)
      break;
    assert(!isPredicated(MI) && "Cannot predicate a VSETLN");

    // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits)
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();

    DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane);

    // If we insert both a novel <def> and an <undef> on the DReg, we break
    // any existing dependency chain on the unused lane. If either is already
    // present, this instruction is in that chain anyway, so we can make the
    // transformation.
    if (!MI->definesRegister(DReg, TRI) && !MI->readsRegister(DReg, TRI))
      break;

    for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
      MI->RemoveOperand(i-1);

    // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps)
    // Again DDst may be undefined at the beginning of this instruction.
    MI->setDesc(get(ARM::VSETLNi32));
    MIB.addReg(DReg, RegState::Define)
       .addReg(DReg, getUndefRegState(!MI->readsRegister(DReg, TRI)))
       .addReg(SrcReg)
       .addImm(Lane);
    AddDefaultPred(MIB);

    // The narrower destination must be marked as set to keep previous chains
    // in place.
    MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
    break;
  case ARM::VMOVS: {
    if (Domain != ExeNEON)
      break;

    // Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits)
    DstReg = MI->getOperand(0).getReg();
    SrcReg = MI->getOperand(1).getReg();

    unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
    DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane);
    DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane);
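    // Each S register is one 32-bit lane of a D register: sN is lane (N % 2)
    // of d(N / 2), so DstLane and SrcLane record which half of DDst / DSrc
    // holds the original SDst / SSrc.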

    // If we insert both a novel <def> and an <undef> on the DReg, we break
    // any existing dependency chain on the unused lane. If either is already
    // present, this instruction is in that chain anyway, so we can make the
    // transformation.
    if (!MI->definesRegister(DDst, TRI) && !MI->readsRegister(DDst, TRI))
      break;

    for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
      MI->RemoveOperand(i-1);

    if (DSrc == DDst) {
      // Destination can be:
      //     %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits)
      MI->setDesc(get(ARM::VDUPLN32d));
      MIB.addReg(DDst, RegState::Define)
         .addReg(DDst, getUndefRegState(!MI->readsRegister(DDst, TRI)))
         .addImm(SrcLane);
      AddDefaultPred(MIB);

      // Neither the source nor the destination is naturally represented any
      // more, so add them in manually.
      MIB.addReg(DstReg, RegState::Implicit | RegState::Define);
      MIB.addReg(SrcReg, RegState::Implicit);
      break;
    }

    // In general there's no single instruction that can perform an S <-> S
    // move in NEON space, but a pair of VEXT instructions *can* do the
    // job. It turns out that the VEXTs needed will only use DSrc once, with
    // the position based purely on the combination of lane-0 and lane-1
    // involved. For example:
    //     vmov s0, s2 -> vext.32 d0, d0, d1, #1  vext.32 d0, d0, d0, #1
    //     vmov s1, s3 -> vext.32 d0, d1, d0, #1  vext.32 d0, d0, d0, #1
    //     vmov s0, s3 -> vext.32 d0, d0, d0, #1  vext.32 d0, d1, d0, #1
    //     vmov s1, s2 -> vext.32 d0, d0, d0, #1  vext.32 d0, d0, d1, #1
    //
    // Pattern of the MachineInstrs is:
    //     %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (; implicits)
    MachineInstrBuilder NewMIB;
    NewMIB = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
                     get(ARM::VEXTd32), DDst);

    // On the first instruction, both DSrc and DDst may be <undef> if present,
    // specifically when the original instruction didn't have them as an
    // <imp-use>.
    unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
    bool CurUndef = !MI->readsRegister(CurReg, TRI);
    NewMIB.addReg(CurReg, getUndefRegState(CurUndef));

    CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
    CurUndef = !MI->readsRegister(CurReg, TRI);
    NewMIB.addReg(CurReg, getUndefRegState(CurUndef));

    NewMIB.addImm(1);
    AddDefaultPred(NewMIB);

    if (SrcLane == DstLane)
      NewMIB.addReg(SrcReg, RegState::Implicit);

    MI->setDesc(get(ARM::VEXTd32));
    MIB.addReg(DDst, RegState::Define);

    // On the second instruction, DDst has definitely been defined above, so
    // it is not <undef>. DSrc, if present, can be <undef> as above.
    CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
    CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI);
    MIB.addReg(CurReg, getUndefRegState(CurUndef));

    CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
    CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI);
    MIB.addReg(CurReg, getUndefRegState(CurUndef));

    MIB.addImm(1);
    AddDefaultPred(MIB);

    if (SrcLane != DstLane)
      MIB.addReg(SrcReg, RegState::Implicit);

    // As before, the original destination is no longer represented, add it
    // implicitly.
    MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
    break;
  }
  }
}

/// hasNOP - Return true if the subtarget can encode a real NOP; the NOP hint
/// instruction requires ARMv6T2.
bool ARMBaseInstrInfo::hasNOP() const {
  return (Subtarget.getFeatureBits() & ARM::HasV6T2Ops) != 0;
}