//===-- X86InstrInfo.h - X86 Instruction Information ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef X86INSTRUCTIONINFO_H
#define X86INSTRUCTIONINFO_H

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86RegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Target/TargetInstrInfo.h"

#define GET_INSTRINFO_HEADER
#include "X86GenInstrInfo.inc"

namespace llvm {
  class X86RegisterInfo;
  class X86Subtarget;

namespace X86 {
  // X86 specific condition code. These correspond to X86_*_COND in
  // X86InstrInfo.td. They must be kept in sync.
  enum CondCode {
    COND_A  = 0,
    COND_AE = 1,
    COND_B  = 2,
    COND_BE = 3,
    COND_E  = 4,
    COND_G  = 5,
    COND_GE = 6,
    COND_L  = 7,
    COND_LE = 8,
    COND_NE = 9,
    COND_NO = 10,
    COND_NP = 11,
    COND_NS = 12,
    COND_O  = 13,
    COND_P  = 14,
    COND_S  = 15,
    LAST_VALID_COND = COND_S,

    // Artificial condition codes. These are used by AnalyzeBranch
    // to indicate a block terminated with two conditional branches to
    // the same location. This occurs in code using FCMP_OEQ or FCMP_UNE,
    // which can't be represented on x86 with a single condition. These
    // are never used in MachineInstrs.
    COND_NE_OR_P,
    COND_NP_OR_E,

    COND_INVALID
  };

  // Turn condition code into conditional branch opcode.
  unsigned GetCondBranchFromCond(CondCode CC);

  /// \brief Return a set opcode for the given condition and whether it has
  /// a memory operand.
  unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false);

  /// \brief Return a cmov opcode for the given condition, register size in
  /// bytes, and operand type.
  unsigned getCMovFromCond(CondCode CC, unsigned RegBytes,
                           bool HasMemoryOperand = false);

  // Turn CMov opcode into condition code.
  CondCode getCondFromCMovOpc(unsigned Opc);

  /// GetOppositeBranchCondition - Return the inverse of the specified cond,
  /// e.g. turning COND_E to COND_NE.
  CondCode GetOppositeBranchCondition(CondCode CC);
} // end namespace X86


/// isGlobalStubReference - Return true if the specified TargetFlag operand is
/// a reference to a stub for a global, not the global itself.
inline static bool isGlobalStubReference(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_DLLIMPORT: // dllimport stub.
  case X86II::MO_GOTPCREL:  // rip-relative GOT reference.
  case X86II::MO_GOT:       // normal GOT reference.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:        // Normal $non_lazy_ptr ref.
  case X86II::MO_DARWIN_NONLAZY:                 // Normal $non_lazy_ptr ref.
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Hidden $non_lazy_ptr ref.
    return true;
  default:
    return false;
  }
}
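
// Example (hypothetical client code, for illustration only): the X86::CondCode
// helpers compose when a pass rewrites a conditional branch, e.g. inverting
// the condition and picking the matching branch and setcc opcodes:
//
//   X86::CondCode CC  = X86::COND_E;
//   X86::CondCode Inv = X86::GetOppositeBranchCondition(CC); // X86::COND_NE
//   unsigned BrOpc  = X86::GetCondBranchFromCond(Inv); // a JNE branch opcode
//   unsigned SetOpc = X86::getSETFromCond(Inv);        // a SETNE opcode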

/// isGlobalRelativeToPICBase - Return true if the specified global value
/// reference is relative to a 32-bit PIC base (X86ISD::GlobalBaseReg). If this
/// is true, the addressing mode has the PIC base register added in (e.g. EBX).
inline static bool isGlobalRelativeToPICBase(unsigned char TargetFlag) {
  switch (TargetFlag) {
  case X86II::MO_GOTOFF:                         // isPICStyleGOT: local global.
  case X86II::MO_GOT:                            // isPICStyleGOT: other global.
  case X86II::MO_PIC_BASE_OFFSET:                // Darwin local global.
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:        // Darwin/32 external global.
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Darwin/32 hidden global.
  case X86II::MO_TLVP:                           // ??? Pretty sure..
    return true;
  default:
    return false;
  }
}

inline static bool isScale(const MachineOperand &MO) {
  return MO.isImm() &&
    (MO.getImm() == 1 || MO.getImm() == 2 ||
     MO.getImm() == 4 || MO.getImm() == 8);
}

inline static bool isLeaMem(const MachineInstr *MI, unsigned Op) {
  if (MI->getOperand(Op).isFI()) return true;
  return Op+X86::AddrSegmentReg <= MI->getNumOperands() &&
    MI->getOperand(Op+X86::AddrBaseReg).isReg() &&
    isScale(MI->getOperand(Op+X86::AddrScaleAmt)) &&
    MI->getOperand(Op+X86::AddrIndexReg).isReg() &&
    (MI->getOperand(Op+X86::AddrDisp).isImm() ||
     MI->getOperand(Op+X86::AddrDisp).isGlobal() ||
     MI->getOperand(Op+X86::AddrDisp).isCPI() ||
     MI->getOperand(Op+X86::AddrDisp).isJTI());
}

inline static bool isMem(const MachineInstr *MI, unsigned Op) {
  if (MI->getOperand(Op).isFI()) return true;
  return Op+X86::AddrNumOperands <= MI->getNumOperands() &&
    MI->getOperand(Op+X86::AddrSegmentReg).isReg() &&
    isLeaMem(MI, Op);
}
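
// Example (hypothetical client code, for illustration only): an x86 memory
// reference is X86::AddrNumOperands consecutive operands -- base register,
// scale, index register, displacement, and segment register -- so a pass can
// check operand Op with isMem before decoding it. Note the base may also be
// a frame index rather than a register:
//
//   if (isMem(MI, Op) && MI->getOperand(Op + X86::AddrBaseReg).isReg()) {
//     unsigned BaseReg = MI->getOperand(Op + X86::AddrBaseReg).getReg();
//     int64_t  Scale   = MI->getOperand(Op + X86::AddrScaleAmt).getImm();
//     unsigned IdxReg  = MI->getOperand(Op + X86::AddrIndexReg).getReg();
//   }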

class X86InstrInfo final : public X86GenInstrInfo {
  X86Subtarget &Subtarget;
  const X86RegisterInfo RI;

  /// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
  /// RegOp2MemOpTable2, RegOp2MemOpTable3 - Load / store folding opcode maps.
  ///
  typedef DenseMap<unsigned,
                   std::pair<unsigned, unsigned> > RegOp2MemOpTableType;
  RegOp2MemOpTableType RegOp2MemOpTable2Addr;
  RegOp2MemOpTableType RegOp2MemOpTable0;
  RegOp2MemOpTableType RegOp2MemOpTable1;
  RegOp2MemOpTableType RegOp2MemOpTable2;
  RegOp2MemOpTableType RegOp2MemOpTable3;

  /// MemOp2RegOpTable - Load / store unfolding opcode map.
  ///
  typedef DenseMap<unsigned,
                   std::pair<unsigned, unsigned> > MemOp2RegOpTableType;
  MemOp2RegOpTableType MemOp2RegOpTable;

  static void AddTableEntry(RegOp2MemOpTableType &R2MTable,
                            MemOp2RegOpTableType &M2RTable,
                            unsigned RegOp, unsigned MemOp, unsigned Flags);

  virtual void anchor();

public:
  explicit X86InstrInfo(X86Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  ///
  const X86RegisterInfo &getRegisterInfo() const { return RI; }

  /// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
  /// extension instruction. That is, it's like a copy where it's legal for the
  /// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
  /// true, then it's expected the pre-extension value is available as a subreg
  /// of the result register. This also returns the sub-register index in
  /// SubIdx.
  bool isCoalescableExtInstr(const MachineInstr &MI,
                             unsigned &SrcReg, unsigned &DstReg,
                             unsigned &SubIdx) const override;

  unsigned isLoadFromStackSlot(const MachineInstr *MI,
                               int &FrameIndex) const override;
  /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                     int &FrameIndex) const override;

  unsigned isStoreToStackSlot(const MachineInstr *MI,
                              int &FrameIndex) const override;
  /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
                                    int &FrameIndex) const override;

  bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                         AliasAnalysis *AA) const override;
  void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                     unsigned DestReg, unsigned SubIdx,
                     const MachineInstr *Orig,
                     const TargetRegisterInfo &TRI) const override;

  /// Given an operand within a MachineInstr, insert preceding code to put it
  /// into the right format for a particular kind of LEA instruction. This may
  /// involve using an appropriate super-register instead (with an implicit use
  /// of the original) or creating a new virtual register and inserting COPY
  /// instructions to get the data into the right class.
  ///
  /// Reference parameters are set to indicate how the caller should add this
  /// operand to the LEA instruction.
  bool classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
                      unsigned LEAOpcode, bool AllowSP,
                      unsigned &NewSrc, bool &isKill,
                      bool &isUndef, MachineOperand &ImplicitOp) const;

  /// convertToThreeAddress - This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
  /// may be able to convert a two-address instruction into a true
  /// three-address instruction on demand. This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if
  /// they would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the new instruction.
  ///
  MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
                                      MachineBasicBlock::iterator &MBBI,
                                      LiveVariables *LV) const override;

  /// commuteInstruction - We have a few instructions that must be hacked on to
  /// commute them.
  ///
  MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const override;

  bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
                             unsigned &SrcOpIdx2) const override;
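
  // Example (for illustration; virtual register numbers are hypothetical):
  // convertToThreeAddress lets the two-address pass turn
  //
  //   %vreg2<def> = ADD32rr %vreg0, %vreg1   ; dst must be tied to %vreg0
  //
  // into the three-address form
  //
  //   %vreg2<def> = LEA32r %vreg0, 1, %vreg1, 0, %noreg
  //
  // avoiding the register copy otherwise needed to satisfy the tied-operand
  // constraint.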

  // Branch analysis.
  bool isUnpredicatedTerminator(const MachineInstr* MI) const override;
  bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify) const override;
  unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
  unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB,
                        const SmallVectorImpl<MachineOperand> &Cond,
                        DebugLoc DL) const override;
  bool canInsertSelect(const MachineBasicBlock&,
                       const SmallVectorImpl<MachineOperand> &Cond,
                       unsigned, unsigned, int&, int&, int&) const override;
  void insertSelect(MachineBasicBlock &MBB,
                    MachineBasicBlock::iterator MI, DebugLoc DL,
                    unsigned DstReg,
                    const SmallVectorImpl<MachineOperand> &Cond,
                    unsigned TrueReg, unsigned FalseReg) const override;
  void copyPhysReg(MachineBasicBlock &MBB,
                   MachineBasicBlock::iterator MI, DebugLoc DL,
                   unsigned DestReg, unsigned SrcReg,
                   bool KillSrc) const override;
  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI,
                           unsigned SrcReg, bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;

  void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
                      SmallVectorImpl<MachineOperand> &Addr,
                      const TargetRegisterClass *RC,
                      MachineInstr::mmo_iterator MMOBegin,
                      MachineInstr::mmo_iterator MMOEnd,
                      SmallVectorImpl<MachineInstr*> &NewMIs) const;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI,
                            unsigned DestReg, int FrameIndex,
                            const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

  void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                       SmallVectorImpl<MachineOperand> &Addr,
                       const TargetRegisterClass *RC,
                       MachineInstr::mmo_iterator MMOBegin,
                       MachineInstr::mmo_iterator MMOEnd,
                       SmallVectorImpl<MachineInstr*> &NewMIs) const;

  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;

  /// foldMemoryOperandImpl - If this target supports it, fold a load or store
  /// of the specified stack slot into the specified machine instruction for
  /// the specified operand(s). If folding is possible, the target should
  /// perform the folding and return the new instruction, otherwise it should
  /// return null. If it folds the instruction, it is likely that the
  /// MachineInstr the iterator references has been changed.
  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                      MachineInstr* MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      int FrameIndex) const override;

  /// foldMemoryOperandImpl - Same as the previous version except it allows
  /// folding of any load and store from / to any address, not just from a
  /// specific stack slot.
  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                      MachineInstr* MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr* LoadMI) const override;

  /// canFoldMemoryOperand - Returns true if the specified load / store
  /// folding is possible.
  bool canFoldMemoryOperand(const MachineInstr*,
                            const SmallVectorImpl<unsigned> &) const override;
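
  // Example (for illustration; virtual register numbers are hypothetical):
  // when a value must be reloaded from a stack slot, foldMemoryOperandImpl
  // can merge the reload into its user via the RegOp2MemOpTable* maps,
  // rewriting
  //
  //   %vreg1<def> = MOV32rm <fi#0>, 1, %noreg, 0, %noreg   ; reload
  //   %vreg2<def> = ADD32rr %vreg2, %vreg1
  //
  // into the single memory-form instruction
  //
  //   %vreg2<def> = ADD32rm %vreg2, <fi#0>, 1, %noreg, 0, %noreg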

  /// unfoldMemoryOperand - Separate a single instruction which folded a load
  /// or a store or a load and a store into two or more instructions. If this
  /// is possible, returns true as well as the new instructions by reference.
  bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                           unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                           SmallVectorImpl<MachineInstr*> &NewMIs) const override;

  bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                           SmallVectorImpl<SDNode*> &NewNodes) const override;

  /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would-be new
  /// instruction after load / store are unfolded from an instruction of the
  /// specified opcode. It returns zero if the specified unfolding is not
  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
  /// index of the operand which will hold the register holding the loaded
  /// value.
  unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
                                      bool UnfoldLoad, bool UnfoldStore,
                                      unsigned *LoadRegIndex = nullptr) const override;

  /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
  /// to determine if two loads are loading from the same base address. It
  /// should only return true if the base pointers are the same and the
  /// only difference between the two addresses is the offset. It also returns
  /// the offsets by reference.
  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
                               int64_t &Offset2) const override;

  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads
  /// should be scheduled together. On some targets if two loads are loading
  /// from addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load
  /// offsets from the common base address. It returns true if it decides it's
  /// desirable to schedule the two loads together. "NumLoads" is the number
  /// of loads that have already been scheduled after Load1.
  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                               int64_t Offset1, int64_t Offset2,
                               unsigned NumLoads) const override;

  bool shouldScheduleAdjacent(MachineInstr* First,
                              MachineInstr *Second) const override;

  void getNoopForMachoTarget(MCInst &NopInst) const override;

  bool
  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;

  /// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine
  /// instruction that defines the specified register class.
  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;

  /// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction
  /// that would clobber the EFLAGS condition register. Note the result may be
  /// conservative. If it cannot definitely determine the safety after visiting
  /// a few instructions in each direction it assumes it's not safe.
  bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) const;

  static bool isX86_64ExtendedReg(const MachineOperand &MO) {
    if (!MO.isReg()) return false;
    return X86II::isX86_64ExtendedReg(MO.getReg());
  }
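
  // Example (hypothetical client code, for illustration only): the predicate
  // above flags operands that require a REX prefix in 64-bit mode, such as
  // R8-R15 and XMM8-XMM15:
  //
  //   if (X86InstrInfo::isX86_64ExtendedReg(MI->getOperand(0)))
  //     ... // the encoding will need a REX prefix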

  /// getGlobalBaseReg - Return a virtual register initialized with the
  /// global base register value. Outputs instructions required to
  /// initialize the register in the function entry block, if necessary.
  ///
  unsigned getGlobalBaseReg(MachineFunction *MF) const;

  std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr *MI) const override;

  void setExecutionDomain(MachineInstr *MI, unsigned Domain) const override;

  unsigned
  getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
                               const TargetRegisterInfo *TRI) const override;
  unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
                                const TargetRegisterInfo *TRI) const override;
  void breakPartialRegDependency(MachineBasicBlock::iterator MI,
                                 unsigned OpNum,
                                 const TargetRegisterInfo *TRI) const override;

  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                      MachineInstr* MI,
                                      unsigned OpNum,
                                      const SmallVectorImpl<MachineOperand> &MOs,
                                      unsigned Size, unsigned Alignment) const;

  void
  getUnconditionalBranch(MCInst &Branch,
                         const MCSymbolRefExpr *BranchTarget) const override;

  void getTrap(MCInst &MI) const override;

  bool isHighLatencyDef(int opc) const override;

  bool hasHighOperandLatency(const InstrItineraryData *ItinData,
                             const MachineRegisterInfo *MRI,
                             const MachineInstr *DefMI, unsigned DefIdx,
                             const MachineInstr *UseMI,
                             unsigned UseIdx) const override;

  /// analyzeCompare - For a comparison instruction, return the source
  /// registers in SrcReg and SrcReg2 if it has two register operands, and the
  /// value it compares against in CmpValue. Return true if the comparison
  /// instruction can be analyzed.
  bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                      unsigned &SrcReg2, int &CmpMask,
                      int &CmpValue) const override;

  /// optimizeCompareInstr - Check if there exists an earlier instruction that
  /// operates on the same source operands and sets flags in the same way as
  /// Compare; remove Compare if possible.
  bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
                            unsigned SrcReg2, int CmpMask, int CmpValue,
                            const MachineRegisterInfo *MRI) const override;

  /// optimizeLoadInstr - Try to remove the load by folding it to a register
  /// operand at the use. We fold the load instruction if and only if the
  /// def and use are in the same BB. We only look at one load and see
  /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual
  /// register defined by the load we are trying to fold. DefMI returns the
  /// machine instruction that defines FoldAsLoadDefReg, and the function
  /// returns the machine instruction generated due to folding.
  MachineInstr* optimizeLoadInstr(MachineInstr *MI,
                                  const MachineRegisterInfo *MRI,
                                  unsigned &FoldAsLoadDefReg,
                                  MachineInstr *&DefMI) const override;

private:
  MachineInstr *convertToThreeAddressWithLEA(unsigned MIOpc,
                                             MachineFunction::iterator &MFI,
                                             MachineBasicBlock::iterator &MBBI,
                                             LiveVariables *LV) const;

  /// isFrameOperand - Return true and the FrameIndex if the specified
  /// operand and the following operands form a reference to the stack frame.
  bool isFrameOperand(const MachineInstr *MI, unsigned int Op,
                      int &FrameIndex) const;
};

} // End llvm namespace

#endif
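
// Usage sketch (hypothetical client code, for illustration only; TII, CmpMI,
// and MRI are assumed names): analyzeCompare and optimizeCompareInstr
// cooperate in the peephole optimizer to delete a CMP/TEST whose EFLAGS
// result is already produced by an earlier instruction:
//
//   unsigned SrcReg, SrcReg2;
//   int CmpMask, CmpValue;
//   if (TII->analyzeCompare(CmpMI, SrcReg, SrcReg2, CmpMask, CmpValue))
//     TII->optimizeCompareInstr(CmpMI, SrcReg, SrcReg2, CmpMask, CmpValue,
//                               MRI);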