//===-- X86VZeroUpper.cpp - AVX vzeroupper instruction inserter -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the pass which inserts x86 AVX vzeroupper instructions
// before calls to SSE encoded functions. This avoids transition latency
// penalty when transferring control between AVX encoded instructions and old
// SSE encoding mode.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-vzeroupper"
#include "X86.h"
#include "X86InstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

STATISTIC(NumVZU, "Number of vzeroupper instructions inserted");

namespace {
  struct VZeroUpperInserter : public MachineFunctionPass {
    static char ID;
    VZeroUpperInserter() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF);

    bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB);

    virtual const char *getPassName() const { return "X86 vzeroupper inserter"; }

  private:
    const TargetInstrInfo *TII; // Machine instruction info.

    // Any YMM register live-in to this function?
    bool FnHasLiveInYmm;

    // BBState - Contains the state of each MBB: unknown, clean, dirty
    SmallVector<uint8_t, 8> BBState;

    // BBSolved - Keep track of all MBBs which have already been analyzed
    // and need no further processing.
    BitVector BBSolved;

    // Machine basic blocks are classified by this pass as follows:
    //
    //  ST_UNKNOWN - The MBB state is unknown, meaning that from the entry
    //  state until the MBB exit there is no instruction using YMM to change
    //  the state to dirty, or one of the incoming predecessors is unknown
    //  and there is no dirty predecessor among them.
    //
    //  ST_CLEAN - No YMM usage at the end of the MBB. An MBB could have
    //  instructions using YMM and still be marked ST_CLEAN, as long as the
    //  state is cleaned by a vzeroupper before any call.
    //
    //  ST_DIRTY - Any MBB ending with a YMM usage not cleaned up by a
    //  vzeroupper instruction.
    //
    //  ST_INIT - Placeholder for an empty state set
    //
    enum {
      ST_UNKNOWN = 0,
      ST_CLEAN   = 1,
      ST_DIRTY   = 2,
      ST_INIT    = 3
    };

    // computeState - Given two states, compute the resulting state, in the
    // following way:
    //
    //  1) Any dirty state yields a dirty result
    //  2) All states must be clean for the result to be clean
    //  3) Otherwise, if either state is unknown, the result is unknown
    //
    // ST_INIT acts as the neutral initial value and simply yields the other
    // state.
    static unsigned computeState(unsigned PrevState, unsigned CurState) {
      if (PrevState == ST_INIT)
        return CurState;

      if (PrevState == ST_DIRTY || CurState == ST_DIRTY)
        return ST_DIRTY;

      if (PrevState == ST_CLEAN && CurState == ST_CLEAN)
        return ST_CLEAN;

      return ST_UNKNOWN;
    }

  };
  char VZeroUpperInserter::ID = 0;
}

FunctionPass *llvm::createX86IssueVZeroUpperPass() {
  return new VZeroUpperInserter();
}

// isYmmReg - Return true if Reg is one of the 256-bit YMM registers.
static bool isYmmReg(unsigned Reg) {
  return (Reg >= X86::YMM0 && Reg <= X86::YMM31);
}

// isZmmReg - Return true if Reg is one of the 512-bit ZMM registers.
static bool isZmmReg(unsigned Reg) {
  return (Reg >= X86::ZMM0 && Reg <= X86::ZMM31);
}

// checkFnHasLiveInYmm - Return true if any YMM or ZMM register is live-in
// to the function.
static bool checkFnHasLiveInYmm(MachineRegisterInfo &MRI) {
  for (MachineRegisterInfo::livein_iterator I = MRI.livein_begin(),
       E = MRI.livein_end(); I != E; ++I)
    if (isYmmReg(I->first) || isZmmReg(I->first))
      return true;

  return false;
}

// clobbersAllYmmRegs - Return true if the register mask operand clobbers
// every register from YMM0-YMM31 and ZMM0-ZMM31.
static bool clobbersAllYmmRegs(const MachineOperand &MO) {
  for (unsigned reg = X86::YMM0; reg <= X86::YMM31; ++reg) {
    if (!MO.clobbersPhysReg(reg))
      return false;
  }
  for (unsigned reg = X86::ZMM0; reg <= X86::ZMM31; ++reg) {
    if (!MO.clobbersPhysReg(reg))
      return false;
  }
  return true;
}

// hasYmmReg - Return true if the instruction uses a YMM register, either as
// an explicit register operand or, for calls, through a register mask that
// does not clobber all YMM/ZMM registers.
static bool hasYmmReg(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MI->isCall() && MO.isRegMask() && !clobbersAllYmmRegs(MO))
      return true;
    if (!MO.isReg())
      continue;
    if (MO.isDebug())
      continue;
    if (isYmmReg(MO.getReg()))
      return true;
  }
  return false;
}

/// runOnMachineFunction - Loop over all of the basic blocks, inserting
/// vzeroupper instructions before function calls.
bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getTarget().getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool EverMadeChange = false;

  // Fast check: if the function doesn't use any ymm registers, we don't need
  // to insert any VZEROUPPER instructions. This is constant-time, so it is
  // cheap in the common case of no ymm use.
  bool YMMUsed = false;
  const TargetRegisterClass *RC = &X86::VR256RegClass;
  for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end();
       i != e; i++) {
    if (!MRI.reg_nodbg_empty(*i)) {
      YMMUsed = true;
      break;
    }
  }
  if (!YMMUsed)
    return EverMadeChange;

  // Pre-compute the existence of any live-in YMM registers to this function.
  FnHasLiveInYmm = checkFnHasLiveInYmm(MRI);

  assert(BBState.empty());
  BBState.resize(MF.getNumBlockIDs(), 0);
  BBSolved.resize(MF.getNumBlockIDs(), 0);

  // Each BB state depends on all predecessors, loop over until everything
  // converges. (Once we converge, we can implicitly mark everything that is
  // still ST_UNKNOWN as ST_CLEAN.)
  while (1) {
    bool MadeChange = false;

    // Process all basic blocks.
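    // (Blocks already marked solved return immediately from
    // processBasicBlock, so repeated sweeps only do work on blocks whose
    // entry state is still unknown.)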
    for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
      MadeChange |= processBasicBlock(MF, *I);

    // If this iteration over the code changed anything, keep iterating.
    if (!MadeChange) break;
    EverMadeChange = true;
  }

  BBState.clear();
  BBSolved.clear();
  return EverMadeChange;
}

/// processBasicBlock - Loop over all of the instructions in the basic block,
/// inserting vzeroupper instructions before function calls.
bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF,
                                           MachineBasicBlock &BB) {
  bool Changed = false;
  unsigned BBNum = BB.getNumber();

  // Don't process already solved BBs.
  if (BBSolved[BBNum])
    return false; // No changes

  // Check the state of all predecessors.
  unsigned EntryState = ST_INIT;
  for (MachineBasicBlock::const_pred_iterator PI = BB.pred_begin(),
       PE = BB.pred_end(); PI != PE; ++PI) {
    EntryState = computeState(EntryState, BBState[(*PI)->getNumber()]);
    if (EntryState == ST_DIRTY)
      break;
  }

  // The entry MBB for the function may set the initial state to dirty if
  // the function receives any YMM incoming arguments.
  if (&BB == MF.begin()) {
    EntryState = ST_CLEAN;
    if (FnHasLiveInYmm)
      EntryState = ST_DIRTY;
  }

  // The current state is initialized according to the predecessors.
  unsigned CurState = EntryState;
  bool BBHasCall = false;

  for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
    MachineInstr *MI = I;
    DebugLoc dl = I->getDebugLoc();
    bool isControlFlow = MI->isCall() || MI->isReturn();

    // Shortcut: don't need to check regular instructions in dirty state.
    if (!isControlFlow && CurState == ST_DIRTY)
      continue;

    if (hasYmmReg(MI)) {
      // We found a ymm-using instruction; this could be an AVX instruction,
      // or it could be control flow.
      CurState = ST_DIRTY;
      continue;
    }

    // Check for control-flow out of the current function (which might
    // indirectly execute SSE instructions).
    if (!isControlFlow)
      continue;

    BBHasCall = true;

    // The VZEROUPPER instruction resets the upper 128 bits of all Intel AVX
    // registers. This instruction has zero latency. In addition, the processor
    // changes back to Clean state, after which execution of Intel SSE
    // instructions or Intel AVX instructions has no transition penalty. Add
    // the VZEROUPPER instruction before any function call/return that might
    // execute SSE code.
    // FIXME: In some cases, we may want to move the VZEROUPPER into a
    // predecessor block.
    if (CurState == ST_DIRTY) {
      // Only insert the VZEROUPPER in case the entry state isn't unknown.
      // When unknown, only compute the information within the block to have
      // it available in the exit if possible, but don't change the block.
      if (EntryState != ST_UNKNOWN) {
        BuildMI(BB, I, dl, TII->get(X86::VZEROUPPER));
        ++NumVZU;
      }

      // After the inserted VZEROUPPER the state becomes clean again, but
      // other YMM instructions may appear before subsequent calls or even
      // before the end of the BB.
      CurState = ST_CLEAN;
    }
  }

  DEBUG(dbgs() << "MBB #" << BBNum
               << ", current state: " << CurState << '\n');

  // A BB can only be considered solved when we both have done all the
  // necessary transformations, and have computed the exit state. This happens
  // in two cases:
  //  1) We know the entry state: this immediately implies the exit state and
  //     all the necessary transformations.
  //  2) There are no calls, and an instruction inside the block determines
  //     the exit state on its own: no transformations are necessary, and we
  //     know the exit state.
  if (EntryState != ST_UNKNOWN || (!BBHasCall && CurState != ST_UNKNOWN))
    BBSolved[BBNum] = true;

  if (CurState != BBState[BBNum])
    Changed = true;

  BBState[BBNum] = CurState;
  return Changed;
}