//====- X86SpeculativeLoadHardening.cpp - A Spectre v1 mitigation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Provide a pass which mitigates speculative execution attacks which operate
/// by speculating incorrectly past some predicate (a type check, bounds check,
/// or other condition) to reach a load with invalid inputs and leak the data
/// accessed by that load using a side channel out of the speculative domain.
///
/// For details on the attacks, see the first variant in both the Project Zero
/// writeup and the Spectre paper:
/// https://googleprojectzero.blogspot.com/2018/01/reading-privileged-memory-with-side.html
/// https://spectreattack.com/spectre.pdf
///
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define PASS_KEY "x86-speculative-load-hardening"
#define DEBUG_TYPE PASS_KEY

STATISTIC(NumCondBranchesTraced, "Number of conditional branches traced");
STATISTIC(NumBranchesUntraced, "Number of branches unable to trace");
STATISTIC(NumAddrRegsHardened,
          "Number of address mode used registers hardened");
STATISTIC(NumPostLoadRegsHardened,
          "Number of post-load register values hardened");
STATISTIC(NumCallsOrJumpsHardened,
          "Number of calls or jumps requiring extra hardening");
STATISTIC(NumInstsInserted, "Number of instructions inserted");
STATISTIC(NumLFENCEsInserted, "Number of lfence instructions inserted");

static cl::opt<bool> HardenEdgesWithLFENCE(
    PASS_KEY "-lfence",
    cl::desc(
        "Use LFENCE along each conditional edge to harden against speculative "
        "loads rather than conditional movs and poisoned pointers."),
    cl::init(false), cl::Hidden);

static cl::opt<bool> EnablePostLoadHardening(
    PASS_KEY "-post-load",
    cl::desc("Harden the value loaded *after* it is loaded by "
"flushing the loaded bits to 1. This is hard to do " 89 "in general but can be done easily for GPRs."), 90 cl::init(true), cl::Hidden); 91 92 static cl::opt<bool> FenceCallAndRet( 93 PASS_KEY "-fence-call-and-ret", 94 cl::desc("Use a full speculation fence to harden both call and ret edges " 95 "rather than a lighter weight mitigation."), 96 cl::init(false), cl::Hidden); 97 98 static cl::opt<bool> HardenInterprocedurally( 99 PASS_KEY "-ip", 100 cl::desc("Harden interprocedurally by passing our state in and out of " 101 "functions in the high bits of the stack pointer."), 102 cl::init(true), cl::Hidden); 103 104 static cl::opt<bool> 105 HardenLoads(PASS_KEY "-loads", 106 cl::desc("Sanitize loads from memory. When disable, no " 107 "significant security is provided."), 108 cl::init(true), cl::Hidden); 109 110 static cl::opt<bool> HardenIndirectCallsAndJumps( 111 PASS_KEY "-indirect", 112 cl::desc("Harden indirect calls and jumps against using speculatively " 113 "stored attacker controlled addresses. This is designed to " 114 "mitigate Spectre v1.2 style attacks."), 115 cl::init(true), cl::Hidden); 116 117 namespace llvm { 118 119 void initializeX86SpeculativeLoadHardeningPassPass(PassRegistry &); 120 121 } // end namespace llvm 122 123 namespace { 124 125 class X86SpeculativeLoadHardeningPass : public MachineFunctionPass { 126 public: 127 X86SpeculativeLoadHardeningPass() : MachineFunctionPass(ID) { 128 initializeX86SpeculativeLoadHardeningPassPass( 129 *PassRegistry::getPassRegistry()); 130 } 131 132 StringRef getPassName() const override { 133 return "X86 speculative load hardening"; 134 } 135 bool runOnMachineFunction(MachineFunction &MF) override; 136 void getAnalysisUsage(AnalysisUsage &AU) const override; 137 138 /// Pass identification, replacement for typeid. 139 static char ID; 140 141 private: 142 /// The information about a block's conditional terminators needed to trace 143 /// our predicate state through the exiting edges. 144 struct BlockCondInfo { 145 MachineBasicBlock *MBB; 146 147 // We mostly have one conditional branch, and in extremely rare cases have 148 // two. Three and more are so rare as to be unimportant for compile time. 149 SmallVector<MachineInstr *, 2> CondBrs; 150 151 MachineInstr *UncondBr; 152 }; 153 154 /// Manages the predicate state traced through the program. 
  struct PredState {
    unsigned InitialReg;
    unsigned PoisonReg;

    const TargetRegisterClass *RC;
    MachineSSAUpdater SSA;

    PredState(MachineFunction &MF, const TargetRegisterClass *RC)
        : RC(RC), SSA(MF) {}
  };

  const X86Subtarget *Subtarget;
  MachineRegisterInfo *MRI;
  const X86InstrInfo *TII;
  const TargetRegisterInfo *TRI;

  Optional<PredState> PS;

  void hardenEdgesWithLFENCE(MachineFunction &MF);

  SmallVector<BlockCondInfo, 16> collectBlockCondInfo(MachineFunction &MF);

  SmallVector<MachineInstr *, 16>
  tracePredStateThroughCFG(MachineFunction &MF, ArrayRef<BlockCondInfo> Infos);

  void unfoldCallAndJumpLoads(MachineFunction &MF);

  void tracePredStateThroughBlocksAndHarden(MachineFunction &MF);

  unsigned saveEFLAGS(MachineBasicBlock &MBB,
                      MachineBasicBlock::iterator InsertPt, DebugLoc Loc);
  void restoreEFLAGS(MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
                     unsigned OFReg);

  void mergePredStateIntoSP(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
                            unsigned PredStateReg);
  unsigned extractPredStateFromSP(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator InsertPt,
                                  DebugLoc Loc);

  void
  hardenLoadAddr(MachineInstr &MI, MachineOperand &BaseMO,
                 MachineOperand &IndexMO,
                 SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
  MachineInstr *
  sinkPostLoadHardenedInst(MachineInstr &MI,
                           SmallPtrSetImpl<MachineInstr *> &HardenedInstrs);
  bool canHardenRegister(unsigned Reg);
  unsigned hardenValueInRegister(unsigned Reg, MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator InsertPt,
                                 DebugLoc Loc);
  unsigned hardenPostLoad(MachineInstr &MI);
  void hardenReturnInstr(MachineInstr &MI);
  void tracePredStateThroughCall(MachineInstr &MI);
  void hardenIndirectCallOrJumpInstr(
      MachineInstr &MI,
      SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg);
};

} // end anonymous namespace

char X86SpeculativeLoadHardeningPass::ID = 0;

void X86SpeculativeLoadHardeningPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  MachineFunctionPass::getAnalysisUsage(AU);
}

static MachineBasicBlock &splitEdge(MachineBasicBlock &MBB,
                                    MachineBasicBlock &Succ, int SuccCount,
                                    MachineInstr *Br, MachineInstr *&UncondBr,
                                    const X86InstrInfo &TII) {
  assert(!Succ.isEHPad() && "Shouldn't get edges to EH pads!");

  MachineFunction &MF = *MBB.getParent();

  MachineBasicBlock &NewMBB = *MF.CreateMachineBasicBlock();

  // We have to insert the new block immediately after the current one as we
  // don't know what layout-successor relationships the successor has and we
  // may not be able to (and generally don't want to) try to fix those up.
  MF.insert(std::next(MachineFunction::iterator(&MBB)), &NewMBB);

  // Update the branch instruction if necessary.
  if (Br) {
    assert(Br->getOperand(0).getMBB() == &Succ &&
           "Didn't start with the right target!");
    Br->getOperand(0).setMBB(&NewMBB);

    // If this successor was reached through a branch rather than fallthrough,
    // we might have *broken* fallthrough and so need to inject a new
    // unconditional branch.
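    // For example (illustrative): if MBB ended with only `jne Succ` and fell
    // through to OldLayoutSucc, then inserting NewMBB immediately after MBB
    // means MBB now falls through into NewMBB instead, so we must synthesize
    // an explicit `jmp OldLayoutSucc` to preserve that edge.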
    if (!UncondBr) {
      MachineBasicBlock &OldLayoutSucc =
          *std::next(MachineFunction::iterator(&NewMBB));
      assert(MBB.isSuccessor(&OldLayoutSucc) &&
             "Without an unconditional branch, the old layout successor should "
             "be an actual successor!");
      auto BrBuilder =
          BuildMI(&MBB, DebugLoc(), TII.get(X86::JMP_1)).addMBB(&OldLayoutSucc);
      // Update the unconditional branch now that we've added one.
      UncondBr = &*BrBuilder;
    }

    // Insert unconditional "jump Succ" instruction in the new block if
    // necessary.
    if (!NewMBB.isLayoutSuccessor(&Succ)) {
      SmallVector<MachineOperand, 4> Cond;
      TII.insertBranch(NewMBB, &Succ, nullptr, Cond, Br->getDebugLoc());
    }
  } else {
    assert(!UncondBr &&
           "Cannot have a branchless successor and an unconditional branch!");
    assert(NewMBB.isLayoutSuccessor(&Succ) &&
           "A non-branch successor must have been a layout successor before "
           "and now is a layout successor of the new block.");
  }

  // If this is the only edge to the successor, we can just replace it in the
  // CFG. Otherwise we need to add a new entry in the CFG for the new
  // successor.
  if (SuccCount == 1) {
    MBB.replaceSuccessor(&Succ, &NewMBB);
  } else {
    MBB.splitSuccessor(&Succ, &NewMBB);
  }

  // Hook up the edge from the new basic block to the old successor in the CFG.
  NewMBB.addSuccessor(&Succ);

  // Fix PHI nodes in Succ so they refer to NewMBB instead of MBB.
  for (MachineInstr &MI : Succ) {
    if (!MI.isPHI())
      break;
    for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
         OpIdx += 2) {
      MachineOperand &OpV = MI.getOperand(OpIdx);
      MachineOperand &OpMBB = MI.getOperand(OpIdx + 1);
      assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
      if (OpMBB.getMBB() != &MBB)
        continue;

      // If this is the last edge to the successor, just replace MBB in the
      // PHI.
      if (SuccCount == 1) {
        OpMBB.setMBB(&NewMBB);
        break;
      }

      // Otherwise, append a new pair of operands for the new incoming edge.
      MI.addOperand(MF, OpV);
      MI.addOperand(MF, MachineOperand::CreateMBB(&NewMBB));
      break;
    }
  }

  // Inherit live-ins from the successor.
  for (auto &LI : Succ.liveins())
    NewMBB.addLiveIn(LI);

  LLVM_DEBUG(dbgs() << "  Split edge from '" << MBB.getName() << "' to '"
                    << Succ.getName() << "'.\n");
  return NewMBB;
}

/// Remove duplicate PHI operands to leave the PHI in a canonical and
/// predictable form.
///
/// FIXME: It's really frustrating that we have to do this, but SSA-form in MIR
/// isn't what you might expect. We may have multiple entries in PHI nodes for
/// a single predecessor. This makes CFG-updating extremely complex, so here we
/// simplify all PHI nodes to a model even simpler than the IR's model: exactly
/// one entry per predecessor, regardless of how many edges there are.
static void canonicalizePHIOperands(MachineFunction &MF) {
  SmallPtrSet<MachineBasicBlock *, 4> Preds;
  SmallVector<int, 4> DupIndices;
  for (auto &MBB : MF)
    for (auto &MI : MBB) {
      if (!MI.isPHI())
        break;

      // First we scan the operands of the PHI looking for duplicate entries
      // for a particular predecessor. We retain the operand index of each
      // duplicate entry found.
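      // For example, given a PHI such as:
      //   %v = PHI %a, %bb.0, %b, %bb.1, %c, %bb.1
      // the second %bb.1 pair is a duplicate; the value operand index of %c
      // (operand 5) is what gets recorded in DupIndices.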
      for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;
           OpIdx += 2)
        if (!Preds.insert(MI.getOperand(OpIdx + 1).getMBB()).second)
          DupIndices.push_back(OpIdx);

      // Now walk the duplicate indices, removing both the block and value.
      // Note that these are stored as a vector, making this element-wise
      // removal potentially quadratic.
      //
      // FIXME: It is really frustrating that we have to use a quadratic
      // removal algorithm here. There should be a better way, but the use-def
      // updates required make that impossible using the public API.
      //
      // Note that we have to process these backwards so that we don't
      // invalidate other indices with each removal.
      while (!DupIndices.empty()) {
        int OpIdx = DupIndices.pop_back_val();
        // Remove both the block and value operand, again in reverse order to
        // preserve indices.
        MI.RemoveOperand(OpIdx + 1);
        MI.RemoveOperand(OpIdx);
      }

      Preds.clear();
    }
}

/// Helper to scan a function for loads vulnerable to misspeculation that we
/// want to harden.
///
/// We use this to avoid making changes to functions where there is nothing we
/// need to do to harden against misspeculation.
static bool hasVulnerableLoad(MachineFunction &MF) {
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      // Loads within this basic block after an LFENCE are not at risk of
      // speculatively executing with invalid predicates from prior control
      // flow. So break out of this block but continue scanning the function.
      if (MI.getOpcode() == X86::LFENCE)
        break;

      // Looking for loads only.
      if (!MI.mayLoad())
        continue;

      // An MFENCE is modeled as a load but isn't vulnerable to misspeculation.
      if (MI.getOpcode() == X86::MFENCE)
        continue;

      // We found a load.
      return true;
    }
  }

  // No loads found.
  return false;
}

bool X86SpeculativeLoadHardeningPass::runOnMachineFunction(
    MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
                    << " **********\n");

  Subtarget = &MF.getSubtarget<X86Subtarget>();
  MRI = &MF.getRegInfo();
  TII = Subtarget->getInstrInfo();
  TRI = Subtarget->getRegisterInfo();

  // FIXME: Support for 32-bit.
  PS.emplace(MF, &X86::GR64_NOSPRegClass);

  if (MF.begin() == MF.end())
    // Nothing to do for a degenerate empty function...
    return false;

  // We support an alternative hardening technique based on a debug flag.
  if (HardenEdgesWithLFENCE) {
    hardenEdgesWithLFENCE(MF);
    return true;
  }

  // Create a dummy debug loc to use for all the generated code here.
  DebugLoc Loc;

  MachineBasicBlock &Entry = *MF.begin();
  auto EntryInsertPt = Entry.SkipPHIsLabelsAndDebug(Entry.begin());

  // Do a quick scan to see if we have any checkable loads.
  bool HasVulnerableLoad = hasVulnerableLoad(MF);

  // See if we have any conditional branching blocks that we will need to trace
  // predicate state through.
  SmallVector<BlockCondInfo, 16> Infos = collectBlockCondInfo(MF);

  // If we have no interesting conditions or loads, nothing to do here.
  if (!HasVulnerableLoad && Infos.empty())
    return true;

  // The poison value is required to be an all-ones value for many aspects of
  // this mitigation.
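  // (All-ones is what makes a single OR sufficient for poisoning: OR-ing the
  // state into an address or loaded value is a no-op while the state is zero,
  // but forces every bit to one once the state is poisoned.)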
  const int PoisonVal = -1;
  PS->PoisonReg = MRI->createVirtualRegister(PS->RC);
  BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV64ri32), PS->PoisonReg)
      .addImm(PoisonVal);
  ++NumInstsInserted;

  // If we have loads being hardened and we've asked for call and ret edges to
  // get a full fence-based mitigation, inject that fence.
  if (HasVulnerableLoad && FenceCallAndRet) {
    // We need to insert an LFENCE at the start of the function to suspend any
    // incoming misspeculation from the caller. This helps two-fold: the caller
    // may not have been protected as this code has been, and this code gets to
    // not take any specific action to protect across calls.
    // FIXME: We could skip this for functions which unconditionally return
    // a constant.
    BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::LFENCE));
    ++NumInstsInserted;
    ++NumLFENCEsInserted;
  }

  // If we guarded the entry with an LFENCE and have no conditionals to protect
  // in blocks, then we're done.
  if (FenceCallAndRet && Infos.empty())
    // We may have changed the function's code at this point to insert fences.
    return true;

  if (HardenInterprocedurally && !FenceCallAndRet) {
    // Set up the predicate state by extracting it from the incoming stack
    // pointer so we pick up any misspeculation in our caller.
    PS->InitialReg = extractPredStateFromSP(Entry, EntryInsertPt, Loc);
  } else {
    // Otherwise, just build the predicate state itself by zeroing a register
    // as we don't need any initial state.
    PS->InitialReg = MRI->createVirtualRegister(PS->RC);
    unsigned PredStateSubReg = MRI->createVirtualRegister(&X86::GR32RegClass);
    auto ZeroI = BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV32r0),
                         PredStateSubReg);
    ++NumInstsInserted;
    MachineOperand *ZeroEFLAGSDefOp =
        ZeroI->findRegisterDefOperand(X86::EFLAGS);
    assert(ZeroEFLAGSDefOp && ZeroEFLAGSDefOp->isImplicit() &&
           "Must have an implicit def of EFLAGS!");
    ZeroEFLAGSDefOp->setIsDead(true);
    BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::SUBREG_TO_REG),
            PS->InitialReg)
        .addImm(0)
        .addReg(PredStateSubReg)
        .addImm(X86::sub_32bit);
  }

  // We're going to need to trace predicate state throughout the function's
  // CFG. Prepare for this by setting up our initial state of PHIs with unique
  // predecessor entries and all the initial predicate state.
  canonicalizePHIOperands(MF);

  // Track the updated values in an SSA updater to rewrite into SSA form at the
  // end.
  PS->SSA.Initialize(PS->InitialReg);
  PS->SSA.AddAvailableValue(&Entry, PS->InitialReg);

  // Trace through the CFG.
  auto CMovs = tracePredStateThroughCFG(MF, Infos);

  // We may also enter basic blocks in this function via exception handling
  // control flow. Here, if we are hardening interprocedurally, we need to
  // re-capture the predicate state from the throwing code. In the Itanium ABI,
  // the throw will always look like a call to __cxa_throw and will have the
  // predicate state in the stack pointer, so extract fresh predicate state
  // from the stack pointer and make it available in SSA.
  // FIXME: Handle non-Itanium ABI EH models.
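  // (As a sketch of the interprocedural convention: the one-bit state is
  // OR-ed into the high bits of RSP before control leaves a function (see
  // mergePredStateIntoSP below) and re-derived from RSP's high bit on the
  // other side (see extractPredStateFromSP), so EH pads recover it the same
  // way a return site would.)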
  if (HardenInterprocedurally) {
    for (MachineBasicBlock &MBB : MF) {
      assert(!MBB.isEHScopeEntry() && "Only Itanium ABI EH supported!");
      assert(!MBB.isEHFuncletEntry() && "Only Itanium ABI EH supported!");
      assert(!MBB.isCleanupFuncletEntry() && "Only Itanium ABI EH supported!");
      if (!MBB.isEHPad())
        continue;
      PS->SSA.AddAvailableValue(
          &MBB,
          extractPredStateFromSP(MBB, MBB.SkipPHIsAndLabels(MBB.begin()), Loc));
    }
  }

  // If we are going to harden calls and jumps we need to unfold their memory
  // operands.
  if (HardenIndirectCallsAndJumps)
    unfoldCallAndJumpLoads(MF);

  // Now that we have the predicate state available at the start of each block
  // in the CFG, trace it through each block, hardening vulnerable instructions
  // as we go.
  tracePredStateThroughBlocksAndHarden(MF);

  // Now rewrite all the uses of the pred state using the SSA updater to insert
  // PHIs connecting the state between blocks along the CFG edges.
  for (MachineInstr *CMovI : CMovs)
    for (MachineOperand &Op : CMovI->operands()) {
      if (!Op.isReg() || Op.getReg() != PS->InitialReg)
        continue;

      PS->SSA.RewriteUse(Op);
    }

  LLVM_DEBUG(dbgs() << "Final speculative load hardened function:\n"; MF.dump();
             dbgs() << "\n"; MF.verify(this));
  return true;
}

/// Implements the naive hardening approach of putting an LFENCE after every
/// potentially mis-predicted control flow construct.
///
/// We include this as an alternative mostly for the purpose of comparison. The
/// performance impact of this is expected to be extremely severe and not
/// practical for any real-world users.
void X86SpeculativeLoadHardeningPass::hardenEdgesWithLFENCE(
    MachineFunction &MF) {
  // First, we scan the function looking for blocks that are reached along
  // edges that we might want to harden.
  SmallSetVector<MachineBasicBlock *, 8> Blocks;
  for (MachineBasicBlock &MBB : MF) {
    // If there is zero or one successor, nothing to do here.
    if (MBB.succ_size() <= 1)
      continue;

    // Skip blocks unless their terminators start with a branch. Other
    // terminators don't seem interesting for guarding against misspeculation.
    auto TermIt = MBB.getFirstTerminator();
    if (TermIt == MBB.end() || !TermIt->isBranch())
      continue;

    // Add all the non-EH-pad successors to the blocks we want to harden. We
    // skip EH pads because there isn't really a condition of interest on
    // entering.
    for (MachineBasicBlock *SuccMBB : MBB.successors())
      if (!SuccMBB->isEHPad())
        Blocks.insert(SuccMBB);
  }

  for (MachineBasicBlock *MBB : Blocks) {
    auto InsertPt = MBB->SkipPHIsAndLabels(MBB->begin());
    BuildMI(*MBB, InsertPt, DebugLoc(), TII->get(X86::LFENCE));
    ++NumInstsInserted;
    ++NumLFENCEsInserted;
  }
}

SmallVector<X86SpeculativeLoadHardeningPass::BlockCondInfo, 16>
X86SpeculativeLoadHardeningPass::collectBlockCondInfo(MachineFunction &MF) {
  SmallVector<BlockCondInfo, 16> Infos;

  // Walk the function and build up a summary for each block's conditions that
  // we need to trace through.
  for (MachineBasicBlock &MBB : MF) {
    // If there is zero or one successor, nothing to do here.
    if (MBB.succ_size() <= 1)
      continue;

    // We want to reliably handle any conditional branch terminators in the
    // MBB, so we manually analyze the branch.
    // We can handle all of the permutations here, including ones that
    // `analyzeBranch` cannot.
    //
    // The approach is to walk backwards across the terminators, resetting at
    // any unconditional non-indirect branch, and track all conditional edges
    // to basic blocks as well as the fallthrough or unconditional successor
    // edge. For each conditional edge, we track the target and the opposite
    // condition code in order to inject a "no-op" cmov into that successor
    // that will harden the predicate. For the fallthrough/unconditional
    // edge, we inject a separate cmov for each conditional branch with
    // matching condition codes. This effectively implements an "and" of the
    // condition flags, even if there isn't a single condition flag that would
    // directly implement that. We don't bother trying to optimize either of
    // these cases because if such an optimization is possible, LLVM should
    // have optimized the conditional *branches* in that way already to reduce
    // instruction count. This late, we simply assume the minimal number of
    // branch instructions is being emitted and use that to guide our cmov
    // insertion.

    BlockCondInfo Info = {&MBB, {}, nullptr};

    // Now walk backwards through the terminators and build up successors they
    // reach and the conditions.
    for (MachineInstr &MI : llvm::reverse(MBB)) {
      // Once we've handled all the terminators, we're done.
      if (!MI.isTerminator())
        break;

      // If we see a non-branch terminator, we can't handle anything so bail.
      if (!MI.isBranch()) {
        Info.CondBrs.clear();
        break;
      }

      // If we see an unconditional branch, reset our state, clear any
      // fallthrough, and set this as the "else" successor.
      if (MI.getOpcode() == X86::JMP_1) {
        Info.CondBrs.clear();
        Info.UncondBr = &MI;
        continue;
      }

      // If we get an invalid condition, we have an indirect branch or some
      // other unanalyzable "fallthrough" case. We model this as a nullptr for
      // the destination so we can still guard any conditional successors.
      // Consider code sequences like:
      // ```
      //   jCC L1
      //   jmpq *%rax
      // ```
      // We still want to harden the edge to `L1`.
      if (X86::getCondFromBranchOpc(MI.getOpcode()) == X86::COND_INVALID) {
        Info.CondBrs.clear();
        Info.UncondBr = &MI;
        continue;
      }

      // We have a vanilla conditional branch, add it to our list.
      Info.CondBrs.push_back(&MI);
    }
    if (Info.CondBrs.empty()) {
      ++NumBranchesUntraced;
      LLVM_DEBUG(dbgs() << "WARNING: unable to secure successors of block:\n";
                 MBB.dump());
      continue;
    }

    Infos.push_back(Info);
  }

  return Infos;
}

/// Trace the predicate state through the CFG, instrumenting each conditional
/// branch such that misspeculation through an edge will poison the predicate
/// state.
///
/// Returns the list of inserted CMov instructions so that they can have their
/// uses of the predicate state rewritten into proper SSA form once it is
/// complete.
SmallVector<MachineInstr *, 16>
X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG(
    MachineFunction &MF, ArrayRef<BlockCondInfo> Infos) {
  // Collect the inserted cmov instructions so we can rewrite their uses of the
  // predicate state into SSA form.
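  // As an illustrative sketch: for a block ending in `jne %target`, the
  // checking block spliced onto the taken edge will receive roughly
  //   %newstate = CMOVE64rr %state, %poison
  // i.e. if the flags say the branch should *not* have been taken, we must be
  // misspeculating, and the state register becomes all-ones.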
  SmallVector<MachineInstr *, 16> CMovs;

  // Now walk all of the basic blocks looking for ones that end in conditional
  // jumps where we need to update this register along each edge.
  for (const BlockCondInfo &Info : Infos) {
    MachineBasicBlock &MBB = *Info.MBB;
    const SmallVectorImpl<MachineInstr *> &CondBrs = Info.CondBrs;
    MachineInstr *UncondBr = Info.UncondBr;

    LLVM_DEBUG(dbgs() << "Tracing predicate through block: " << MBB.getName()
                      << "\n");
    ++NumCondBranchesTraced;

    // Compute the non-conditional successor as either the target of any
    // unconditional branch or the layout successor.
    MachineBasicBlock *UncondSucc =
        UncondBr ? (UncondBr->getOpcode() == X86::JMP_1
                        ? UncondBr->getOperand(0).getMBB()
                        : nullptr)
                 : &*std::next(MachineFunction::iterator(&MBB));

    // Count how many edges there are to any given successor.
    SmallDenseMap<MachineBasicBlock *, int> SuccCounts;
    if (UncondSucc)
      ++SuccCounts[UncondSucc];
    for (auto *CondBr : CondBrs)
      ++SuccCounts[CondBr->getOperand(0).getMBB()];

    // A lambda to insert cmov instructions into a block checking all of the
    // condition codes in a sequence.
    auto BuildCheckingBlockForSuccAndConds =
        [&](MachineBasicBlock &MBB, MachineBasicBlock &Succ, int SuccCount,
            MachineInstr *Br, MachineInstr *&UncondBr,
            ArrayRef<X86::CondCode> Conds) {
          // First, we split the edge to insert the checking block into a safe
          // location.
          auto &CheckingMBB =
              (SuccCount == 1 && Succ.pred_size() == 1)
                  ? Succ
                  : splitEdge(MBB, Succ, SuccCount, Br, UncondBr, *TII);

          bool LiveEFLAGS = Succ.isLiveIn(X86::EFLAGS);
          if (!LiveEFLAGS)
            CheckingMBB.addLiveIn(X86::EFLAGS);

          // Now insert the cmovs to implement the checks.
          auto InsertPt = CheckingMBB.begin();
          assert((InsertPt == CheckingMBB.end() || !InsertPt->isPHI()) &&
                 "Should never have a PHI in the initial checking block as it "
                 "always has a single predecessor!");

          // We will wire each cmov to the next, but need to start with the
          // incoming pred state.
          unsigned CurStateReg = PS->InitialReg;

          for (X86::CondCode Cond : Conds) {
            int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
            auto CMovOp = X86::getCMovFromCond(Cond, PredStateSizeInBytes);

            unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
            // Note that we intentionally use an empty debug location so that
            // this picks up the preceding location.
            auto CMovI = BuildMI(CheckingMBB, InsertPt, DebugLoc(),
                                 TII->get(CMovOp), UpdatedStateReg)
                             .addReg(CurStateReg)
                             .addReg(PS->PoisonReg);
            // If this is the last cmov and the EFLAGS weren't originally
            // live-in, mark them as killed.
            if (!LiveEFLAGS && Cond == Conds.back())
              CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);

            ++NumInstsInserted;
            LLVM_DEBUG(dbgs() << "  Inserting cmov: "; CMovI->dump();
                       dbgs() << "\n");

            // The first one of the cmovs will be using the top level
            // `PredStateReg` and need to get rewritten into SSA form.
            if (CurStateReg == PS->InitialReg)
              CMovs.push_back(&*CMovI);

            // The next cmov should start from this one's def.
            CurStateReg = UpdatedStateReg;
          }

          // And put the last one into the available values for SSA form of our
          // predicate state.
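          // (At this point CurStateReg holds the state checked against every
          // condition code required for this edge.)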
          PS->SSA.AddAvailableValue(&CheckingMBB, CurStateReg);
        };

    std::vector<X86::CondCode> UncondCodeSeq;
    for (auto *CondBr : CondBrs) {
      MachineBasicBlock &Succ = *CondBr->getOperand(0).getMBB();
      int &SuccCount = SuccCounts[&Succ];

      X86::CondCode Cond = X86::getCondFromBranchOpc(CondBr->getOpcode());
      X86::CondCode InvCond = X86::GetOppositeBranchCondition(Cond);
      UncondCodeSeq.push_back(Cond);

      BuildCheckingBlockForSuccAndConds(MBB, Succ, SuccCount, CondBr, UncondBr,
                                        {InvCond});

      // Decrement the successor count now that we've split one of the edges.
      // We need to keep the count of edges to the successor accurate in order
      // to know above when to *replace* the successor in the CFG vs. just
      // adding the new successor.
      --SuccCount;
    }

    // Since we may have split edges and changed the number of successors,
    // normalize the probabilities. This avoids doing it each time we split an
    // edge.
    MBB.normalizeSuccProbs();

    // Finally, we need to insert cmovs into the "fallthrough" edge. Here, we
    // need to intersect the other condition codes. We can do this by just
    // doing a cmov for each one.
    if (!UncondSucc)
      // If we have no fallthrough to protect (perhaps it is an indirect jump?)
      // just skip this and continue.
      continue;

    assert(SuccCounts[UncondSucc] == 1 &&
           "We should never have more than one edge to the unconditional "
           "successor at this point because every other edge must have been "
           "split above!");

    // Sort and unique the codes to minimize them.
    llvm::sort(UncondCodeSeq.begin(), UncondCodeSeq.end());
    UncondCodeSeq.erase(std::unique(UncondCodeSeq.begin(), UncondCodeSeq.end()),
                        UncondCodeSeq.end());

    // Build a checking version of the successor.
    BuildCheckingBlockForSuccAndConds(MBB, *UncondSucc, /*SuccCount*/ 1,
                                      UncondBr, UncondBr, UncondCodeSeq);
  }

  return CMovs;
}

/// Compute the register class for the unfolded load.
///
/// FIXME: This should probably live in X86InstrInfo, potentially by adding
/// a way to unfold into a newly created vreg rather than requiring a register
/// input.
static const TargetRegisterClass *
getRegClassForUnfoldedLoad(MachineFunction &MF, const X86InstrInfo &TII,
                           unsigned Opcode) {
  unsigned Index;
  unsigned UnfoldedOpc = TII.getOpcodeAfterMemoryUnfold(
      Opcode, /*UnfoldLoad*/ true, /*UnfoldStore*/ false, &Index);
  const MCInstrDesc &MCID = TII.get(UnfoldedOpc);
  return TII.getRegClass(MCID, Index, &TII.getRegisterInfo(), MF);
}

void X86SpeculativeLoadHardeningPass::unfoldCallAndJumpLoads(
    MachineFunction &MF) {
  for (MachineBasicBlock &MBB : MF)
    for (auto MII = MBB.instr_begin(), MIE = MBB.instr_end(); MII != MIE;) {
      // Grab a reference and increment the iterator so we can remove this
      // instruction if needed without disturbing the iteration.
      MachineInstr &MI = *MII++;

      // Must either be a call or a branch.
      if (!MI.isCall() && !MI.isBranch())
        continue;
      // We only care about loading variants of these instructions.
      if (!MI.mayLoad())
        continue;

      switch (MI.getOpcode()) {
      default: {
        LLVM_DEBUG(
            dbgs() << "ERROR: Found an unexpected loading branch or call "
                      "instruction:\n";
            MI.dump(); dbgs() << "\n");
        report_fatal_error("Unexpected loading branch or call!");
      }

      case X86::FARCALL16m:
      case X86::FARCALL32m:
      case X86::FARCALL64:
      case X86::FARJMP16m:
      case X86::FARJMP32m:
      case X86::FARJMP64:
        // We cannot mitigate far jumps or calls, but we also don't expect them
        // to be vulnerable to Spectre v1.2 style attacks.
        continue;

      case X86::CALL16m:
      case X86::CALL16m_NT:
      case X86::CALL32m:
      case X86::CALL32m_NT:
      case X86::CALL64m:
      case X86::CALL64m_NT:
      case X86::JMP16m:
      case X86::JMP16m_NT:
      case X86::JMP32m:
      case X86::JMP32m_NT:
      case X86::JMP64m:
      case X86::JMP64m_NT:
      case X86::TAILJMPm64:
      case X86::TAILJMPm64_REX:
      case X86::TAILJMPm:
      case X86::TCRETURNmi64:
      case X86::TCRETURNmi: {
        // Use the generic unfold logic now that we know we're dealing with
        // expected instructions.
        // FIXME: We don't have test coverage for all of these!
        auto *UnfoldedRC = getRegClassForUnfoldedLoad(MF, *TII, MI.getOpcode());
        if (!UnfoldedRC) {
          LLVM_DEBUG(dbgs()
                         << "ERROR: Unable to unfold load from instruction:\n";
                     MI.dump(); dbgs() << "\n");
          report_fatal_error("Unable to unfold load!");
        }
        unsigned Reg = MRI->createVirtualRegister(UnfoldedRC);
        SmallVector<MachineInstr *, 2> NewMIs;
        // If we were able to compute an unfolded reg class, any failure here
        // is just a programming error so just assert.
        bool Unfolded =
            TII->unfoldMemoryOperand(MF, MI, Reg, /*UnfoldLoad*/ true,
                                     /*UnfoldStore*/ false, NewMIs);
        (void)Unfolded;
        assert(Unfolded &&
               "Computed unfolded register class but failed to unfold");
        // Now stitch the new instructions into place and erase the old one.
        for (auto *NewMI : NewMIs)
          MBB.insert(MI.getIterator(), NewMI);
        MI.eraseFromParent();
        LLVM_DEBUG({
          dbgs() << "Unfolded load successfully into:\n";
          for (auto *NewMI : NewMIs) {
            NewMI->dump();
            dbgs() << "\n";
          }
        });
        continue;
      }
      }
      llvm_unreachable("Escaped switch with default!");
    }
}

/// Returns true if the instruction has no behavior (specified or otherwise)
/// that is based on the value of any of its register operands.
///
/// A classic example of something that is inherently not data invariant is an
/// indirect jump -- the destination is loaded into icache based on the bits
/// set in the jump destination register.
///
/// FIXME: This should become part of our instruction tables.
static bool isDataInvariant(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    // By default, assume that the instruction is not data invariant.
    return false;

  // Some target-independent operations that trivially lower to data-invariant
  // instructions.
  case TargetOpcode::COPY:
  case TargetOpcode::INSERT_SUBREG:
  case TargetOpcode::SUBREG_TO_REG:
    return true;

  // On x86 it is believed that imul is constant time w.r.t. its input data.
  // However, it sets flags and is perhaps the most surprisingly constant-time
  // operation, so we call it out here separately.
  case X86::IMUL16rr:
  case X86::IMUL16rri8:
  case X86::IMUL16rri:
  case X86::IMUL32rr:
  case X86::IMUL32rri8:
  case X86::IMUL32rri:
  case X86::IMUL64rr:
  case X86::IMUL64rri32:
  case X86::IMUL64rri8:

  // Bit scanning and counting instructions that are somewhat surprisingly
  // constant time as they scan across bits and do other fairly complex
  // operations like popcnt, but are believed to be constant time on x86.
  // However, these set flags.
  case X86::BSF16rr:
  case X86::BSF32rr:
  case X86::BSF64rr:
  case X86::BSR16rr:
  case X86::BSR32rr:
  case X86::BSR64rr:
  case X86::LZCNT16rr:
  case X86::LZCNT32rr:
  case X86::LZCNT64rr:
  case X86::POPCNT16rr:
  case X86::POPCNT32rr:
  case X86::POPCNT64rr:
  case X86::TZCNT16rr:
  case X86::TZCNT32rr:
  case X86::TZCNT64rr:

  // Bit manipulation instructions are effectively combinations of basic
  // arithmetic ops, and should still execute in constant time. These also
  // set flags.
  case X86::BLCFILL32rr:
  case X86::BLCFILL64rr:
  case X86::BLCI32rr:
  case X86::BLCI64rr:
  case X86::BLCIC32rr:
  case X86::BLCIC64rr:
  case X86::BLCMSK32rr:
  case X86::BLCMSK64rr:
  case X86::BLCS32rr:
  case X86::BLCS64rr:
  case X86::BLSFILL32rr:
  case X86::BLSFILL64rr:
  case X86::BLSI32rr:
  case X86::BLSI64rr:
  case X86::BLSIC32rr:
  case X86::BLSIC64rr:
  case X86::BLSMSK32rr:
  case X86::BLSMSK64rr:
  case X86::BLSR32rr:
  case X86::BLSR64rr:
  case X86::TZMSK32rr:
  case X86::TZMSK64rr:

  // Bit extracting and clearing instructions should execute in constant time,
  // and set flags.
  case X86::BEXTR32rr:
  case X86::BEXTR64rr:
  case X86::BEXTRI32ri:
  case X86::BEXTRI64ri:
  case X86::BZHI32rr:
  case X86::BZHI64rr:

  // Shift and rotate.
  case X86::ROL8r1:  case X86::ROL16r1:  case X86::ROL32r1:  case X86::ROL64r1:
  case X86::ROL8rCL: case X86::ROL16rCL: case X86::ROL32rCL: case X86::ROL64rCL:
  case X86::ROL8ri:  case X86::ROL16ri:  case X86::ROL32ri:  case X86::ROL64ri:
  case X86::ROR8r1:  case X86::ROR16r1:  case X86::ROR32r1:  case X86::ROR64r1:
  case X86::ROR8rCL: case X86::ROR16rCL: case X86::ROR32rCL: case X86::ROR64rCL:
  case X86::ROR8ri:  case X86::ROR16ri:  case X86::ROR32ri:  case X86::ROR64ri:
  case X86::SAR8r1:  case X86::SAR16r1:  case X86::SAR32r1:  case X86::SAR64r1:
  case X86::SAR8rCL: case X86::SAR16rCL: case X86::SAR32rCL: case X86::SAR64rCL:
  case X86::SAR8ri:  case X86::SAR16ri:  case X86::SAR32ri:  case X86::SAR64ri:
  case X86::SHL8r1:  case X86::SHL16r1:  case X86::SHL32r1:  case X86::SHL64r1:
  case X86::SHL8rCL: case X86::SHL16rCL: case X86::SHL32rCL: case X86::SHL64rCL:
  case X86::SHL8ri:  case X86::SHL16ri:  case X86::SHL32ri:  case X86::SHL64ri:
  case X86::SHR8r1:  case X86::SHR16r1:  case X86::SHR32r1:  case X86::SHR64r1:
  case X86::SHR8rCL: case X86::SHR16rCL: case X86::SHR32rCL: case X86::SHR64rCL:
  case X86::SHR8ri:  case X86::SHR16ri:  case X86::SHR32ri:  case X86::SHR64ri:
  case X86::SHLD16rrCL: case X86::SHLD32rrCL: case X86::SHLD64rrCL:
  case X86::SHLD16rri8: case X86::SHLD32rri8: case X86::SHLD64rri8:
  case X86::SHRD16rrCL: case X86::SHRD32rrCL: case X86::SHRD64rrCL:
  case X86::SHRD16rri8: case X86::SHRD32rri8: case X86::SHRD64rri8:

  // Basic arithmetic is constant time on the input but does set flags.
  case X86::ADC8rr:  case X86::ADC8ri:
  case X86::ADC16rr: case X86::ADC16ri: case X86::ADC16ri8:
  case X86::ADC32rr: case X86::ADC32ri: case X86::ADC32ri8:
  case X86::ADC64rr: case X86::ADC64ri8: case X86::ADC64ri32:
  case X86::ADD8rr:  case X86::ADD8ri:
  case X86::ADD16rr: case X86::ADD16ri: case X86::ADD16ri8:
  case X86::ADD32rr: case X86::ADD32ri: case X86::ADD32ri8:
  case X86::ADD64rr: case X86::ADD64ri8: case X86::ADD64ri32:
  case X86::AND8rr:  case X86::AND8ri:
  case X86::AND16rr: case X86::AND16ri: case X86::AND16ri8:
  case X86::AND32rr: case X86::AND32ri: case X86::AND32ri8:
  case X86::AND64rr: case X86::AND64ri8: case X86::AND64ri32:
  case X86::OR8rr:   case X86::OR8ri:
  case X86::OR16rr:  case X86::OR16ri: case X86::OR16ri8:
  case X86::OR32rr:  case X86::OR32ri: case X86::OR32ri8:
  case X86::OR64rr:  case X86::OR64ri8: case X86::OR64ri32:
  case X86::SBB8rr:  case X86::SBB8ri:
  case X86::SBB16rr: case X86::SBB16ri: case X86::SBB16ri8:
  case X86::SBB32rr: case X86::SBB32ri: case X86::SBB32ri8:
  case X86::SBB64rr: case X86::SBB64ri8: case X86::SBB64ri32:
  case X86::SUB8rr:  case X86::SUB8ri:
  case X86::SUB16rr: case X86::SUB16ri: case X86::SUB16ri8:
  case X86::SUB32rr: case X86::SUB32ri: case X86::SUB32ri8:
  case X86::SUB64rr: case X86::SUB64ri8: case X86::SUB64ri32:
  case X86::XOR8rr:  case X86::XOR8ri:
  case X86::XOR16rr: case X86::XOR16ri: case X86::XOR16ri8:
  case X86::XOR32rr: case X86::XOR32ri: case X86::XOR32ri8:
  case X86::XOR64rr: case X86::XOR64ri8: case X86::XOR64ri32:
  // Arithmetic with just 32-bit and 64-bit variants and no immediates.
  case X86::ADCX32rr: case X86::ADCX64rr:
  case X86::ADOX32rr: case X86::ADOX64rr:
  case X86::ANDN32rr: case X86::ANDN64rr:
  // Unary arithmetic operations.
  case X86::DEC8r: case X86::DEC16r: case X86::DEC32r: case X86::DEC64r:
  case X86::INC8r: case X86::INC16r: case X86::INC32r: case X86::INC64r:
  case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r:
    // Check whether the EFLAGS implicit-def is dead. We assume that this will
    // always find the implicit-def because this code should only be reached
    // for instructions that do in fact implicitly def this.
    if (!MI.findRegisterDefOperand(X86::EFLAGS)->isDead()) {
      // If we would clobber EFLAGS that are used, just bail for now.
      LLVM_DEBUG(dbgs() << "    Unable to harden post-load due to EFLAGS: ";
                 MI.dump(); dbgs() << "\n");
      return false;
    }

    // Otherwise, fallthrough to handle these the same as instructions that
    // don't set EFLAGS.
    LLVM_FALLTHROUGH;

  // Unlike other arithmetic, NOT doesn't set EFLAGS.
  case X86::NOT8r: case X86::NOT16r: case X86::NOT32r: case X86::NOT64r:

  // Various move instructions used to zero or sign extend things. Note that we
  // intentionally don't support the _NOREX variants as we can't handle that
  // register constraint anyways.
  case X86::MOVSX16rr8:
  case X86::MOVSX32rr8: case X86::MOVSX32rr16:
  case X86::MOVSX64rr8: case X86::MOVSX64rr16: case X86::MOVSX64rr32:
  case X86::MOVZX16rr8:
  case X86::MOVZX32rr8: case X86::MOVZX32rr16:
  case X86::MOVZX64rr8: case X86::MOVZX64rr16:
  case X86::MOV32rr:

  // Arithmetic instructions that are both constant time and don't set flags.
  case X86::RORX32ri:
  case X86::RORX64ri:
  case X86::SARX32rr:
  case X86::SARX64rr:
  case X86::SHLX32rr:
  case X86::SHLX64rr:
  case X86::SHRX32rr:
  case X86::SHRX64rr:

  // LEA doesn't actually access memory, and its arithmetic is constant time.
  case X86::LEA16r:
  case X86::LEA32r:
  case X86::LEA64_32r:
  case X86::LEA64r:
    return true;
  }
}

/// Returns true if the instruction has no behavior (specified or otherwise)
/// that is based on the value loaded from memory or the value of any
/// non-address register operands.
///
/// For example, the latency of the instruction must not depend on the
/// particular bits set in any of the registers *or* on any of the bits loaded
/// from memory.
///
/// A classic example of something that is inherently not data invariant is an
/// indirect jump -- the destination is loaded into icache based on the bits
/// set in the jump destination register.
///
/// FIXME: This should become part of our instruction tables.
static bool isDataInvariantLoad(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    // By default, assume that the load will immediately leak.
    return false;

  // On x86 it is believed that imul is constant time w.r.t. the loaded data.
  // However, it sets flags and is perhaps the most surprisingly constant-time
  // operation, so we call it out here separately.
  case X86::IMUL16rm:
  case X86::IMUL16rmi8:
  case X86::IMUL16rmi:
  case X86::IMUL32rm:
  case X86::IMUL32rmi8:
  case X86::IMUL32rmi:
  case X86::IMUL64rm:
  case X86::IMUL64rmi32:
  case X86::IMUL64rmi8:

  // Bit scanning and counting instructions that are somewhat surprisingly
  // constant time as they scan across bits and do other fairly complex
  // operations like popcnt, but are believed to be constant time on x86.
  // However, these set flags.
  case X86::BSF16rm:
  case X86::BSF32rm:
  case X86::BSF64rm:
  case X86::BSR16rm:
  case X86::BSR32rm:
  case X86::BSR64rm:
  case X86::LZCNT16rm:
  case X86::LZCNT32rm:
  case X86::LZCNT64rm:
  case X86::POPCNT16rm:
  case X86::POPCNT32rm:
  case X86::POPCNT64rm:
  case X86::TZCNT16rm:
  case X86::TZCNT32rm:
  case X86::TZCNT64rm:

  // Bit manipulation instructions are effectively combinations of basic
  // arithmetic ops, and should still execute in constant time. These also
  // set flags.
  case X86::BLCFILL32rm:
  case X86::BLCFILL64rm:
  case X86::BLCI32rm:
  case X86::BLCI64rm:
  case X86::BLCIC32rm:
  case X86::BLCIC64rm:
  case X86::BLCMSK32rm:
  case X86::BLCMSK64rm:
  case X86::BLCS32rm:
  case X86::BLCS64rm:
  case X86::BLSFILL32rm:
  case X86::BLSFILL64rm:
  case X86::BLSI32rm:
  case X86::BLSI64rm:
  case X86::BLSIC32rm:
  case X86::BLSIC64rm:
  case X86::BLSMSK32rm:
  case X86::BLSMSK64rm:
  case X86::BLSR32rm:
  case X86::BLSR64rm:
  case X86::TZMSK32rm:
  case X86::TZMSK64rm:

  // Bit extracting and clearing instructions should execute in constant time,
  // and set flags.
  case X86::BEXTR32rm:
  case X86::BEXTR64rm:
  case X86::BEXTRI32mi:
  case X86::BEXTRI64mi:
  case X86::BZHI32rm:
  case X86::BZHI64rm:

  // Basic arithmetic is constant time on the input but does set flags.
  case X86::ADC8rm:
  case X86::ADC16rm:
  case X86::ADC32rm:
  case X86::ADC64rm:
  case X86::ADCX32rm:
  case X86::ADCX64rm:
  case X86::ADD8rm:
  case X86::ADD16rm:
  case X86::ADD32rm:
  case X86::ADD64rm:
  case X86::ADOX32rm:
  case X86::ADOX64rm:
  case X86::AND8rm:
  case X86::AND16rm:
  case X86::AND32rm:
  case X86::AND64rm:
  case X86::ANDN32rm:
  case X86::ANDN64rm:
  case X86::OR8rm:
  case X86::OR16rm:
  case X86::OR32rm:
  case X86::OR64rm:
  case X86::SBB8rm:
  case X86::SBB16rm:
  case X86::SBB32rm:
  case X86::SBB64rm:
  case X86::SUB8rm:
  case X86::SUB16rm:
  case X86::SUB32rm:
  case X86::SUB64rm:
  case X86::XOR8rm:
  case X86::XOR16rm:
  case X86::XOR32rm:
  case X86::XOR64rm:
    // Check whether the EFLAGS implicit-def is dead. We assume that this will
    // always find the implicit-def because this code should only be reached
    // for instructions that do in fact implicitly def this.
    if (!MI.findRegisterDefOperand(X86::EFLAGS)->isDead()) {
      // If we would clobber EFLAGS that are used, just bail for now.
      LLVM_DEBUG(dbgs() << "    Unable to harden post-load due to EFLAGS: ";
                 MI.dump(); dbgs() << "\n");
      return false;
    }

    // Otherwise, fallthrough to handle these the same as instructions that
    // don't set EFLAGS.
    LLVM_FALLTHROUGH;

  // Integer multiply w/o affecting flags is still believed to be constant
  // time on x86. Called out separately as this is among the most surprising
  // instructions to exhibit that behavior.
  case X86::MULX32rm:
  case X86::MULX64rm:

  // Arithmetic instructions that are both constant time and don't set flags.
  case X86::RORX32mi:
  case X86::RORX64mi:
  case X86::SARX32rm:
  case X86::SARX64rm:
  case X86::SHLX32rm:
  case X86::SHLX64rm:
  case X86::SHRX32rm:
  case X86::SHRX64rm:

  // Conversions are believed to be constant time and don't set flags.
  case X86::CVTTSD2SI64rm: case X86::VCVTTSD2SI64rm: case X86::VCVTTSD2SI64Zrm:
  case X86::CVTTSD2SIrm:   case X86::VCVTTSD2SIrm:   case X86::VCVTTSD2SIZrm:
  case X86::CVTTSS2SI64rm: case X86::VCVTTSS2SI64rm: case X86::VCVTTSS2SI64Zrm:
  case X86::CVTTSS2SIrm:   case X86::VCVTTSS2SIrm:   case X86::VCVTTSS2SIZrm:
  case X86::CVTSI2SDrm:    case X86::VCVTSI2SDrm:    case X86::VCVTSI2SDZrm:
  case X86::CVTSI2SSrm:    case X86::VCVTSI2SSrm:    case X86::VCVTSI2SSZrm:
  case X86::CVTSI642SDrm:  case X86::VCVTSI642SDrm:  case X86::VCVTSI642SDZrm:
  case X86::CVTSI642SSrm:  case X86::VCVTSI642SSrm:  case X86::VCVTSI642SSZrm:
  case X86::CVTSS2SDrm:    case X86::VCVTSS2SDrm:    case X86::VCVTSS2SDZrm:
  case X86::CVTSD2SSrm:    case X86::VCVTSD2SSrm:    case X86::VCVTSD2SSZrm:
  // AVX512 added unsigned integer conversions.
  case X86::VCVTTSD2USI64Zrm:
  case X86::VCVTTSD2USIZrm:
  case X86::VCVTTSS2USI64Zrm:
  case X86::VCVTTSS2USIZrm:
  case X86::VCVTUSI2SDZrm:
  case X86::VCVTUSI642SDZrm:
  case X86::VCVTUSI2SSZrm:
  case X86::VCVTUSI642SSZrm:

  // Loads to register don't set flags.
  case X86::MOV8rm:
  case X86::MOV8rm_NOREX:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::MOVSX16rm8:
  case X86::MOVSX32rm16:
  case X86::MOVSX32rm8:
  case X86::MOVSX32rm8_NOREX:
  case X86::MOVSX64rm16:
  case X86::MOVSX64rm32:
  case X86::MOVSX64rm8:
  case X86::MOVZX16rm8:
  case X86::MOVZX32rm16:
  case X86::MOVZX32rm8:
  case X86::MOVZX32rm8_NOREX:
  case X86::MOVZX64rm16:
  case X86::MOVZX64rm8:
    return true;
  }
}

static bool isEFLAGSLive(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                         const TargetRegisterInfo &TRI) {
  // Check if EFLAGS are alive by seeing if there is a def of them or they are
  // live-in, and then seeing if that def is in turn used.
  for (MachineInstr &MI : llvm::reverse(llvm::make_range(MBB.begin(), I))) {
    if (MachineOperand *DefOp = MI.findRegisterDefOperand(X86::EFLAGS)) {
      // If the def is dead, then EFLAGS is not live.
      if (DefOp->isDead())
        return false;

      // Otherwise we've def'ed it, and it is live.
      return true;
    }
    // While at this instruction, also check if we use and kill EFLAGS
    // which means it isn't live.
    if (MI.killsRegister(X86::EFLAGS, &TRI))
      return false;
  }

  // If we didn't find anything conclusive (neither definitely alive nor
  // definitely dead) return whether it lives into the block.
  return MBB.isLiveIn(X86::EFLAGS);
}

/// Trace the predicate state through each of the blocks in the function,
/// hardening everything necessary along the way.
///
/// We call this routine once the initial predicate state has been established
/// for each basic block in the function in the SSA updater. This routine
/// traces it through the instructions within each basic block, and for
/// non-returning blocks informs the SSA updater about the final state that
/// lives out of the block. Along the way, it hardens any vulnerable
/// instruction using the currently valid predicate state. We have to do these
/// two things together because the SSA updater only works across blocks.
/// Within a block, we track the current predicate state directly and update
/// it as it changes.
///
/// This operates in two passes over each block. First, we analyze the loads in
/// the block to determine which strategy will be used to harden them:
/// hardening the address or hardening the loaded value when loaded into a
/// register amenable to hardening. We have to process these first because the
/// two strategies may interact -- later hardening may change what strategy we
/// wish to use. We also analyze data dependencies between loads and avoid
/// hardening those loads that are data dependent on a load with a hardened
/// address. We also skip hardening loads already behind an LFENCE as that is
/// sufficient to harden them against misspeculation.
///
/// Second, we actively trace the predicate state through the block, applying
/// the hardening steps we determined necessary in the first pass as we go.
///
/// These two passes are applied to each basic block. We operate one block at a
/// time to simplify reasoning about reachability and sequencing.
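///
/// As a rough sketch (not the exact emitted sequence), a load such as
///   movq (%rsi), %rax
/// is hardened either by masking its address with the predicate state first:
///   orq %state, %rsi
///   movq (%rsi), %rax
/// or, when the destination is a GPR amenable to it, by masking the loaded
/// value afterwards:
///   movq (%rsi), %rax
///   orq %state, %rax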
void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
    MachineFunction &MF) {
  SmallPtrSet<MachineInstr *, 16> HardenPostLoad;
  SmallPtrSet<MachineInstr *, 16> HardenLoadAddr;

  SmallSet<unsigned, 16> HardenedAddrRegs;

  SmallDenseMap<unsigned, unsigned, 32> AddrRegToHardenedReg;

  // Track the set of load-dependent registers through the basic block. Because
  // the values of these registers have an existing data dependency on a loaded
  // value which we would have checked, we can omit any checks on them.
  SparseBitVector<> LoadDepRegs;

  for (MachineBasicBlock &MBB : MF) {
    // The first pass over the block: collect all the loads which can have
    // their loaded value hardened and all the loads that instead need their
    // address hardened. During this walk we propagate load dependence for
    // address-hardened loads and also look for LFENCE to stop hardening
    // wherever possible. When deciding whether to harden the loaded value or
    // the address, we check to see if any registers used in the address will
    // have been hardened at this point and if so, harden any remaining
    // address registers as that often successfully re-uses hardened addresses
    // and minimizes instructions.
    //
    // FIXME: We should consider an aggressive mode where we keep as many
    // loads value-hardened as possible even when some address register
    // hardening would be free (due to reuse).
    //
    // Note that we only need this pass if we are actually hardening loads.
    if (HardenLoads)
      for (MachineInstr &MI : MBB) {
        // We naively assume that all def'ed registers of an instruction have
        // a data dependency on all of their operands.
        // FIXME: Do a more careful analysis of x86 to build a conservative
        // model here.
        if (llvm::any_of(MI.uses(), [&](MachineOperand &Op) {
              return Op.isReg() && LoadDepRegs.test(Op.getReg());
            }))
          for (MachineOperand &Def : MI.defs())
            if (Def.isReg())
              LoadDepRegs.set(Def.getReg());

        // Both Intel and AMD are guiding that they will change the semantics
        // of LFENCE to be a speculation barrier, so if we see an LFENCE,
        // there is no more need to guard things in this block.
        if (MI.getOpcode() == X86::LFENCE)
          break;

        // If this instruction cannot load, nothing to do.
        if (!MI.mayLoad())
          continue;

        // Some instructions which "load" are trivially safe or unimportant.
        if (MI.getOpcode() == X86::MFENCE)
          continue;

        // Extract the memory operand information about this instruction.
        // FIXME: This doesn't handle loading pseudo instructions which we
        // often could handle with similarly generic logic. We probably need
        // to add an MI-layer routine similar to the MC-layer one we use here
        // which maps pseudos much like this maps real instructions.
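        // (Recall that an X86 memory reference is the 5-operand tuple
        // `base, scale, index, displacement, segment`; below we only need the
        // base and index registers from it.)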
        const MCInstrDesc &Desc = MI.getDesc();
        int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
        if (MemRefBeginIdx < 0) {
          LLVM_DEBUG(dbgs()
                         << "WARNING: unable to harden loading instruction: ";
                     MI.dump());
          continue;
        }

        MemRefBeginIdx += X86II::getOperandBias(Desc);

        MachineOperand &BaseMO =
            MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
        MachineOperand &IndexMO =
            MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);

        // If we have at least one (non-frame-index, non-RIP) register operand,
        // and neither operand is load-dependent, we need to check the load.
        unsigned BaseReg = 0, IndexReg = 0;
        if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
            BaseMO.getReg() != X86::NoRegister)
          BaseReg = BaseMO.getReg();
        if (IndexMO.getReg() != X86::NoRegister)
          IndexReg = IndexMO.getReg();

        if (!BaseReg && !IndexReg)
          // No register operands!
          continue;

        // If any register operand is dependent, this load is dependent and we
        // needn't check it.
        // FIXME: Is this true in the case where we are hardening loads after
        // they complete? Unclear, need to investigate.
        if ((BaseReg && LoadDepRegs.test(BaseReg)) ||
            (IndexReg && LoadDepRegs.test(IndexReg)))
          continue;

        // If post-load hardening is enabled, this load is compatible with
        // post-load hardening, and we aren't already going to harden one of
        // the address registers, queue it up to be hardened post-load.
        // Notably, even once hardened this won't introduce a useful dependency
        // that could prune out subsequent loads.
        if (EnablePostLoadHardening && isDataInvariantLoad(MI) &&
            MI.getDesc().getNumDefs() == 1 && MI.getOperand(0).isReg() &&
            canHardenRegister(MI.getOperand(0).getReg()) &&
            !HardenedAddrRegs.count(BaseReg) &&
            !HardenedAddrRegs.count(IndexReg)) {
          HardenPostLoad.insert(&MI);
          HardenedAddrRegs.insert(MI.getOperand(0).getReg());
          continue;
        }

        // Record this instruction for address hardening and record its
        // register operands as being address-hardened.
        HardenLoadAddr.insert(&MI);
        if (BaseReg)
          HardenedAddrRegs.insert(BaseReg);
        if (IndexReg)
          HardenedAddrRegs.insert(IndexReg);

        for (MachineOperand &Def : MI.defs())
          if (Def.isReg())
            LoadDepRegs.set(Def.getReg());
      }

    // Now re-walk the instructions in the basic block, and apply whichever
    // hardening strategy we have elected. Note that we do this in a second
    // pass specifically so that we have the complete set of instructions for
    // which we will do post-load hardening and can defer it in certain
    // circumstances.
    //
    // FIXME: This could probably be made even more effective by doing it
    // across the entire function. Rather than just walking the flat list
    // backwards here, we could walk the function in PO and each block bottom
    // up, allowing us to in some cases sink hardening across blocks. As long
    // as the in-block predicate state is used at the eventual hardening site,
    // this remains safe.
    for (MachineInstr &MI : MBB) {
      if (HardenLoads) {
        // We cannot both require hardening the def of a load and its address.
        assert(!(HardenLoadAddr.count(&MI) && HardenPostLoad.count(&MI)) &&
               "Requested to harden both the address and def of a load!");

        // Check if this is a load whose address needs to be hardened.
        if (HardenLoadAddr.erase(&MI)) {
          const MCInstrDesc &Desc = MI.getDesc();
          int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
          assert(MemRefBeginIdx >= 0 && "Cannot have an invalid index here!");

          MemRefBeginIdx += X86II::getOperandBias(Desc);

          MachineOperand &BaseMO =
              MI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
          MachineOperand &IndexMO =
              MI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
          hardenLoadAddr(MI, BaseMO, IndexMO, AddrRegToHardenedReg);
          continue;
        }

        // Test if this instruction is one of our post-load instructions (and
        // remove it from the set if so).
        if (HardenPostLoad.erase(&MI)) {
          assert(!MI.isCall() && "Must not try to post-load harden a call!");

          // If this is a data-invariant load, we want to try and sink any
          // hardening as far as possible.
          if (isDataInvariantLoad(MI)) {
            // Sink the instruction we'll need to harden as far as we can down
            // the graph.
            MachineInstr *SunkMI = sinkPostLoadHardenedInst(MI, HardenPostLoad);

            // If we managed to sink this instruction, update everything so we
            // harden that instruction when we reach it in the instruction
            // sequence.
            if (SunkMI != &MI) {
              // If in sinking there was no instruction needing to be
              // hardened, we're done.
              if (!SunkMI)
                continue;

              // Otherwise, add this to the set of defs we harden.
              HardenPostLoad.insert(SunkMI);
              continue;
            }
          }

          unsigned HardenedReg = hardenPostLoad(MI);

          // Mark the resulting hardened register as such so we don't
          // re-harden.
          AddrRegToHardenedReg[HardenedReg] = HardenedReg;

          continue;
        }

        // Check for an indirect call or branch that may need its input
        // hardened even if we couldn't find the specific load used, or were
        // able to avoid hardening it for some reason. Note that here we
        // cannot break out afterward as we may still need to handle any call
        // aspect of this instruction.
        if ((MI.isCall() || MI.isBranch()) && HardenIndirectCallsAndJumps)
          hardenIndirectCallOrJumpInstr(MI, AddrRegToHardenedReg);
      }

      // After we finish hardening loads we handle interprocedural hardening
      // if enabled and relevant for this instruction.
      if (!HardenInterprocedurally)
        continue;
      if (!MI.isCall() && !MI.isReturn())
        continue;

      // If this is a direct return (i.e., not a tail call), just directly
      // harden it.
      if (MI.isReturn() && !MI.isCall()) {
        hardenReturnInstr(MI);
        continue;
      }

      // Otherwise we have a call. We need to handle transferring the
      // predicate state into a call and recovering it after the call returns
      // unless this is a tail call.
      assert(MI.isCall() && "Should only reach here for calls!");
      tracePredStateThroughCall(MI);
    }

    HardenPostLoad.clear();
    HardenLoadAddr.clear();
    HardenedAddrRegs.clear();
    AddrRegToHardenedReg.clear();

    // Currently, we only track data-dependent loads within a basic block.
    // FIXME: We should see if this is necessary or if we could be more
    // aggressive here without opening up attack avenues.
    LoadDepRegs.clear();
  }
}

/// Save EFLAGS into the returned GPR. This can in turn be restored with
/// `restoreEFLAGS`.
///
/// Note that LLVM can only lower very simple patterns of saved and restored
/// EFLAGS registers. The restore should always be within the same basic block
/// as the save so that no PHI nodes are inserted.
unsigned X86SpeculativeLoadHardeningPass::saveEFLAGS(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
    DebugLoc Loc) {
  // FIXME: Hard coding this to a 32-bit register class seems weird, but
  // matches what instruction selection does.
  unsigned Reg = MRI->createVirtualRegister(&X86::GR32RegClass);
  // We directly copy the FLAGS register and rely on later lowering to clean
  // this up into the appropriate setCC instructions.
  BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), Reg).addReg(X86::EFLAGS);
  ++NumInstsInserted;
  return Reg;
}

/// Restore EFLAGS from the provided GPR. This should be produced by
/// `saveEFLAGS`.
///
/// This must be done within the same basic block as the save in order to
/// reliably lower.
void X86SpeculativeLoadHardeningPass::restoreEFLAGS(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
    unsigned Reg) {
  BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), X86::EFLAGS).addReg(Reg);
  ++NumInstsInserted;
}

/// Takes the current predicate state (in a register) and merges it into the
/// stack pointer. The state is essentially a single bit, but we merge this in
/// a way that won't form non-canonical pointers and also will be preserved
/// across normal stack adjustments.
void X86SpeculativeLoadHardeningPass::mergePredStateIntoSP(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
    unsigned PredStateReg) {
  unsigned TmpReg = MRI->createVirtualRegister(PS->RC);
  // FIXME: This hard codes a shift distance based on the number of bits
  // needed to stay canonical on 64-bit. We should compute this somehow and
  // support 32-bit as part of that.
  auto ShiftI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHL64ri), TmpReg)
                    .addReg(PredStateReg, RegState::Kill)
                    .addImm(47);
  ShiftI->addRegisterDead(X86::EFLAGS, TRI);
  ++NumInstsInserted;
  auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), X86::RSP)
                 .addReg(X86::RSP)
                 .addReg(TmpReg, RegState::Kill);
  OrI->addRegisterDead(X86::EFLAGS, TRI);
  ++NumInstsInserted;
}

/// Extracts the predicate state stored in the high bits of the stack pointer.
unsigned X86SpeculativeLoadHardeningPass::extractPredStateFromSP(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
    DebugLoc Loc) {
  unsigned PredStateReg = MRI->createVirtualRegister(PS->RC);
  unsigned TmpReg = MRI->createVirtualRegister(PS->RC);

  // We know that the stack pointer will have any preserved predicate state in
  // its high bit. We just want to smear this across the other bits. Turns
  // out, this is exactly what an arithmetic right shift does.
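  //
  // For example: if misspeculation poisoned the state, the high bit of RSP is
  // set and an arithmetic right shift by 63 yields all-ones; if the state was
  // zero, the high bit is clear and the same shift yields zero.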
  BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), TmpReg)
      .addReg(X86::RSP);
  auto ShiftI =
      BuildMI(MBB, InsertPt, Loc, TII->get(X86::SAR64ri), PredStateReg)
          .addReg(TmpReg, RegState::Kill)
          .addImm(TRI->getRegSizeInBits(*PS->RC) - 1);
  ShiftI->addRegisterDead(X86::EFLAGS, TRI);
  ++NumInstsInserted;

  return PredStateReg;
}

void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
    MachineInstr &MI, MachineOperand &BaseMO, MachineOperand &IndexMO,
    SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc Loc = MI.getDebugLoc();

  // Check if EFLAGS are alive by seeing if there is a def of them or they
  // live-in, and then seeing if that def is in turn used.
  bool EFLAGSLive = isEFLAGSLive(MBB, MI.getIterator(), *TRI);

  SmallVector<MachineOperand *, 2> HardenOpRegs;

  if (BaseMO.isFI()) {
    // A frame index is never a dynamically controllable load, so only
    // harden it if we're covering fixed address loads as well.
    LLVM_DEBUG(
        dbgs() << "  Skipping hardening base of explicit stack frame load: ";
        MI.dump(); dbgs() << "\n");
  } else if (BaseMO.getReg() == X86::RIP ||
             BaseMO.getReg() == X86::NoRegister) {
    // For both RIP-relative and absolute (no-base) loads, we cannot
    // meaningfully harden them because the address being loaded has no
    // dynamic component.
    //
    // FIXME: When using a segment base (like TLS does) we end up with the
    // dynamic address being the base plus -1 because we can't mutate the
    // segment register here. This allows the signed 32-bit offset to point at
    // valid segment-relative addresses and load them successfully.
    LLVM_DEBUG(
        dbgs() << "  Cannot harden base of "
               << (BaseMO.getReg() == X86::RIP ? "RIP-relative" : "no-base")
               << " address in a load!");
  } else {
    assert(BaseMO.isReg() &&
           "Only allowed to have a frame index or register base.");
    HardenOpRegs.push_back(&BaseMO);
  }

  if (IndexMO.getReg() != X86::NoRegister &&
      (HardenOpRegs.empty() ||
       HardenOpRegs.front()->getReg() != IndexMO.getReg()))
    HardenOpRegs.push_back(&IndexMO);

  assert((HardenOpRegs.size() == 1 || HardenOpRegs.size() == 2) &&
         "Should have exactly one or two registers to harden!");
  assert((HardenOpRegs.size() == 1 ||
          HardenOpRegs[0]->getReg() != HardenOpRegs[1]->getReg()) &&
         "Should not have two of the same registers!");

  // Remove any registers that have already been checked.
  llvm::erase_if(HardenOpRegs, [&](MachineOperand *Op) {
    // See if this operand's register has already been checked.
    auto It = AddrRegToHardenedReg.find(Op->getReg());
    if (It == AddrRegToHardenedReg.end())
      // Not checked, so retain this one.
      return false;

    // Otherwise, we can directly update this operand and remove it.
    Op->setReg(It->second);
    return true;
  });
  // If there are none left, we're done.
  if (HardenOpRegs.empty())
    return;

  // Compute the current predicate state.
  unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);

  auto InsertPt = MI.getIterator();

  // If EFLAGS are live and we don't have access to instructions that avoid
  // clobbering EFLAGS we need to save and restore them. This in turn makes
  // the EFLAGS no longer live.
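  //
  // (In practice, "instructions that avoid clobbering EFLAGS" means the BMI2
  // SHRX instruction used further down, which, unlike OR, does not write any
  // flags.)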
  unsigned FlagsReg = 0;
  if (EFLAGSLive && !Subtarget->hasBMI2()) {
    EFLAGSLive = false;
    FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);
  }

  for (MachineOperand *Op : HardenOpRegs) {
    unsigned OpReg = Op->getReg();
    auto *OpRC = MRI->getRegClass(OpReg);
    unsigned TmpReg = MRI->createVirtualRegister(OpRC);

    // If this is a vector register, we'll need somewhat custom logic to
    // handle hardening it.
    if (!Subtarget->hasVLX() && (OpRC->hasSuperClassEq(&X86::VR128RegClass) ||
                                 OpRC->hasSuperClassEq(&X86::VR256RegClass))) {
      assert(Subtarget->hasAVX2() && "AVX2-specific register classes!");
      bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128RegClass);

      // Move our state into a vector register.
      // FIXME: We could skip this at the cost of longer encodings with
      // AVX-512 but that doesn't seem likely to be worth it.
      unsigned VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass);
      auto MovI =
          BuildMI(MBB, InsertPt, Loc, TII->get(X86::VMOV64toPQIrr), VStateReg)
              .addReg(StateReg);
      (void)MovI;
      ++NumInstsInserted;
      LLVM_DEBUG(dbgs() << "  Inserting mov: "; MovI->dump(); dbgs() << "\n");

      // Broadcast it across the vector register.
      unsigned VBStateReg = MRI->createVirtualRegister(OpRC);
      auto BroadcastI = BuildMI(MBB, InsertPt, Loc,
                                TII->get(Is128Bit ? X86::VPBROADCASTQrr
                                                  : X86::VPBROADCASTQYrr),
                                VBStateReg)
                            .addReg(VStateReg);
      (void)BroadcastI;
      ++NumInstsInserted;
      LLVM_DEBUG(dbgs() << "  Inserting broadcast: "; BroadcastI->dump();
                 dbgs() << "\n");

      // Merge our potential poison state into the value with a vector or.
      auto OrI =
          BuildMI(MBB, InsertPt, Loc,
                  TII->get(Is128Bit ? X86::VPORrr : X86::VPORYrr), TmpReg)
              .addReg(VBStateReg)
              .addReg(OpReg);
      (void)OrI;
      ++NumInstsInserted;
      LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");
    } else if (OpRC->hasSuperClassEq(&X86::VR128XRegClass) ||
               OpRC->hasSuperClassEq(&X86::VR256XRegClass) ||
               OpRC->hasSuperClassEq(&X86::VR512RegClass)) {
      assert(Subtarget->hasAVX512() && "AVX512-specific register classes!");
      bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128XRegClass);
      bool Is256Bit = OpRC->hasSuperClassEq(&X86::VR256XRegClass);
      if (Is128Bit || Is256Bit)
        assert(Subtarget->hasVLX() && "AVX512VL-specific register classes!");

      // Broadcast our state into a vector register.
      unsigned VStateReg = MRI->createVirtualRegister(OpRC);
      unsigned BroadcastOp =
          Is128Bit ? X86::VPBROADCASTQrZ128r
                   : Is256Bit ? X86::VPBROADCASTQrZ256r : X86::VPBROADCASTQrZr;
      auto BroadcastI =
          BuildMI(MBB, InsertPt, Loc, TII->get(BroadcastOp), VStateReg)
              .addReg(StateReg);
      (void)BroadcastI;
      ++NumInstsInserted;
      LLVM_DEBUG(dbgs() << "  Inserting broadcast: "; BroadcastI->dump();
                 dbgs() << "\n");

      // Merge our potential poison state into the value with a vector or.
      unsigned OrOp = Is128Bit ? X86::VPORQZ128rr
                               : Is256Bit ? X86::VPORQZ256rr : X86::VPORQZrr;
      auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOp), TmpReg)
                     .addReg(VStateReg)
                     .addReg(OpReg);
      (void)OrI;
      ++NumInstsInserted;
      LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");
    } else {
      // FIXME: Need to support GR32 here for 32-bit code.
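      //
      // For GPR operands the hardening below is a single instruction: an OR
      // with the predicate state when EFLAGS are dead, or, when EFLAGS must
      // stay live, a flag-preserving SHRX by the state (an all-ones state
      // shifts the address right by 63, destroying it; a zero state leaves it
      // unchanged).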
      assert(OpRC->hasSuperClassEq(&X86::GR64RegClass) &&
             "Not a supported register class for address hardening!");

      if (!EFLAGSLive) {
        // Merge our potential poison state into the value with an or.
        auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg)
                       .addReg(StateReg)
                       .addReg(OpReg);
        OrI->addRegisterDead(X86::EFLAGS, TRI);
        ++NumInstsInserted;
        LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");
      } else {
        // We need to avoid touching EFLAGS so shift out all but the least
        // significant bit using the instruction that doesn't update flags.
        auto ShiftI =
            BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg)
                .addReg(OpReg)
                .addReg(StateReg);
        (void)ShiftI;
        ++NumInstsInserted;
        LLVM_DEBUG(dbgs() << "  Inserting shrx: "; ShiftI->dump();
                   dbgs() << "\n");
      }
    }

    // Record this register as checked and update the operand.
    assert(!AddrRegToHardenedReg.count(Op->getReg()) &&
           "Should not have checked this register yet!");
    AddrRegToHardenedReg[Op->getReg()] = TmpReg;
    Op->setReg(TmpReg);
    ++NumAddrRegsHardened;
  }

  // And restore the flags if needed.
  if (FlagsReg)
    restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
}

MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
    MachineInstr &InitialMI, SmallPtrSetImpl<MachineInstr *> &HardenedInstrs) {
  assert(isDataInvariantLoad(InitialMI) &&
         "Cannot get here with a non-invariant load!");

  // See if we can sink hardening the loaded value.
  auto SinkCheckToSingleUse =
      [&](MachineInstr &MI) -> Optional<MachineInstr *> {
    unsigned DefReg = MI.getOperand(0).getReg();

    // We need to find a single use to which we can sink the check. We can
    // primarily do this because many uses may already end up checked on
    // their own.
    MachineInstr *SingleUseMI = nullptr;
    for (MachineInstr &UseMI : MRI->use_instructions(DefReg)) {
      // If we're already going to harden this use, it is data invariant and
      // within our block.
      if (HardenedInstrs.count(&UseMI)) {
        if (!isDataInvariantLoad(UseMI)) {
          // If we've already decided to harden a non-load, we must have sunk
          // some other post-load hardened instruction to it and it must
          // itself be data-invariant.
          assert(isDataInvariant(UseMI) &&
                 "Data variant instruction being hardened!");
          continue;
        }

        // Otherwise, this is a load and the load component can't be data
        // invariant so check how this register is being used.
        const MCInstrDesc &Desc = UseMI.getDesc();
        int MemRefBeginIdx = X86II::getMemoryOperandNo(Desc.TSFlags);
        assert(MemRefBeginIdx >= 0 &&
               "Should always have mem references here!");
        MemRefBeginIdx += X86II::getOperandBias(Desc);

        MachineOperand &BaseMO =
            UseMI.getOperand(MemRefBeginIdx + X86::AddrBaseReg);
        MachineOperand &IndexMO =
            UseMI.getOperand(MemRefBeginIdx + X86::AddrIndexReg);
        if ((BaseMO.isReg() && BaseMO.getReg() == DefReg) ||
            (IndexMO.isReg() && IndexMO.getReg() == DefReg))
          // The load uses the register as part of its address, making it not
          // invariant.
          return {};

        continue;
      }

      if (SingleUseMI)
        // We already have a single use, this would make two. Bail.
        return {};

      // If this single use isn't data invariant, isn't in this block, or has
      // interfering EFLAGS, we can't sink the hardening to it.
      if (!isDataInvariant(UseMI) || UseMI.getParent() != MI.getParent())
        return {};

      // If this instruction defines multiple registers bail as we won't
      // harden all of them.
      if (UseMI.getDesc().getNumDefs() > 1)
        return {};

      // If this register isn't a virtual register we can't sanely walk its
      // uses, so just bail. Also check that its register class is one of the
      // ones we can harden.
      unsigned UseDefReg = UseMI.getOperand(0).getReg();
      if (!TRI->isVirtualRegister(UseDefReg) ||
          !canHardenRegister(UseDefReg))
        return {};

      SingleUseMI = &UseMI;
    }

    // If SingleUseMI is still null, there is no use that needs its own
    // checking. Otherwise, it is the single use that needs checking.
    return {SingleUseMI};
  };

  MachineInstr *MI = &InitialMI;
  while (Optional<MachineInstr *> SingleUse = SinkCheckToSingleUse(*MI)) {
    // Update which MI we're checking now.
    MI = *SingleUse;
    if (!MI)
      break;
  }

  return MI;
}

bool X86SpeculativeLoadHardeningPass::canHardenRegister(unsigned Reg) {
  auto *RC = MRI->getRegClass(Reg);
  int RegBytes = TRI->getRegSizeInBits(*RC) / 8;
  if (RegBytes > 8)
    // We don't support post-load hardening of vectors.
    return false;

  // If this register class is explicitly constrained to a class that doesn't
  // require a REX prefix, we may not be able to satisfy that constraint when
  // emitting the hardening instructions, so bail out here.
  // FIXME: This seems like a pretty lame hack. The way this comes up is when
  // we end up both with a NOREX and REX-only register as operands to the
  // hardening instructions. It would be better to fix that code to handle
  // this situation rather than hack around it in this way.
  const TargetRegisterClass *NOREXRegClasses[] = {
      &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass,
      &X86::GR32_NOREXRegClass, &X86::GR64_NOREXRegClass};
  if (RC == NOREXRegClasses[Log2_32(RegBytes)])
    return false;

  const TargetRegisterClass *GPRRegClasses[] = {
      &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32RegClass,
      &X86::GR64RegClass};
  return RC->hasSuperClassEq(GPRRegClasses[Log2_32(RegBytes)]);
}

/// Harden a value in a register.
///
/// This is the low-level logic to fully harden a value sitting in a register
/// against leaking during speculative execution.
///
/// Unlike hardening an address that is used by a load, this routine is
/// required to hide *all* incoming bits in the register.
///
/// `Reg` must be a virtual register. Currently, it is required to be a GPR no
/// larger than the predicate state register. FIXME: We should support vector
/// registers here by broadcasting the predicate state.
///
/// The new, hardened virtual register is returned. It will have the same
/// register class as `Reg`.
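///
/// For example (an illustrative sketch with made-up virtual register names),
/// hardening a 32-bit value emits roughly:
///
///     %state32 = COPY %state.sub_32bit
///     %hardened = OR32rr %state32, %val
///
/// so that under misspeculation (an all-ones state) every bit of the value is
/// coerced to one before it can leak through a dependent access.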
unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
    unsigned Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
    DebugLoc Loc) {
  assert(canHardenRegister(Reg) && "Cannot harden this register!");
  assert(TRI->isVirtualRegister(Reg) && "Cannot harden a physical register!");

  auto *RC = MRI->getRegClass(Reg);
  int Bytes = TRI->getRegSizeInBits(*RC) / 8;

  unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);

  // FIXME: Need to teach this about 32-bit mode.
  if (Bytes != 8) {
    unsigned SubRegImms[] = {X86::sub_8bit, X86::sub_16bit, X86::sub_32bit};
    unsigned SubRegImm = SubRegImms[Log2_32(Bytes)];
    unsigned NarrowStateReg = MRI->createVirtualRegister(RC);
    BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), NarrowStateReg)
        .addReg(StateReg, 0, SubRegImm);
    StateReg = NarrowStateReg;
  }

  unsigned FlagsReg = 0;
  if (isEFLAGSLive(MBB, InsertPt, *TRI))
    FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);

  unsigned NewReg = MRI->createVirtualRegister(RC);
  unsigned OrOpCodes[] = {X86::OR8rr, X86::OR16rr, X86::OR32rr, X86::OR64rr};
  unsigned OrOpCode = OrOpCodes[Log2_32(Bytes)];
  auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOpCode), NewReg)
                 .addReg(StateReg)
                 .addReg(Reg);
  OrI->addRegisterDead(X86::EFLAGS, TRI);
  ++NumInstsInserted;
  LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");

  if (FlagsReg)
    restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);

  return NewReg;
}

/// Harden a load by hardening the loaded value in the defined register.
///
/// We can harden a non-leaking load into a register without touching the
/// address by just hiding all of the loaded bits during misspeculation. We
/// use an `or` instruction to do this because we set up our poison value as
/// all ones. The goal is just that the loaded bits are not exposed to
/// speculative execution, and coercing them all to one is sufficient.
///
/// Returns the newly hardened register.
unsigned X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc Loc = MI.getDebugLoc();

  auto &DefOp = MI.getOperand(0);
  unsigned OldDefReg = DefOp.getReg();
  auto *DefRC = MRI->getRegClass(OldDefReg);

  // Because we want to completely replace the uses of this def'ed value with
  // the hardened value, create a dedicated new register that will only be
  // used to communicate the unhardened value to the hardening.
  unsigned UnhardenedReg = MRI->createVirtualRegister(DefRC);
  DefOp.setReg(UnhardenedReg);

  // Now harden this register's value, getting a hardened reg that is safe to
  // use. Note that we insert the instructions to compute this *after* the
  // defining instruction, not before it.
  unsigned HardenedReg = hardenValueInRegister(
      UnhardenedReg, MBB, std::next(MI.getIterator()), Loc);

  // Finally, replace the old register (which now only has the uses of the
  // original def) with the hardened register.
  MRI->replaceRegWith(/*FromReg*/ OldDefReg, /*ToReg*/ HardenedReg);

  ++NumPostLoadRegsHardened;
  return HardenedReg;
}

/// Harden a return instruction.
///
/// Returns implicitly perform a load which we need to harden.
/// Without hardening this load, an attacker may speculatively write over the
/// return address to steer speculation of the return to an attacker
/// controlled address. This is called Spectre v1.1 or Bounds Check Bypass
/// Store (BCBS) and is described in this paper:
/// https://people.csail.mit.edu/vlk/spectre11.pdf
///
/// We can harden this by introducing an LFENCE that will delay any load of
/// the return address until prior instructions have retired (and thus are
/// not being speculated), or we can harden the address used by the implicit
/// load: the stack pointer.
///
/// If we are not using an LFENCE, hardening the stack pointer has an
/// additional benefit: it allows us to pass the predicate state accumulated
/// in this function back to the caller. In the absence of a BCBS attack on
/// the return, the caller will typically be resumed and speculatively
/// executed due to the Return Stack Buffer (RSB) prediction, which is very
/// accurate and has a high priority. It is possible that some code from the
/// caller will be executed speculatively even during a BCBS-attacked return
/// until the steering takes effect. Whenever this happens, the caller can
/// recover the (poisoned) predicate state from the stack pointer and
/// continue to harden loads.
void X86SpeculativeLoadHardeningPass::hardenReturnInstr(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc Loc = MI.getDebugLoc();
  auto InsertPt = MI.getIterator();

  if (FenceCallAndRet) {
    // Simply forcibly block speculation of loads out of the function by
    // using an LFENCE. This is potentially a heavy-weight mitigation
    // strategy, but should be secure, is simple from an ABI perspective, and
    // the cost can be minimized through inlining.
    //
    // FIXME: We should investigate ways to establish a strong
    // data-dependency on the return. However, poisoning the stack pointer is
    // unlikely to work because the return is *predicted* rather than relying
    // on the load of the return address to actually resolve.
    BuildMI(MBB, InsertPt, Loc, TII->get(X86::LFENCE));
    ++NumInstsInserted;
    ++NumLFENCEsInserted;
    return;
  }

  // Take our predicate state, shift it to the high 17 bits (so that we keep
  // pointers canonical) and merge it into RSP. This will allow the caller to
  // extract it when we return (speculatively).
  mergePredStateIntoSP(MBB, InsertPt, Loc, PS->SSA.GetValueAtEndOfBlock(&MBB));
}

/// Trace the predicate state through a call.
///
/// There are several layers of this needed to handle the full complexity of
/// calls.
///
/// First, we need to send the predicate state into the called function. We
/// do this by merging it into the high bits of the stack pointer.
///
/// For tail calls, this is all we need to do.
///
/// For calls where we might return to control flow, we further need to
/// extract the predicate state built up within that function from the high
/// bits of the stack pointer, and make that the newly available predicate
/// state.
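///
/// Concretely (an illustrative sketch rather than the exact emitted MI
/// sequence), the pattern around a non-tail call is roughly:
///
///     shlq $47, %state
///     orq  %state, %rsp
///     callq callee
///     movq %rsp, %tmp
///     sarq $63, %tmp    # %tmp is the recovered all-zeros/all-ones state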
void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
    MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  auto InsertPt = MI.getIterator();
  DebugLoc Loc = MI.getDebugLoc();

  // First, we transfer the predicate state into the called function by
  // merging it into the stack pointer. This will kill the current def of the
  // state.
  unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
  mergePredStateIntoSP(MBB, InsertPt, Loc, StateReg);

  // If this call is also a return, it is a tail call and we don't need
  // anything else to handle it, so we're done.
  // FIXME: We should also handle noreturn calls.
  if (MI.isReturn())
    return;

  // We need to step past the call and recover the predicate state from SP
  // after the return, and make this new state available.
  ++InsertPt;
  unsigned NewStateReg = extractPredStateFromSP(MBB, InsertPt, Loc);
  PS->SSA.AddAvailableValue(&MBB, NewStateReg);
}

/// An attacker may speculatively store over a value that is then
/// speculatively loaded and used as the target of an indirect call or jump
/// instruction. This is another form of Spectre v1.1 (Bounds Check Bypass
/// Store, BCBS), described in this paper:
/// https://people.csail.mit.edu/vlk/spectre11.pdf
///
/// When this happens, the speculative execution of the call or jump will end
/// up being steered to this attacker controlled address. While most such
/// loads will be adequately hardened already, we want to ensure that they
/// are definitively treated as needing post-load hardening. While address
/// hardening is sufficient to prevent secret data from leaking to the
/// attacker, it may not be sufficient to prevent an attacker from steering
/// speculative execution. We forcibly unfolded all relevant loads above and
/// so will always have an opportunity to post-load harden here; we just need
/// to scan for cases not already flagged and add them.
void X86SpeculativeLoadHardeningPass::hardenIndirectCallOrJumpInstr(
    MachineInstr &MI,
    SmallDenseMap<unsigned, unsigned, 32> &AddrRegToHardenedReg) {
  switch (MI.getOpcode()) {
  case X86::FARCALL16m:
  case X86::FARCALL32m:
  case X86::FARCALL64:
  case X86::FARJMP16m:
  case X86::FARJMP32m:
  case X86::FARJMP64:
    // We don't need to harden either far calls or far jumps as they are
    // safe from Spectre.
    return;

  default:
    break;
  }

  // We should never see a loading instruction at this point, as those should
  // have been unfolded.
  assert(!MI.mayLoad() && "Found a lingering loading instruction!");

  // If the first operand isn't a register, this is a branch or call
  // instruction with an immediate operand which doesn't need to be hardened.
  if (!MI.getOperand(0).isReg())
    return;

  // For all of these, the target register is the first operand of the
  // instruction.
  auto &TargetOp = MI.getOperand(0);
  unsigned OldTargetReg = TargetOp.getReg();

  // Try to look up a hardened version of this register. We retain a reference
  // here as we want to update the map to track any newly computed hardened
  // register.
  unsigned &HardenedTargetReg = AddrRegToHardenedReg[OldTargetReg];

  // If we don't have a hardened register yet, compute one. Otherwise, just
  // use the already hardened register.
  //
  // FIXME: It is a little suspect that we use partially hardened registers
  // that only feed addresses. The complexity of partial hardening with SHRX
  // continues to pile up. We should definitively measure its value and
  // consider eliminating it.
  if (!HardenedTargetReg)
    HardenedTargetReg = hardenValueInRegister(
        OldTargetReg, *MI.getParent(), MI.getIterator(), MI.getDebugLoc());

  // Set the target operand to the hardened register.
  TargetOp.setReg(HardenedTargetReg);

  ++NumCallsOrJumpsHardened;
}

INITIALIZE_PASS_BEGIN(X86SpeculativeLoadHardeningPass, DEBUG_TYPE,
                      "X86 speculative load hardener", false, false)
INITIALIZE_PASS_END(X86SpeculativeLoadHardeningPass, DEBUG_TYPE,
                    "X86 speculative load hardener", false, false)

FunctionPass *llvm::createX86SpeculativeLoadHardeningPass() {
  return new X86SpeculativeLoadHardeningPass();
}