      1 //===-- X86AsmInstrumentation.cpp - Instrument X86 inline assembly C++ -*-===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 
     10 #include "X86AsmInstrumentation.h"
     11 #include "MCTargetDesc/X86BaseInfo.h"
     12 #include "X86Operand.h"
     13 #include "llvm/ADT/StringExtras.h"
     14 #include "llvm/ADT/Triple.h"
        #include "llvm/ADT/Twine.h"
     15 #include "llvm/MC/MCAsmInfo.h"
     16 #include "llvm/MC/MCContext.h"
     17 #include "llvm/MC/MCInst.h"
     18 #include "llvm/MC/MCInstBuilder.h"
     19 #include "llvm/MC/MCInstrInfo.h"
     20 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
     21 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
     22 #include "llvm/MC/MCStreamer.h"
     23 #include "llvm/MC/MCSubtargetInfo.h"
     24 #include "llvm/MC/MCTargetOptions.h"
     25 #include "llvm/Support/CommandLine.h"
     26 #include <algorithm>
     27 #include <cassert>
        #include <cstdint>
        #include <limits>
        #include <memory>
     28 #include <vector>
     29 
     30 // The following comment describes how assembly instrumentation works.
     31 // Currently we have only AddressSanitizer instrumentation, but we're
     32 // planning to implement MemorySanitizer for inline assembly too. If
     33 // you're not familiar with the AddressSanitizer algorithm, please read
     34 // https://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm.
     35 //
     36 // When inline assembly is parsed by an instance of X86AsmParser, all
     37 // instructions are emitted via the EmitInstruction method. That's the
     38 // place where X86AsmInstrumentation analyzes an instruction and
     39 // decides whether the instruction should be emitted as is or needs
     40 // instrumentation. The latter case happens when an instruction
     41 // reads from or writes to memory. Currently the instruction opcode
     42 // is checked explicitly, and if an instruction has a memory operand
     43 // (for instance, movq (%rsi, %rcx, 8), %rax), it is
     44 // instrumented. There also exist instructions that modify
     45 // memory but don't have an explicit memory operand, for instance,
     46 // movs.
     47 //
     48 // Let's first consider 8-byte memory accesses when an instruction
     49 // has an explicit memory operand. In this case we need two registers:
     50 // AddressReg to compute the address of the memory cells being accessed
     51 // and ShadowReg to compute the corresponding shadow address. So, we need
     52 // to spill both registers before the instrumentation code and restore
     53 // them after it. Thus, in general, the instrumentation code will
     54 // look like this:
     55 // PUSHF  # Store flags, otherwise they will be overwritten
     56 // PUSH AddressReg  # spill AddressReg
     57 // PUSH ShadowReg   # spill ShadowReg
     58 // LEA MemOp, AddressReg  # compute address of the memory operand
     59 // MOV AddressReg, ShadowReg
     60 // SHR ShadowReg, 3
     61 // # ShadowOffset(AddressReg >> 3) contains the address of the shadow
     62 // # byte corresponding to MemOp.
     63 // CMP ShadowOffset(ShadowReg), 0  # test the shadow value
     64 // JZ .Done  # when the shadow equals zero, everything is fine
     65 // MOV AddressReg, RDI
     66 // # Call __asan_report function with AddressReg as an argument
     67 // CALL __asan_report
     68 // .Done:
     69 // POP ShadowReg  # Restore ShadowReg
     70 // POP AddressReg  # Restore AddressReg
     71 // POPF  # Restore flags
     72 //
     73 // Memory accesses of other sizes (1, 2, 4 and 16 bytes) are
     74 // handled in a similar manner, but small memory accesses (less than 8
     75 // bytes) require an additional ScratchReg, which is used for the shadow value.
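        //
        // For small accesses the shadow byte encodes how many of the low bytes of
        // the corresponding 8-byte word are addressable, so a non-zero shadow value
        // is not necessarily an error. A sketch of the extra check (mirroring what
        // InstrumentMemOperandSmall below emits; ShadowByte denotes the 8-bit part
        // of ShadowReg):
        // MOV AddressReg, ScratchReg
        // AND ScratchReg, 7               # offset of the access within the 8-byte word
        // ADD ScratchReg, AccessSize - 1  # offset of the last accessed byte
        // MOVSX ShadowByte, ShadowReg     # sign-extend the shadow byte
        // CMP ScratchReg, ShadowReg
        // JL .Done                        # the access fits into the addressable prefix
        // CALL __asan_report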
     76 //
     77 // If we're instrumenting an instruction like movs, only the
     78 // contents at RDI, RDI + AccessSize * RCX, RSI and RSI + AccessSize *
     79 // RCX are checked.  In this case there is no need to spill and restore
     80 // AddressReg, ShadowReg or the flags four times; they are saved on the
     81 // stack just once, before the instrumentation of these four addresses,
     82 // and restored at the end of the instrumentation.
     83 //
     84 // Several things complicate this simple algorithm:
     85 // * The instrumented memory operand can have RSP as a base or an index
     86 //   register.  So we need to add a constant offset before the memory
     87 //   address is computed, since the flags, AddressReg, ShadowReg, etc. were
     88 //   already stored on the stack and RSP has been modified.
     89 // * Debug info (usually DWARF) should be adjusted, because sometimes
     90 //   RSP is used as a frame register. So we need to select some
     91 //   register as a frame register and temporarily override the current CFA
     92 //   register.
     93 
     94 namespace llvm {
     95 namespace {
     96 
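        // Hidden flag: assembly is instrumented only when this is set, the target
        // has compiler-rt support (currently Linux), and address sanitizing is
        // requested via MCTargetOptions (see CreateX86AsmInstrumentation at the
        // bottom of this file).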
     97 static cl::opt<bool> ClAsanInstrumentAssembly(
     98     "asan-instrument-assembly",
     99     cl::desc("instrument assembly with AddressSanitizer checks"), cl::Hidden,
    100     cl::init(false));
    101 
    102 const int64_t MinAllowedDisplacement = std::numeric_limits<int32_t>::min();
    103 const int64_t MaxAllowedDisplacement = std::numeric_limits<int32_t>::max();
    104 
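        // x86 memory operands can encode at most a 32-bit displacement, so
        // displacements are clamped to the int32 range here; any remainder is
        // applied with additional LEA instructions in ComputeMemOperandAddress.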
    105 int64_t ApplyDisplacementBounds(int64_t Displacement) {
    106   return std::max(std::min(MaxAllowedDisplacement, Displacement),
    107                   MinAllowedDisplacement);
    108 }
    109 
    110 void CheckDisplacementBounds(int64_t Displacement) {
    111   assert(Displacement >= MinAllowedDisplacement &&
    112          Displacement <= MaxAllowedDisplacement);
    113 }
    114 
    115 bool IsStackReg(unsigned Reg) { return Reg == X86::RSP || Reg == X86::ESP; }
    116 
    117 bool IsSmallMemAccess(unsigned AccessSize) { return AccessSize < 8; }
    118 
    119 class X86AddressSanitizer : public X86AsmInstrumentation {
    120 public:
    121   struct RegisterContext {
    122   private:
    123     enum RegOffset {
    124       REG_OFFSET_ADDRESS = 0,
    125       REG_OFFSET_SHADOW,
    126       REG_OFFSET_SCRATCH
    127     };
    128 
    129   public:
    130     RegisterContext(unsigned AddressReg, unsigned ShadowReg,
    131                     unsigned ScratchReg) {
    132       BusyRegs.push_back(convReg(AddressReg, 64));
    133       BusyRegs.push_back(convReg(ShadowReg, 64));
    134       BusyRegs.push_back(convReg(ScratchReg, 64));
    135     }
    136 
    137     unsigned AddressReg(unsigned Size) const {
    138       return convReg(BusyRegs[REG_OFFSET_ADDRESS], Size);
    139     }
    140 
    141     unsigned ShadowReg(unsigned Size) const {
    142       return convReg(BusyRegs[REG_OFFSET_SHADOW], Size);
    143     }
    144 
    145     unsigned ScratchReg(unsigned Size) const {
    146       return convReg(BusyRegs[REG_OFFSET_SCRATCH], Size);
    147     }
    148 
    149     void AddBusyReg(unsigned Reg) {
    150       if (Reg != X86::NoRegister)
    151         BusyRegs.push_back(convReg(Reg, 64));
    152     }
    153 
    154     void AddBusyRegs(const X86Operand &Op) {
    155       AddBusyReg(Op.getMemBaseReg());
    156       AddBusyReg(Op.getMemIndexReg());
    157     }
    158 
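            // Picks a general-purpose register that is not used by the instrumented
            // operand or by the instrumentation itself. It temporarily serves as the
            // frame (CFA) register while the stack pointer is being adjusted.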
    159     unsigned ChooseFrameReg(unsigned Size) const {
    160       static const MCPhysReg Candidates[] = { X86::RBP, X86::RAX, X86::RBX,
    161                                               X86::RCX, X86::RDX, X86::RDI,
    162                                               X86::RSI };
    163       for (unsigned Reg : Candidates) {
    164         if (!std::count(BusyRegs.begin(), BusyRegs.end(), Reg))
    165           return convReg(Reg, Size);
    166       }
    167       return X86::NoRegister;
    168     }
    169 
    170   private:
    171     unsigned convReg(unsigned Reg, unsigned Size) const {
    172       return Reg == X86::NoRegister ? Reg : getX86SubSuperRegister(Reg, Size);
    173     }
    174 
    175     std::vector<unsigned> BusyRegs;
    176   };
    177 
    178   X86AddressSanitizer(const MCSubtargetInfo *&STI)
    179       : X86AsmInstrumentation(STI), RepPrefix(false), OrigSPOffset(0) {}
    180 
    181   ~X86AddressSanitizer() override {}
    182 
    183   // X86AsmInstrumentation implementation:
    184   void InstrumentAndEmitInstruction(const MCInst &Inst,
    185                                     OperandVector &Operands,
    186                                     MCContext &Ctx,
    187                                     const MCInstrInfo &MII,
    188                                     MCStreamer &Out) override {
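            // A standalone REP prefix arrives as its own MCInst. It is not emitted
            // immediately; instead it is remembered (see RepPrefix below) and
            // re-emitted right in front of the instruction it applies to, so the
            // instrumentation code does not end up between the prefix and the
            // prefixed instruction.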
    189     InstrumentMOVS(Inst, Operands, Ctx, MII, Out);
    190     if (RepPrefix)
    191       EmitInstruction(Out, MCInstBuilder(X86::REP_PREFIX));
    192 
    193     InstrumentMOV(Inst, Operands, Ctx, MII, Out);
    194 
    195     RepPrefix = (Inst.getOpcode() == X86::REP_PREFIX);
    196     if (!RepPrefix)
    197       EmitInstruction(Out, Inst);
    198   }
    199 
    200   // Adjusts the stack and saves all registers used in instrumentation.
    201   virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
    202                                             MCContext &Ctx,
    203                                             MCStreamer &Out) = 0;
    204 
    205   // Restores all registers used in instrumentation and adjusts stack.
    206   virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
    207                                             MCContext &Ctx,
    208                                             MCStreamer &Out) = 0;
    209 
    210   virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
    211                                          bool IsWrite,
    212                                          const RegisterContext &RegCtx,
    213                                          MCContext &Ctx, MCStreamer &Out) = 0;
    214   virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
    215                                          bool IsWrite,
    216                                          const RegisterContext &RegCtx,
    217                                          MCContext &Ctx, MCStreamer &Out) = 0;
    218 
    219   virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
    220                                   MCStreamer &Out) = 0;
    221 
    222   void InstrumentMemOperand(X86Operand &Op, unsigned AccessSize, bool IsWrite,
    223                             const RegisterContext &RegCtx, MCContext &Ctx,
    224                             MCStreamer &Out);
    225   void InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg, unsigned CntReg,
    226                           unsigned AccessSize, MCContext &Ctx, MCStreamer &Out);
    227 
    228   void InstrumentMOVS(const MCInst &Inst, OperandVector &Operands,
    229                       MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
    230   void InstrumentMOV(const MCInst &Inst, OperandVector &Operands,
    231                      MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
    232 
    233 protected:
    234   void EmitLabel(MCStreamer &Out, MCSymbol *Label) { Out.EmitLabel(Label); }
    235 
    236   void EmitLEA(X86Operand &Op, unsigned Size, unsigned Reg, MCStreamer &Out) {
    237     assert(Size == 32 || Size == 64);
    238     MCInst Inst;
    239     Inst.setOpcode(Size == 32 ? X86::LEA32r : X86::LEA64r);
    240     Inst.addOperand(MCOperand::createReg(getX86SubSuperRegister(Reg, Size)));
    241     Op.addMemOperands(Inst, 5);
    242     EmitInstruction(Out, Inst);
    243   }
    244 
    245   void ComputeMemOperandAddress(X86Operand &Op, unsigned Size,
    246                                 unsigned Reg, MCContext &Ctx, MCStreamer &Out);
    247 
    248   // Creates a new memory operand with Displacement added to the original
    249   // displacement. *Residue receives the part of the displacement that could
    250   // not be encoded when the total displacement exceeds the 32-bit limit.
    251   std::unique_ptr<X86Operand> AddDisplacement(X86Operand &Op,
    252                                               int64_t Displacement,
    253                                               MCContext &Ctx, int64_t *Residue);
    254 
    255   bool is64BitMode() const {
    256     return STI->getFeatureBits()[X86::Mode64Bit];
    257   }
    258   bool is32BitMode() const {
    259     return STI->getFeatureBits()[X86::Mode32Bit];
    260   }
    261   bool is16BitMode() const {
    262     return STI->getFeatureBits()[X86::Mode16Bit];
    263   }
    264 
    265   unsigned getPointerWidth() {
    266     if (is16BitMode()) return 16;
    267     if (is32BitMode()) return 32;
    268     if (is64BitMode()) return 64;
    269     llvm_unreachable("invalid mode");
    270   }
    271 
    272   // True when the previous instruction was actually a REP prefix.
    273   bool RepPrefix;
    274 
    275   // Offset from the original SP register.
    276   int64_t OrigSPOffset;
    277 };
    278 
    279 void X86AddressSanitizer::InstrumentMemOperand(
    280     X86Operand &Op, unsigned AccessSize, bool IsWrite,
    281     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
    282   assert(Op.isMem() && "Op should be a memory operand.");
    283   assert((AccessSize & (AccessSize - 1)) == 0 && AccessSize <= 16 &&
    284          "AccessSize should be a power of two, less than or equal to 16.");
    285   // FIXME: take into account load/store alignment.
    286   if (IsSmallMemAccess(AccessSize))
    287     InstrumentMemOperandSmall(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
    288   else
    289     InstrumentMemOperandLarge(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
    290 }
    291 
    292 void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg,
    293                                              unsigned CntReg,
    294                                              unsigned AccessSize,
    295                                              MCContext &Ctx, MCStreamer &Out) {
    296   // FIXME: check whole ranges [DstReg .. DstReg + AccessSize * (CntReg - 1)]
    297   // and [SrcReg .. SrcReg + AccessSize * (CntReg - 1)].
    298   RegisterContext RegCtx(X86::RDX /* AddressReg */, X86::RAX /* ShadowReg */,
    299                          IsSmallMemAccess(AccessSize)
    300                              ? X86::RBX
    301                              : X86::NoRegister /* ScratchReg */);
    302   RegCtx.AddBusyReg(DstReg);
    303   RegCtx.AddBusyReg(SrcReg);
    304   RegCtx.AddBusyReg(CntReg);
    305 
    306   InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
    307 
    308   // Test (%SrcReg)
    309   {
    310     const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
    311     std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
    312         getPointerWidth(), 0, Disp, SrcReg, 0, AccessSize, SMLoc(), SMLoc()));
    313     InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
    314                          Out);
    315   }
    316 
    317   // Test -1(%SrcReg, %CntReg, AccessSize)
    318   {
    319     const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
    320     std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
    321         getPointerWidth(), 0, Disp, SrcReg, CntReg, AccessSize, SMLoc(),
    322         SMLoc()));
    323     InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
    324                          Out);
    325   }
    326 
    327   // Test (%DstReg)
    328   {
    329     const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
    330     std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
    331         getPointerWidth(), 0, Disp, DstReg, 0, AccessSize, SMLoc(), SMLoc()));
    332     InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
    333   }
    334 
    335   // Test -1(%DstReg, %CntReg, AccessSize)
    336   {
    337     const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
    338     std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
    339         getPointerWidth(), 0, Disp, DstReg, CntReg, AccessSize, SMLoc(),
    340         SMLoc()));
    341     InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
    342   }
    343 
    344   InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
    345 }
    346 
    347 void X86AddressSanitizer::InstrumentMOVS(const MCInst &Inst,
    348                                          OperandVector &Operands,
    349                                          MCContext &Ctx, const MCInstrInfo &MII,
    350                                          MCStreamer &Out) {
    351   // Access size in bytes.
    352   unsigned AccessSize = 0;
    353 
    354   switch (Inst.getOpcode()) {
    355   case X86::MOVSB:
    356     AccessSize = 1;
    357     break;
    358   case X86::MOVSW:
    359     AccessSize = 2;
    360     break;
    361   case X86::MOVSL:
    362     AccessSize = 4;
    363     break;
    364   case X86::MOVSQ:
    365     AccessSize = 8;
    366     break;
    367   default:
    368     return;
    369   }
    370 
    371   InstrumentMOVSImpl(AccessSize, Ctx, Out);
    372 }
    373 
    374 void X86AddressSanitizer::InstrumentMOV(const MCInst &Inst,
    375                                         OperandVector &Operands, MCContext &Ctx,
    376                                         const MCInstrInfo &MII,
    377                                         MCStreamer &Out) {
    378   // Access size in bytes.
    379   unsigned AccessSize = 0;
    380 
    381   switch (Inst.getOpcode()) {
    382   case X86::MOV8mi:
    383   case X86::MOV8mr:
    384   case X86::MOV8rm:
    385     AccessSize = 1;
    386     break;
    387   case X86::MOV16mi:
    388   case X86::MOV16mr:
    389   case X86::MOV16rm:
    390     AccessSize = 2;
    391     break;
    392   case X86::MOV32mi:
    393   case X86::MOV32mr:
    394   case X86::MOV32rm:
    395     AccessSize = 4;
    396     break;
    397   case X86::MOV64mi32:
    398   case X86::MOV64mr:
    399   case X86::MOV64rm:
    400     AccessSize = 8;
    401     break;
    402   case X86::MOVAPDmr:
    403   case X86::MOVAPSmr:
    404   case X86::MOVAPDrm:
    405   case X86::MOVAPSrm:
    406     AccessSize = 16;
    407     break;
    408   default:
    409     return;
    410   }
    411 
    412   const bool IsWrite = MII.get(Inst.getOpcode()).mayStore();
    413 
    414   for (unsigned Ix = 0; Ix < Operands.size(); ++Ix) {
    415     assert(Operands[Ix]);
    416     MCParsedAsmOperand &Op = *Operands[Ix];
    417     if (Op.isMem()) {
    418       X86Operand &MemOp = static_cast<X86Operand &>(Op);
    419       RegisterContext RegCtx(
    420           X86::RDI /* AddressReg */, X86::RAX /* ShadowReg */,
    421           IsSmallMemAccess(AccessSize) ? X86::RCX
    422                                        : X86::NoRegister /* ScratchReg */);
    423       RegCtx.AddBusyRegs(MemOp);
    424       InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
    425       InstrumentMemOperand(MemOp, AccessSize, IsWrite, RegCtx, Ctx, Out);
    426       InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
    427     }
    428   }
    429 }
    430 
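        // Computes the address of Op into Reg, compensating for the bytes the
        // instrumentation has already pushed when the operand uses the stack pointer
        // (OrigSPOffset is negative after the spills). For example, in 32-bit mode
        // with three 4-byte slots pushed (OrigSPOffset == -12), the data that was at
        // 8(%esp) now lives 20 bytes above %esp, so the address is computed as
        // LEA 20(%esp).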
    431 void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op,
    432                                                    unsigned Size,
    433                                                    unsigned Reg, MCContext &Ctx,
    434                                                    MCStreamer &Out) {
    435   int64_t Displacement = 0;
    436   if (IsStackReg(Op.getMemBaseReg()))
    437     Displacement -= OrigSPOffset;
    438   if (IsStackReg(Op.getMemIndexReg()))
    439     Displacement -= OrigSPOffset * Op.getMemScale();
    440 
    441   assert(Displacement >= 0);
    442 
    443   // Emit Op as is.
    444   if (Displacement == 0) {
    445     EmitLEA(Op, Size, Reg, Out);
    446     return;
    447   }
    448 
    449   int64_t Residue;
    450   std::unique_ptr<X86Operand> NewOp =
    451       AddDisplacement(Op, Displacement, Ctx, &Residue);
    452   EmitLEA(*NewOp, Size, Reg, Out);
    453 
    454   while (Residue != 0) {
    455     const MCConstantExpr *Disp =
    456         MCConstantExpr::create(ApplyDisplacementBounds(Residue), Ctx);
    457     std::unique_ptr<X86Operand> DispOp =
    458         X86Operand::CreateMem(getPointerWidth(), 0, Disp, Reg, 0, 1, SMLoc(),
    459                               SMLoc());
    460     EmitLEA(*DispOp, Size, Reg, Out);
    461     Residue -= Disp->getValue();
    462   }
    463 }
    464 
    465 std::unique_ptr<X86Operand>
    466 X86AddressSanitizer::AddDisplacement(X86Operand &Op, int64_t Displacement,
    467                                      MCContext &Ctx, int64_t *Residue) {
    468   assert(Displacement >= 0);
    469 
    470   if (Displacement == 0 ||
    471       (Op.getMemDisp() && Op.getMemDisp()->getKind() != MCExpr::Constant)) {
    472     *Residue = Displacement;
    473     return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(),
    474                                  Op.getMemDisp(), Op.getMemBaseReg(),
    475                                  Op.getMemIndexReg(), Op.getMemScale(),
    476                                  SMLoc(), SMLoc());
    477   }
    478 
    479   int64_t OrigDisplacement =
    480       static_cast<const MCConstantExpr *>(Op.getMemDisp())->getValue();
    481   CheckDisplacementBounds(OrigDisplacement);
    482   Displacement += OrigDisplacement;
    483 
    484   int64_t NewDisplacement = ApplyDisplacementBounds(Displacement);
    485   CheckDisplacementBounds(NewDisplacement);
    486 
    487   *Residue = Displacement - NewDisplacement;
    488   const MCExpr *Disp = MCConstantExpr::create(NewDisplacement, Ctx);
    489   return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(), Disp,
    490                                Op.getMemBaseReg(), Op.getMemIndexReg(),
    491                                Op.getMemScale(), SMLoc(), SMLoc());
    492 }
    493 
    494 class X86AddressSanitizer32 : public X86AddressSanitizer {
    495 public:
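          // The default 32-bit ASan shadow offset: Shadow = (Addr >> 3) + 0x20000000.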
    496   static const long kShadowOffset = 0x20000000;
    497 
    498   X86AddressSanitizer32(const MCSubtargetInfo *&STI)
    499       : X86AddressSanitizer(STI) {}
    500 
    501   ~X86AddressSanitizer32() override {}
    502 
    503   unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
    504     unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
    505     if (FrameReg == X86::NoRegister)
    506       return FrameReg;
    507     return getX86SubSuperRegister(FrameReg, 32);
    508   }
    509 
    510   void SpillReg(MCStreamer &Out, unsigned Reg) {
    511     EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(Reg));
    512     OrigSPOffset -= 4;
    513   }
    514 
    515   void RestoreReg(MCStreamer &Out, unsigned Reg) {
    516     EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(Reg));
    517     OrigSPOffset += 4;
    518   }
    519 
    520   void StoreFlags(MCStreamer &Out) {
    521     EmitInstruction(Out, MCInstBuilder(X86::PUSHF32));
    522     OrigSPOffset -= 4;
    523   }
    524 
    525   void RestoreFlags(MCStreamer &Out) {
    526     EmitInstruction(Out, MCInstBuilder(X86::POPF32));
    527     OrigSPOffset += 4;
    528   }
    529 
    530   void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
    531                                     MCContext &Ctx,
    532                                     MCStreamer &Out) override {
    533     unsigned LocalFrameReg = RegCtx.ChooseFrameReg(32);
    534     assert(LocalFrameReg != X86::NoRegister);
    535 
    536     const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
    537     unsigned FrameReg = GetFrameReg(Ctx, Out);
    538     if (MRI && FrameReg != X86::NoRegister) {
    539       SpillReg(Out, LocalFrameReg);
    540       if (FrameReg == X86::ESP) {
    541         Out.EmitCFIAdjustCfaOffset(4 /* byte size of the LocalFrameReg */);
    542         Out.EmitCFIRelOffset(
    543             MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
    544       }
    545       EmitInstruction(
    546           Out,
    547           MCInstBuilder(X86::MOV32rr).addReg(LocalFrameReg).addReg(FrameReg));
    548       Out.EmitCFIRememberState();
    549       Out.EmitCFIDefCfaRegister(
    550           MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
    551     }
    552 
    553     SpillReg(Out, RegCtx.AddressReg(32));
    554     SpillReg(Out, RegCtx.ShadowReg(32));
    555     if (RegCtx.ScratchReg(32) != X86::NoRegister)
    556       SpillReg(Out, RegCtx.ScratchReg(32));
    557     StoreFlags(Out);
    558   }
    559 
    560   void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
    561                                     MCContext &Ctx,
    562                                     MCStreamer &Out) override {
    563     unsigned LocalFrameReg = RegCtx.ChooseFrameReg(32);
    564     assert(LocalFrameReg != X86::NoRegister);
    565 
    566     RestoreFlags(Out);
    567     if (RegCtx.ScratchReg(32) != X86::NoRegister)
    568       RestoreReg(Out, RegCtx.ScratchReg(32));
    569     RestoreReg(Out, RegCtx.ShadowReg(32));
    570     RestoreReg(Out, RegCtx.AddressReg(32));
    571 
    572     unsigned FrameReg = GetFrameReg(Ctx, Out);
    573     if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
    574       RestoreReg(Out, LocalFrameReg);
    575       Out.EmitCFIRestoreState();
    576       if (FrameReg == X86::ESP)
    577         Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the LocalFrameReg */);
    578     }
    579   }
    580 
    581   void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
    582                                  bool IsWrite,
    583                                  const RegisterContext &RegCtx,
    584                                  MCContext &Ctx,
    585                                  MCStreamer &Out) override;
    586   void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
    587                                  bool IsWrite,
    588                                  const RegisterContext &RegCtx,
    589                                  MCContext &Ctx,
    590                                  MCStreamer &Out) override;
    591   void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
    592                           MCStreamer &Out) override;
    593 
    594 private:
    595   void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
    596                           MCStreamer &Out, const RegisterContext &RegCtx) {
    597     EmitInstruction(Out, MCInstBuilder(X86::CLD));
    598     EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));
    599 
    600     EmitInstruction(Out, MCInstBuilder(X86::AND32ri8)
    601                              .addReg(X86::ESP)
    602                              .addReg(X86::ESP)
    603                              .addImm(-16));
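            // The 32-bit __asan_report_* routines take the faulting address as a
            // stack argument, so push it before the call.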
    604     EmitInstruction(
    605         Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.AddressReg(32)));
    606 
    607     MCSymbol *FnSym = Ctx.getOrCreateSymbol(llvm::Twine("__asan_report_") +
    608                                             (IsWrite ? "store" : "load") +
    609                                             llvm::Twine(AccessSize));
    610     const MCSymbolRefExpr *FnExpr =
    611         MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
    612     EmitInstruction(Out, MCInstBuilder(X86::CALLpcrel32).addExpr(FnExpr));
    613   }
    614 };
    615 
    616 void X86AddressSanitizer32::InstrumentMemOperandSmall(
    617     X86Operand &Op, unsigned AccessSize, bool IsWrite,
    618     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
    619   unsigned AddressRegI32 = RegCtx.AddressReg(32);
    620   unsigned ShadowRegI32 = RegCtx.ShadowReg(32);
    621   unsigned ShadowRegI8 = RegCtx.ShadowReg(8);
    622 
    623   assert(RegCtx.ScratchReg(32) != X86::NoRegister);
    624   unsigned ScratchRegI32 = RegCtx.ScratchReg(32);
    625 
    626   ComputeMemOperandAddress(Op, 32, AddressRegI32, Ctx, Out);
    627 
    628   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
    629                            AddressRegI32));
    630   EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
    631                            .addReg(ShadowRegI32)
    632                            .addReg(ShadowRegI32)
    633                            .addImm(3));
    634 
    635   {
    636     MCInst Inst;
    637     Inst.setOpcode(X86::MOV8rm);
    638     Inst.addOperand(MCOperand::createReg(ShadowRegI8));
    639     const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    640     std::unique_ptr<X86Operand> Op(
    641         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
    642                               SMLoc(), SMLoc()));
    643     Op->addMemOperands(Inst, 5);
    644     EmitInstruction(Out, Inst);
    645   }
    646 
    647   EmitInstruction(
    648       Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
    649   MCSymbol *DoneSym = Ctx.createTempSymbol();
    650   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
    651   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
    652 
    653   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
    654                            AddressRegI32));
    655   EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
    656                            .addReg(ScratchRegI32)
    657                            .addReg(ScratchRegI32)
    658                            .addImm(7));
    659 
    660   switch (AccessSize) {
    661   default: llvm_unreachable("Incorrect access size");
    662   case 1:
    663     break;
    664   case 2: {
    665     const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
    666     std::unique_ptr<X86Operand> Op(
    667         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
    668                               SMLoc(), SMLoc()));
    669     EmitLEA(*Op, 32, ScratchRegI32, Out);
    670     break;
    671   }
    672   case 4:
    673     EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
    674                              .addReg(ScratchRegI32)
    675                              .addReg(ScratchRegI32)
    676                              .addImm(3));
    677     break;
    678   }
    679 
    680   EmitInstruction(
    681       Out,
    682       MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
    683   EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
    684                            ShadowRegI32));
    685   EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));
    686 
    687   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
    688   EmitLabel(Out, DoneSym);
    689 }
    690 
    691 void X86AddressSanitizer32::InstrumentMemOperandLarge(
    692     X86Operand &Op, unsigned AccessSize, bool IsWrite,
    693     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
    694   unsigned AddressRegI32 = RegCtx.AddressReg(32);
    695   unsigned ShadowRegI32 = RegCtx.ShadowReg(32);
    696 
    697   ComputeMemOperandAddress(Op, 32, AddressRegI32, Ctx, Out);
    698 
    699   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
    700                            AddressRegI32));
    701   EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
    702                            .addReg(ShadowRegI32)
    703                            .addReg(ShadowRegI32)
    704                            .addImm(3));
    705   {
    706     MCInst Inst;
    707     switch (AccessSize) {
    708     default: llvm_unreachable("Incorrect access size");
    709     case 8:
    710       Inst.setOpcode(X86::CMP8mi);
    711       break;
    712     case 16:
    713       Inst.setOpcode(X86::CMP16mi);
    714       break;
    715     }
    716     const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    717     std::unique_ptr<X86Operand> Op(
    718         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
    719                               SMLoc(), SMLoc()));
    720     Op->addMemOperands(Inst, 5);
    721     Inst.addOperand(MCOperand::createImm(0));
    722     EmitInstruction(Out, Inst);
    723   }
    724   MCSymbol *DoneSym = Ctx.createTempSymbol();
    725   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
    726   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
    727 
    728   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
    729   EmitLabel(Out, DoneSym);
    730 }
    731 
    732 void X86AddressSanitizer32::InstrumentMOVSImpl(unsigned AccessSize,
    733                                                MCContext &Ctx,
    734                                                MCStreamer &Out) {
    735   StoreFlags(Out);
    736 
    737   // No need to test when ECX equals zero.
    738   MCSymbol *DoneSym = Ctx.createTempSymbol();
    739   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
    740   EmitInstruction(
    741       Out, MCInstBuilder(X86::TEST32rr).addReg(X86::ECX).addReg(X86::ECX));
    742   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
    743 
    744   // Instrument first and last elements in src and dst range.
    745   InstrumentMOVSBase(X86::EDI /* DstReg */, X86::ESI /* SrcReg */,
    746                      X86::ECX /* CntReg */, AccessSize, Ctx, Out);
    747 
    748   EmitLabel(Out, DoneSym);
    749   RestoreFlags(Out);
    750 }
    751 
    752 class X86AddressSanitizer64 : public X86AddressSanitizer {
    753 public:
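          // The default Linux x86_64 ASan shadow offset: Shadow = (Addr >> 3) + 0x7fff8000.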
    754   static const long kShadowOffset = 0x7fff8000;
    755 
    756   X86AddressSanitizer64(const MCSubtargetInfo *&STI)
    757       : X86AddressSanitizer(STI) {}
    758 
    759   ~X86AddressSanitizer64() override {}
    760 
    761   unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
    762     unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
    763     if (FrameReg == X86::NoRegister)
    764       return FrameReg;
    765     return getX86SubSuperRegister(FrameReg, 64);
    766   }
    767 
    768   void SpillReg(MCStreamer &Out, unsigned Reg) {
    769     EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(Reg));
    770     OrigSPOffset -= 8;
    771   }
    772 
    773   void RestoreReg(MCStreamer &Out, unsigned Reg) {
    774     EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(Reg));
    775     OrigSPOffset += 8;
    776   }
    777 
    778   void StoreFlags(MCStreamer &Out) {
    779     EmitInstruction(Out, MCInstBuilder(X86::PUSHF64));
    780     OrigSPOffset -= 8;
    781   }
    782 
    783   void RestoreFlags(MCStreamer &Out) {
    784     EmitInstruction(Out, MCInstBuilder(X86::POPF64));
    785     OrigSPOffset += 8;
    786   }
    787 
    788   void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
    789                                     MCContext &Ctx,
    790                                     MCStreamer &Out) override {
    791     unsigned LocalFrameReg = RegCtx.ChooseFrameReg(64);
    792     assert(LocalFrameReg != X86::NoRegister);
    793 
    794     const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
    795     unsigned FrameReg = GetFrameReg(Ctx, Out);
    796     if (MRI && FrameReg != X86::NoRegister) {
    797       SpillReg(Out, LocalFrameReg);
    798       if (FrameReg == X86::RSP) {
    799         Out.EmitCFIAdjustCfaOffset(8 /* byte size of the LocalFrameReg */);
    800         Out.EmitCFIRelOffset(
    801             MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
    802       }
    803       EmitInstruction(
    804           Out,
    805           MCInstBuilder(X86::MOV64rr).addReg(LocalFrameReg).addReg(FrameReg));
    806       Out.EmitCFIRememberState();
    807       Out.EmitCFIDefCfaRegister(
    808           MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
    809     }
    810 
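            // Drop below the 128-byte x86-64 red zone before spilling anything, so the
            // spills cannot clobber data a leaf function may keep there.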
    811     EmitAdjustRSP(Ctx, Out, -128);
    812     SpillReg(Out, RegCtx.ShadowReg(64));
    813     SpillReg(Out, RegCtx.AddressReg(64));
    814     if (RegCtx.ScratchReg(64) != X86::NoRegister)
    815       SpillReg(Out, RegCtx.ScratchReg(64));
    816     StoreFlags(Out);
    817   }
    818 
    819   void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
    820                                     MCContext &Ctx,
    821                                     MCStreamer &Out) override {
    822     unsigned LocalFrameReg = RegCtx.ChooseFrameReg(64);
    823     assert(LocalFrameReg != X86::NoRegister);
    824 
    825     RestoreFlags(Out);
    826     if (RegCtx.ScratchReg(64) != X86::NoRegister)
    827       RestoreReg(Out, RegCtx.ScratchReg(64));
    828     RestoreReg(Out, RegCtx.AddressReg(64));
    829     RestoreReg(Out, RegCtx.ShadowReg(64));
    830     EmitAdjustRSP(Ctx, Out, 128);
    831 
    832     unsigned FrameReg = GetFrameReg(Ctx, Out);
    833     if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
    834       RestoreReg(Out, LocalFrameReg);
    835       Out.EmitCFIRestoreState();
    836       if (FrameReg == X86::RSP)
    837         Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the LocalFrameReg */);
    838     }
    839   }
    840 
    841   void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
    842                                  bool IsWrite,
    843                                  const RegisterContext &RegCtx,
    844                                  MCContext &Ctx,
    845                                  MCStreamer &Out) override;
    846   void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
    847                                  bool IsWrite,
    848                                  const RegisterContext &RegCtx,
    849                                  MCContext &Ctx,
    850                                  MCStreamer &Out) override;
    851   void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
    852                           MCStreamer &Out) override;
    853 
    854 private:
    855   void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) {
    856     const MCExpr *Disp = MCConstantExpr::create(Offset, Ctx);
    857     std::unique_ptr<X86Operand> Op(
    858         X86Operand::CreateMem(getPointerWidth(), 0, Disp, X86::RSP, 0, 1,
    859                               SMLoc(), SMLoc()));
    860     EmitLEA(*Op, 64, X86::RSP, Out);
    861     OrigSPOffset += Offset;
    862   }
    863 
    864   void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
    865                           MCStreamer &Out, const RegisterContext &RegCtx) {
    866     EmitInstruction(Out, MCInstBuilder(X86::CLD));
    867     EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));
    868 
    869     EmitInstruction(Out, MCInstBuilder(X86::AND64ri8)
    870                              .addReg(X86::RSP)
    871                              .addReg(X86::RSP)
    872                              .addImm(-16));
    873 
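            // The faulting address is passed in RDI, the first integer argument
            // register in the SysV AMD64 calling convention.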
    874     if (RegCtx.AddressReg(64) != X86::RDI) {
    875       EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RDI).addReg(
    876                                RegCtx.AddressReg(64)));
    877     }
    878     MCSymbol *FnSym = Ctx.getOrCreateSymbol(llvm::Twine("__asan_report_") +
    879                                             (IsWrite ? "store" : "load") +
    880                                             llvm::Twine(AccessSize));
    881     const MCSymbolRefExpr *FnExpr =
    882         MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
    883     EmitInstruction(Out, MCInstBuilder(X86::CALL64pcrel32).addExpr(FnExpr));
    884   }
    885 };
    886 
    887 void X86AddressSanitizer64::InstrumentMemOperandSmall(
    888     X86Operand &Op, unsigned AccessSize, bool IsWrite,
    889     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
    890   unsigned AddressRegI64 = RegCtx.AddressReg(64);
    891   unsigned AddressRegI32 = RegCtx.AddressReg(32);
    892   unsigned ShadowRegI64 = RegCtx.ShadowReg(64);
    893   unsigned ShadowRegI32 = RegCtx.ShadowReg(32);
    894   unsigned ShadowRegI8 = RegCtx.ShadowReg(8);
    895 
    896   assert(RegCtx.ScratchReg(32) != X86::NoRegister);
    897   unsigned ScratchRegI32 = RegCtx.ScratchReg(32);
    898 
    899   ComputeMemOperandAddress(Op, 64, AddressRegI64, Ctx, Out);
    900 
    901   EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
    902                            AddressRegI64));
    903   EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
    904                            .addReg(ShadowRegI64)
    905                            .addReg(ShadowRegI64)
    906                            .addImm(3));
    907   {
    908     MCInst Inst;
    909     Inst.setOpcode(X86::MOV8rm);
    910     Inst.addOperand(MCOperand::createReg(ShadowRegI8));
    911     const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    912     std::unique_ptr<X86Operand> Op(
    913         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
    914                               SMLoc(), SMLoc()));
    915     Op->addMemOperands(Inst, 5);
    916     EmitInstruction(Out, Inst);
    917   }
    918 
    919   EmitInstruction(
    920       Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
    921   MCSymbol *DoneSym = Ctx.createTempSymbol();
    922   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
    923   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
    924 
    925   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
    926                            AddressRegI32));
    927   EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
    928                            .addReg(ScratchRegI32)
    929                            .addReg(ScratchRegI32)
    930                            .addImm(7));
    931 
    932   switch (AccessSize) {
    933   default: llvm_unreachable("Incorrect access size");
    934   case 1:
    935     break;
    936   case 2: {
    937     const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
    938     std::unique_ptr<X86Operand> Op(
    939         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
    940                               SMLoc(), SMLoc()));
    941     EmitLEA(*Op, 32, ScratchRegI32, Out);
    942     break;
    943   }
    944   case 4:
    945     EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
    946                              .addReg(ScratchRegI32)
    947                              .addReg(ScratchRegI32)
    948                              .addImm(3));
    949     break;
    950   }
    951 
    952   EmitInstruction(
    953       Out,
    954       MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
    955   EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
    956                            ShadowRegI32));
    957   EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));
    958 
    959   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
    960   EmitLabel(Out, DoneSym);
    961 }
    962 
    963 void X86AddressSanitizer64::InstrumentMemOperandLarge(
    964     X86Operand &Op, unsigned AccessSize, bool IsWrite,
    965     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
    966   unsigned AddressRegI64 = RegCtx.AddressReg(64);
    967   unsigned ShadowRegI64 = RegCtx.ShadowReg(64);
    968 
    969   ComputeMemOperandAddress(Op, 64, AddressRegI64, Ctx, Out);
    970 
    971   EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
    972                            AddressRegI64));
    973   EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
    974                            .addReg(ShadowRegI64)
    975                            .addReg(ShadowRegI64)
    976                            .addImm(3));
    977   {
    978     MCInst Inst;
    979     switch (AccessSize) {
    980     default: llvm_unreachable("Incorrect access size");
    981     case 8:
    982       Inst.setOpcode(X86::CMP8mi);
    983       break;
    984     case 16:
    985       Inst.setOpcode(X86::CMP16mi);
    986       break;
    987     }
    988     const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    989     std::unique_ptr<X86Operand> Op(
    990         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
    991                               SMLoc(), SMLoc()));
    992     Op->addMemOperands(Inst, 5);
    993     Inst.addOperand(MCOperand::createImm(0));
    994     EmitInstruction(Out, Inst);
    995   }
    996 
    997   MCSymbol *DoneSym = Ctx.createTempSymbol();
    998   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
    999   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
   1000 
   1001   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
   1002   EmitLabel(Out, DoneSym);
   1003 }
   1004 
   1005 void X86AddressSanitizer64::InstrumentMOVSImpl(unsigned AccessSize,
   1006                                                MCContext &Ctx,
   1007                                                MCStreamer &Out) {
   1008   StoreFlags(Out);
   1009 
   1010   // No need to test when RCX equals zero.
   1011   MCSymbol *DoneSym = Ctx.createTempSymbol();
   1012   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
   1013   EmitInstruction(
   1014       Out, MCInstBuilder(X86::TEST64rr).addReg(X86::RCX).addReg(X86::RCX));
   1015   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
   1016 
   1017   // Instrument first and last elements in src and dst range.
   1018   InstrumentMOVSBase(X86::RDI /* DstReg */, X86::RSI /* SrcReg */,
   1019                      X86::RCX /* CntReg */, AccessSize, Ctx, Out);
   1020 
   1021   EmitLabel(Out, DoneSym);
   1022   RestoreFlags(Out);
   1023 }
   1024 
   1025 } // End anonymous namespace
   1026 
   1027 X86AsmInstrumentation::X86AsmInstrumentation(const MCSubtargetInfo *&STI)
   1028     : STI(STI), InitialFrameReg(0) {}
   1029 
   1030 X86AsmInstrumentation::~X86AsmInstrumentation() {}
   1031 
   1032 void X86AsmInstrumentation::InstrumentAndEmitInstruction(
   1033     const MCInst &Inst, OperandVector &Operands, MCContext &Ctx,
   1034     const MCInstrInfo &MII, MCStreamer &Out) {
   1035   EmitInstruction(Out, Inst);
   1036 }
   1037 
   1038 void X86AsmInstrumentation::EmitInstruction(MCStreamer &Out,
   1039                                             const MCInst &Inst) {
   1040   Out.EmitInstruction(Inst, *STI);
   1041 }
   1042 
   1043 unsigned X86AsmInstrumentation::GetFrameRegGeneric(const MCContext &Ctx,
   1044                                                    MCStreamer &Out) {
   1045   if (!Out.getNumFrameInfos()) // No active dwarf frame
   1046     return X86::NoRegister;
   1047   const MCDwarfFrameInfo &Frame = Out.getDwarfFrameInfos().back();
   1048   if (Frame.End) // Active dwarf frame is closed
   1049     return X86::NoRegister;
   1050   const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
   1051   if (!MRI) // No register info
   1052     return X86::NoRegister;
   1053 
   1054   if (InitialFrameReg) {
   1055     // FrameReg is set explicitly; we're instrumenting a MachineFunction.
   1056     return InitialFrameReg;
   1057   }
   1058 
   1059   return MRI->getLLVMRegNum(Frame.CurrentCfaRegister, true /* IsEH */);
   1060 }
   1061 
   1062 X86AsmInstrumentation *
   1063 CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
   1064                             const MCContext &Ctx, const MCSubtargetInfo *&STI) {
   1065   Triple T(STI->getTargetTriple());
   1066   const bool hasCompilerRTSupport = T.isOSLinux();
   1067   if (ClAsanInstrumentAssembly && hasCompilerRTSupport &&
   1068       MCOptions.SanitizeAddress) {
   1069     if (STI->getFeatureBits()[X86::Mode32Bit] != 0)
   1070       return new X86AddressSanitizer32(STI);
   1071     if (STI->getFeatureBits()[X86::Mode64Bit] != 0)
   1072       return new X86AddressSanitizer64(STI);
   1073   }
   1074   return new X86AsmInstrumentation(STI);
   1075 }
   1076 
   1077 } // end llvm namespace
   1078