      1 //===-- X86AsmInstrumentation.cpp - Instrument X86 inline assembly C++ -*-===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 
     10 #include "MCTargetDesc/X86BaseInfo.h"
     11 #include "X86AsmInstrumentation.h"
     12 #include "X86Operand.h"
     13 #include "X86RegisterInfo.h"
     14 #include "llvm/ADT/StringExtras.h"
     15 #include "llvm/ADT/Triple.h"
     16 #include "llvm/CodeGen/MachineValueType.h"
     17 #include "llvm/MC/MCAsmInfo.h"
     18 #include "llvm/MC/MCContext.h"
     19 #include "llvm/MC/MCInst.h"
     20 #include "llvm/MC/MCInstBuilder.h"
     21 #include "llvm/MC/MCInstrInfo.h"
     22 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
     23 #include "llvm/MC/MCStreamer.h"
     24 #include "llvm/MC/MCSubtargetInfo.h"
     25 #include "llvm/MC/MCTargetAsmParser.h"
     26 #include "llvm/MC/MCTargetOptions.h"
     27 #include "llvm/Support/CommandLine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <vector>
     31 
// The following comment describes how assembly instrumentation works.
// Currently only AddressSanitizer instrumentation is implemented, but we're
// planning to support MemorySanitizer for inline assembly too. If
// you're not familiar with the AddressSanitizer algorithm, please read
// https://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm.
//
// When inline assembly is parsed by an instance of X86AsmParser, all
// instructions are emitted via the EmitInstruction method. That's the
// place where X86AsmInstrumentation analyzes an instruction and decides
// whether it should be emitted as is or whether instrumentation is
// required. The latter case happens when an instruction reads from or
// writes to memory. Currently the instruction opcode is checked
// explicitly, and if the instruction has an explicit memory operand (for
// instance, movq (%rsi, %rcx, 8), %rax), it should be instrumented.
// There also exist instructions that modify memory but don't have an
// explicit memory operand, for instance, movs.
     49 //
// Let's first consider 8-byte memory accesses where the instruction has
// an explicit memory operand. In this case we need two registers:
// AddressReg to compute the address of the accessed memory cells and
// ShadowReg to compute the corresponding shadow address. So we need to
// spill both registers before the instrumentation code and restore them
// after it. Thus, in general, the instrumentation code will
// look like this:
     57 // PUSHF  # Store flags, otherwise they will be overwritten
     58 // PUSH AddressReg  # spill AddressReg
     59 // PUSH ShadowReg   # spill ShadowReg
     60 // LEA MemOp, AddressReg  # compute address of the memory operand
     61 // MOV AddressReg, ShadowReg
     62 // SHR ShadowReg, 3
     63 // # ShadowOffset(AddressReg >> 3) contains address of a shadow
     64 // # corresponding to MemOp.
     65 // CMP ShadowOffset(ShadowReg), 0  # test shadow value
// JZ .Done  # when the shadow value is zero, everything is fine
     67 // MOV AddressReg, RDI
     68 // # Call __asan_report function with AddressReg as an argument
     69 // CALL __asan_report
     70 // .Done:
     71 // POP ShadowReg  # Restore ShadowReg
     72 // POP AddressReg  # Restore AddressReg
     73 // POPF  # Restore flags
     74 //
// Memory accesses of other sizes (1, 2, 4 and 16 bytes) are handled in a
// similar manner, but small memory accesses (less than 8 bytes) require
// an additional ScratchReg, which is used for the shadow value.
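//
// As a concrete sketch (mirroring InstrumentMemOperandSmall below, with
// ShadowByte standing for the low byte of ShadowReg), a 4-byte access is
// checked roughly like this:
// MOV AddressReg, ShadowReg
// SHR ShadowReg, 3
// MOV ShadowOffset(ShadowReg), ShadowByte  # load the shadow byte
// TEST ShadowByte, ShadowByte
// JZ .Done                  # zero shadow: the whole 8-byte granule is OK
// MOV AddressReg, ScratchReg
// AND ScratchReg, 7         # offset of the access within its granule
// ADD ScratchReg, 3         # offset of the last accessed byte (AccessSize - 1)
// CMP ScratchReg, ShadowByte (sign-extended)
// JL .Done                  # last accessed byte lies below the shadow value
// CALL __asan_report_load4
// .Done: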
     78 //
// If we're instrumenting an instruction like movs, only the contents of
// RDI, RDI + AccessSize * RCX, RSI and RSI + AccessSize * RCX are checked.
// In this case there is no need to spill and restore AddressReg, ShadowReg
// or the flags four times; they're saved on the stack just once, before the
// instrumentation of these four addresses, and restored at the end of the
// instrumentation.
     85 //
// Several things complicate this simple algorithm.
// * The instrumented memory operand can have RSP as a base or an index
//   register.  In that case we need to add a constant offset when computing
//   the memory address, since flags, AddressReg, ShadowReg, etc. have
//   already been stored on the stack and RSP has been modified.
// * Debug info (usually DWARF) should be adjusted, because sometimes RSP is
//   used as a frame register. So we need to select some register as a frame
//   register and temporarily override the current CFA register.
     95 
     96 namespace llvm {
     97 namespace {
     98 
     99 static cl::opt<bool> ClAsanInstrumentAssembly(
    100     "asan-instrument-assembly",
    101     cl::desc("instrument assembly with AddressSanitizer checks"), cl::Hidden,
    102     cl::init(false));
    103 
    104 const int64_t MinAllowedDisplacement = std::numeric_limits<int32_t>::min();
    105 const int64_t MaxAllowedDisplacement = std::numeric_limits<int32_t>::max();
    106 
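// Clamps Displacement into the range representable by a 32-bit signed
// displacement, [MinAllowedDisplacement, MaxAllowedDisplacement]. For
// example, ApplyDisplacementBounds(1LL << 40) returns MaxAllowedDisplacement,
// while values already inside the range are returned unchanged.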
    107 int64_t ApplyDisplacementBounds(int64_t Displacement) {
    108   return std::max(std::min(MaxAllowedDisplacement, Displacement),
    109                   MinAllowedDisplacement);
    110 }
    111 
    112 void CheckDisplacementBounds(int64_t Displacement) {
    113   assert(Displacement >= MinAllowedDisplacement &&
    114          Displacement <= MaxAllowedDisplacement);
    115 }
    116 
    117 bool IsStackReg(unsigned Reg) { return Reg == X86::RSP || Reg == X86::ESP; }
    118 
    119 bool IsSmallMemAccess(unsigned AccessSize) { return AccessSize < 8; }
    120 
    121 class X86AddressSanitizer : public X86AsmInstrumentation {
    122 public:
    123   struct RegisterContext {
    124   private:
    125     enum RegOffset {
    126       REG_OFFSET_ADDRESS = 0,
    127       REG_OFFSET_SHADOW,
    128       REG_OFFSET_SCRATCH
    129     };
    130 
    131   public:
    132     RegisterContext(unsigned AddressReg, unsigned ShadowReg,
    133                     unsigned ScratchReg) {
    134       BusyRegs.push_back(convReg(AddressReg, MVT::i64));
    135       BusyRegs.push_back(convReg(ShadowReg, MVT::i64));
    136       BusyRegs.push_back(convReg(ScratchReg, MVT::i64));
    137     }
    138 
    139     unsigned AddressReg(MVT::SimpleValueType VT) const {
    140       return convReg(BusyRegs[REG_OFFSET_ADDRESS], VT);
    141     }
    142 
    143     unsigned ShadowReg(MVT::SimpleValueType VT) const {
    144       return convReg(BusyRegs[REG_OFFSET_SHADOW], VT);
    145     }
    146 
    147     unsigned ScratchReg(MVT::SimpleValueType VT) const {
    148       return convReg(BusyRegs[REG_OFFSET_SCRATCH], VT);
    149     }
    150 
    151     void AddBusyReg(unsigned Reg) {
    152       if (Reg != X86::NoRegister)
    153         BusyRegs.push_back(convReg(Reg, MVT::i64));
    154     }
    155 
    156     void AddBusyRegs(const X86Operand &Op) {
    157       AddBusyReg(Op.getMemBaseReg());
    158       AddBusyReg(Op.getMemIndexReg());
    159     }
    160 
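    // Returns the first candidate register that is not already listed in
    // BusyRegs, converted to the requested width VT, or X86::NoRegister if
    // all candidates are busy.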
    161     unsigned ChooseFrameReg(MVT::SimpleValueType VT) const {
    162       static const MCPhysReg Candidates[] = { X86::RBP, X86::RAX, X86::RBX,
    163                                               X86::RCX, X86::RDX, X86::RDI,
    164                                               X86::RSI };
    165       for (unsigned Reg : Candidates) {
    166         if (!std::count(BusyRegs.begin(), BusyRegs.end(), Reg))
    167           return convReg(Reg, VT);
    168       }
    169       return X86::NoRegister;
    170     }
    171 
    172   private:
    173     unsigned convReg(unsigned Reg, MVT::SimpleValueType VT) const {
    174       return Reg == X86::NoRegister ? Reg : getX86SubSuperRegister(Reg, VT);
    175     }
    176 
    177     std::vector<unsigned> BusyRegs;
    178   };
    179 
    180   X86AddressSanitizer(const MCSubtargetInfo *&STI)
    181       : X86AsmInstrumentation(STI), RepPrefix(false), OrigSPOffset(0) {}
    182 
    183   ~X86AddressSanitizer() override {}
    184 
    185   // X86AsmInstrumentation implementation:
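  // Note on REP handling: a bare REP_PREFIX is not emitted immediately; it
  // is remembered in RepPrefix and re-emitted just before the following
  // instruction, after any MOVS instrumentation for that instruction. This
  // keeps the checks in front of the whole "rep movs" pair while the prefix
  // stays adjacent to the string instruction it modifies.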
    186   void InstrumentAndEmitInstruction(const MCInst &Inst,
    187                                     OperandVector &Operands,
    188                                     MCContext &Ctx,
    189                                     const MCInstrInfo &MII,
    190                                     MCStreamer &Out) override {
    191     InstrumentMOVS(Inst, Operands, Ctx, MII, Out);
    192     if (RepPrefix)
    193       EmitInstruction(Out, MCInstBuilder(X86::REP_PREFIX));
    194 
    195     InstrumentMOV(Inst, Operands, Ctx, MII, Out);
    196 
    197     RepPrefix = (Inst.getOpcode() == X86::REP_PREFIX);
    198     if (!RepPrefix)
    199       EmitInstruction(Out, Inst);
    200   }
    201 
  // Adjusts the stack and saves all registers used in instrumentation.
    203   virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
    204                                             MCContext &Ctx,
    205                                             MCStreamer &Out) = 0;
    206 
  // Restores all registers used in instrumentation and adjusts the stack.
    208   virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
    209                                             MCContext &Ctx,
    210                                             MCStreamer &Out) = 0;
    211 
    212   virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
    213                                          bool IsWrite,
    214                                          const RegisterContext &RegCtx,
    215                                          MCContext &Ctx, MCStreamer &Out) = 0;
    216   virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
    217                                          bool IsWrite,
    218                                          const RegisterContext &RegCtx,
    219                                          MCContext &Ctx, MCStreamer &Out) = 0;
    220 
    221   virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
    222                                   MCStreamer &Out) = 0;
    223 
    224   void InstrumentMemOperand(X86Operand &Op, unsigned AccessSize, bool IsWrite,
    225                             const RegisterContext &RegCtx, MCContext &Ctx,
    226                             MCStreamer &Out);
    227   void InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg, unsigned CntReg,
    228                           unsigned AccessSize, MCContext &Ctx, MCStreamer &Out);
    229 
    230   void InstrumentMOVS(const MCInst &Inst, OperandVector &Operands,
    231                       MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
    232   void InstrumentMOV(const MCInst &Inst, OperandVector &Operands,
    233                      MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
    234 
    235 protected:
    236   void EmitLabel(MCStreamer &Out, MCSymbol *Label) { Out.EmitLabel(Label); }
    237 
    238   void EmitLEA(X86Operand &Op, MVT::SimpleValueType VT, unsigned Reg,
    239                MCStreamer &Out) {
    240     assert(VT == MVT::i32 || VT == MVT::i64);
    241     MCInst Inst;
    242     Inst.setOpcode(VT == MVT::i32 ? X86::LEA32r : X86::LEA64r);
    243     Inst.addOperand(MCOperand::createReg(getX86SubSuperRegister(Reg, VT)));
    244     Op.addMemOperands(Inst, 5);
    245     EmitInstruction(Out, Inst);
    246   }
    247 
    248   void ComputeMemOperandAddress(X86Operand &Op, MVT::SimpleValueType VT,
    249                                 unsigned Reg, MCContext &Ctx, MCStreamer &Out);
    250 
  // Creates a new memory operand with Displacement added to the original
  // displacement. If the total displacement does not fit into 32 bits, the
  // new displacement is clamped and *Residue receives the remainder.
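  // For example, if the operand's constant displacement is INT32_MAX and
  // Displacement is 16, the returned operand keeps INT32_MAX and *Residue is
  // set to 16; ComputeMemOperandAddress later folds the residue in with
  // additional LEA instructions.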
    254   std::unique_ptr<X86Operand> AddDisplacement(X86Operand &Op,
    255                                               int64_t Displacement,
    256                                               MCContext &Ctx, int64_t *Residue);
    257 
    258   bool is64BitMode() const {
    259     return STI->getFeatureBits()[X86::Mode64Bit];
    260   }
    261   bool is32BitMode() const {
    262     return STI->getFeatureBits()[X86::Mode32Bit];
    263   }
    264   bool is16BitMode() const {
    265     return STI->getFeatureBits()[X86::Mode16Bit];
    266   }
    267 
    268   unsigned getPointerWidth() {
    269     if (is16BitMode()) return 16;
    270     if (is32BitMode()) return 32;
    271     if (is64BitMode()) return 64;
    272     llvm_unreachable("invalid mode");
    273   }
    274 
  // True when the previous instruction was actually a REP prefix.
    276   bool RepPrefix;
    277 
    278   // Offset from the original SP register.
    279   int64_t OrigSPOffset;
    280 };
    281 
    282 void X86AddressSanitizer::InstrumentMemOperand(
    283     X86Operand &Op, unsigned AccessSize, bool IsWrite,
    284     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
    285   assert(Op.isMem() && "Op should be a memory operand.");
  assert((AccessSize & (AccessSize - 1)) == 0 && AccessSize <= 16 &&
         "AccessSize should be a power of two and less than or equal to 16.");
    288   // FIXME: take into account load/store alignment.
    289   if (IsSmallMemAccess(AccessSize))
    290     InstrumentMemOperandSmall(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
    291   else
    292     InstrumentMemOperandLarge(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
    293 }
    294 
    295 void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg,
    296                                              unsigned CntReg,
    297                                              unsigned AccessSize,
    298                                              MCContext &Ctx, MCStreamer &Out) {
    299   // FIXME: check whole ranges [DstReg .. DstReg + AccessSize * (CntReg - 1)]
    300   // and [SrcReg .. SrcReg + AccessSize * (CntReg - 1)].
    301   RegisterContext RegCtx(X86::RDX /* AddressReg */, X86::RAX /* ShadowReg */,
    302                          IsSmallMemAccess(AccessSize)
    303                              ? X86::RBX
    304                              : X86::NoRegister /* ScratchReg */);
    305   RegCtx.AddBusyReg(DstReg);
    306   RegCtx.AddBusyReg(SrcReg);
    307   RegCtx.AddBusyReg(CntReg);
    308 
    309   InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
    310 
    311   // Test (%SrcReg)
    312   {
    313     const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
    314     std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
    315         getPointerWidth(), 0, Disp, SrcReg, 0, AccessSize, SMLoc(), SMLoc()));
    316     InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
    317                          Out);
    318   }
    319 
    320   // Test -1(%SrcReg, %CntReg, AccessSize)
    321   {
    322     const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
    323     std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
    324         getPointerWidth(), 0, Disp, SrcReg, CntReg, AccessSize, SMLoc(),
    325         SMLoc()));
    326     InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
    327                          Out);
    328   }
    329 
    330   // Test (%DstReg)
    331   {
    332     const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
    333     std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
    334         getPointerWidth(), 0, Disp, DstReg, 0, AccessSize, SMLoc(), SMLoc()));
    335     InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
    336   }
    337 
    338   // Test -1(%DstReg, %CntReg, AccessSize)
    339   {
    340     const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
    341     std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
    342         getPointerWidth(), 0, Disp, DstReg, CntReg, AccessSize, SMLoc(),
    343         SMLoc()));
    344     InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
    345   }
    346 
    347   InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
    348 }
    349 
    350 void X86AddressSanitizer::InstrumentMOVS(const MCInst &Inst,
    351                                          OperandVector &Operands,
    352                                          MCContext &Ctx, const MCInstrInfo &MII,
    353                                          MCStreamer &Out) {
    354   // Access size in bytes.
    355   unsigned AccessSize = 0;
    356 
    357   switch (Inst.getOpcode()) {
    358   case X86::MOVSB:
    359     AccessSize = 1;
    360     break;
    361   case X86::MOVSW:
    362     AccessSize = 2;
    363     break;
    364   case X86::MOVSL:
    365     AccessSize = 4;
    366     break;
    367   case X86::MOVSQ:
    368     AccessSize = 8;
    369     break;
    370   default:
    371     return;
    372   }
    373 
    374   InstrumentMOVSImpl(AccessSize, Ctx, Out);
    375 }
    376 
    377 void X86AddressSanitizer::InstrumentMOV(const MCInst &Inst,
    378                                         OperandVector &Operands, MCContext &Ctx,
    379                                         const MCInstrInfo &MII,
    380                                         MCStreamer &Out) {
    381   // Access size in bytes.
    382   unsigned AccessSize = 0;
    383 
    384   switch (Inst.getOpcode()) {
    385   case X86::MOV8mi:
    386   case X86::MOV8mr:
    387   case X86::MOV8rm:
    388     AccessSize = 1;
    389     break;
    390   case X86::MOV16mi:
    391   case X86::MOV16mr:
    392   case X86::MOV16rm:
    393     AccessSize = 2;
    394     break;
    395   case X86::MOV32mi:
    396   case X86::MOV32mr:
    397   case X86::MOV32rm:
    398     AccessSize = 4;
    399     break;
    400   case X86::MOV64mi32:
    401   case X86::MOV64mr:
    402   case X86::MOV64rm:
    403     AccessSize = 8;
    404     break;
    405   case X86::MOVAPDmr:
    406   case X86::MOVAPSmr:
    407   case X86::MOVAPDrm:
    408   case X86::MOVAPSrm:
    409     AccessSize = 16;
    410     break;
    411   default:
    412     return;
    413   }
    414 
    415   const bool IsWrite = MII.get(Inst.getOpcode()).mayStore();
    416 
    417   for (unsigned Ix = 0; Ix < Operands.size(); ++Ix) {
    418     assert(Operands[Ix]);
    419     MCParsedAsmOperand &Op = *Operands[Ix];
    420     if (Op.isMem()) {
    421       X86Operand &MemOp = static_cast<X86Operand &>(Op);
    422       RegisterContext RegCtx(
    423           X86::RDI /* AddressReg */, X86::RAX /* ShadowReg */,
    424           IsSmallMemAccess(AccessSize) ? X86::RCX
    425                                        : X86::NoRegister /* ScratchReg */);
    426       RegCtx.AddBusyRegs(MemOp);
    427       InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
    428       InstrumentMemOperand(MemOp, AccessSize, IsWrite, RegCtx, Ctx, Out);
    429       InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
    430     }
    431   }
    432 }
    433 
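// Emits LEA instruction(s) that materialize the address of Op into Reg. If Op
// uses the stack pointer as a base or index register, the displacement is
// first adjusted by -OrigSPOffset to compensate for the spills performed by
// the instrumentation prologue; displacements that no longer fit into 32 bits
// are folded in with additional LEA instructions.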
    434 void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op,
    435                                                    MVT::SimpleValueType VT,
    436                                                    unsigned Reg, MCContext &Ctx,
    437                                                    MCStreamer &Out) {
    438   int64_t Displacement = 0;
    439   if (IsStackReg(Op.getMemBaseReg()))
    440     Displacement -= OrigSPOffset;
    441   if (IsStackReg(Op.getMemIndexReg()))
    442     Displacement -= OrigSPOffset * Op.getMemScale();
    443 
    444   assert(Displacement >= 0);
    445 
    446   // Emit Op as is.
    447   if (Displacement == 0) {
    448     EmitLEA(Op, VT, Reg, Out);
    449     return;
    450   }
    451 
    452   int64_t Residue;
    453   std::unique_ptr<X86Operand> NewOp =
    454       AddDisplacement(Op, Displacement, Ctx, &Residue);
    455   EmitLEA(*NewOp, VT, Reg, Out);
    456 
    457   while (Residue != 0) {
    458     const MCConstantExpr *Disp =
    459         MCConstantExpr::create(ApplyDisplacementBounds(Residue), Ctx);
    460     std::unique_ptr<X86Operand> DispOp =
    461         X86Operand::CreateMem(getPointerWidth(), 0, Disp, Reg, 0, 1, SMLoc(),
    462                               SMLoc());
    463     EmitLEA(*DispOp, VT, Reg, Out);
    464     Residue -= Disp->getValue();
    465   }
    466 }
    467 
    468 std::unique_ptr<X86Operand>
    469 X86AddressSanitizer::AddDisplacement(X86Operand &Op, int64_t Displacement,
    470                                      MCContext &Ctx, int64_t *Residue) {
    471   assert(Displacement >= 0);
    472 
    473   if (Displacement == 0 ||
    474       (Op.getMemDisp() && Op.getMemDisp()->getKind() != MCExpr::Constant)) {
    475     *Residue = Displacement;
    476     return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(),
    477                                  Op.getMemDisp(), Op.getMemBaseReg(),
    478                                  Op.getMemIndexReg(), Op.getMemScale(),
    479                                  SMLoc(), SMLoc());
    480   }
    481 
    482   int64_t OrigDisplacement =
    483       static_cast<const MCConstantExpr *>(Op.getMemDisp())->getValue();
    484   CheckDisplacementBounds(OrigDisplacement);
    485   Displacement += OrigDisplacement;
    486 
    487   int64_t NewDisplacement = ApplyDisplacementBounds(Displacement);
    488   CheckDisplacementBounds(NewDisplacement);
    489 
    490   *Residue = Displacement - NewDisplacement;
    491   const MCExpr *Disp = MCConstantExpr::create(NewDisplacement, Ctx);
    492   return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(), Disp,
    493                                Op.getMemBaseReg(), Op.getMemIndexReg(),
    494                                Op.getMemScale(), SMLoc(), SMLoc());
    495 }
    496 
    497 class X86AddressSanitizer32 : public X86AddressSanitizer {
    498 public:
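  // Shadow mapping used below: ShadowAddr = (Addr >> 3) + kShadowOffset.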
    499   static const long kShadowOffset = 0x20000000;
    500 
    501   X86AddressSanitizer32(const MCSubtargetInfo *&STI)
    502       : X86AddressSanitizer(STI) {}
    503 
    504   ~X86AddressSanitizer32() override {}
    505 
    506   unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
    507     unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
    508     if (FrameReg == X86::NoRegister)
    509       return FrameReg;
    510     return getX86SubSuperRegister(FrameReg, MVT::i32);
    511   }
    512 
    513   void SpillReg(MCStreamer &Out, unsigned Reg) {
    514     EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(Reg));
    515     OrigSPOffset -= 4;
    516   }
    517 
    518   void RestoreReg(MCStreamer &Out, unsigned Reg) {
    519     EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(Reg));
    520     OrigSPOffset += 4;
    521   }
    522 
    523   void StoreFlags(MCStreamer &Out) {
    524     EmitInstruction(Out, MCInstBuilder(X86::PUSHF32));
    525     OrigSPOffset -= 4;
    526   }
    527 
    528   void RestoreFlags(MCStreamer &Out) {
    529     EmitInstruction(Out, MCInstBuilder(X86::POPF32));
    530     OrigSPOffset += 4;
    531   }
    532 
    533   void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
    534                                     MCContext &Ctx,
    535                                     MCStreamer &Out) override {
    536     unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i32);
    537     assert(LocalFrameReg != X86::NoRegister);
    538 
    539     const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
    540     unsigned FrameReg = GetFrameReg(Ctx, Out);
    541     if (MRI && FrameReg != X86::NoRegister) {
    542       SpillReg(Out, LocalFrameReg);
    543       if (FrameReg == X86::ESP) {
    544         Out.EmitCFIAdjustCfaOffset(4 /* byte size of the LocalFrameReg */);
    545         Out.EmitCFIRelOffset(
    546             MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
    547       }
    548       EmitInstruction(
    549           Out,
    550           MCInstBuilder(X86::MOV32rr).addReg(LocalFrameReg).addReg(FrameReg));
    551       Out.EmitCFIRememberState();
    552       Out.EmitCFIDefCfaRegister(
    553           MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
    554     }
    555 
    556     SpillReg(Out, RegCtx.AddressReg(MVT::i32));
    557     SpillReg(Out, RegCtx.ShadowReg(MVT::i32));
    558     if (RegCtx.ScratchReg(MVT::i32) != X86::NoRegister)
    559       SpillReg(Out, RegCtx.ScratchReg(MVT::i32));
    560     StoreFlags(Out);
    561   }
    562 
    563   void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
    564                                     MCContext &Ctx,
    565                                     MCStreamer &Out) override {
    566     unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i32);
    567     assert(LocalFrameReg != X86::NoRegister);
    568 
    569     RestoreFlags(Out);
    570     if (RegCtx.ScratchReg(MVT::i32) != X86::NoRegister)
    571       RestoreReg(Out, RegCtx.ScratchReg(MVT::i32));
    572     RestoreReg(Out, RegCtx.ShadowReg(MVT::i32));
    573     RestoreReg(Out, RegCtx.AddressReg(MVT::i32));
    574 
    575     unsigned FrameReg = GetFrameReg(Ctx, Out);
    576     if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
    577       RestoreReg(Out, LocalFrameReg);
    578       Out.EmitCFIRestoreState();
    579       if (FrameReg == X86::ESP)
    580         Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the LocalFrameReg */);
    581     }
    582   }
    583 
    584   void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
    585                                  bool IsWrite,
    586                                  const RegisterContext &RegCtx,
    587                                  MCContext &Ctx,
    588                                  MCStreamer &Out) override;
    589   void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
    590                                  bool IsWrite,
    591                                  const RegisterContext &RegCtx,
    592                                  MCContext &Ctx,
    593                                  MCStreamer &Out) override;
    594   void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
    595                           MCStreamer &Out) override;
    596 
    597 private:
    598   void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
    599                           MCStreamer &Out, const RegisterContext &RegCtx) {
    600     EmitInstruction(Out, MCInstBuilder(X86::CLD));
    601     EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));
    602 
    603     EmitInstruction(Out, MCInstBuilder(X86::AND64ri8)
    604                              .addReg(X86::ESP)
    605                              .addReg(X86::ESP)
    606                              .addImm(-16));
    607     EmitInstruction(
    608         Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.AddressReg(MVT::i32)));
    609 
    610     MCSymbol *FnSym = Ctx.getOrCreateSymbol(llvm::Twine("__asan_report_") +
    611                                             (IsWrite ? "store" : "load") +
    612                                             llvm::Twine(AccessSize));
    613     const MCSymbolRefExpr *FnExpr =
    614         MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
    615     EmitInstruction(Out, MCInstBuilder(X86::CALLpcrel32).addExpr(FnExpr));
    616   }
    617 };
    618 
    619 void X86AddressSanitizer32::InstrumentMemOperandSmall(
    620     X86Operand &Op, unsigned AccessSize, bool IsWrite,
    621     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
    622   unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
    623   unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);
    624   unsigned ShadowRegI8 = RegCtx.ShadowReg(MVT::i8);
    625 
    626   assert(RegCtx.ScratchReg(MVT::i32) != X86::NoRegister);
    627   unsigned ScratchRegI32 = RegCtx.ScratchReg(MVT::i32);
    628 
    629   ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out);
    630 
    631   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
    632                            AddressRegI32));
    633   EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
    634                            .addReg(ShadowRegI32)
    635                            .addReg(ShadowRegI32)
    636                            .addImm(3));
    637 
    638   {
    639     MCInst Inst;
    640     Inst.setOpcode(X86::MOV8rm);
    641     Inst.addOperand(MCOperand::createReg(ShadowRegI8));
    642     const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    643     std::unique_ptr<X86Operand> Op(
    644         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
    645                               SMLoc(), SMLoc()));
    646     Op->addMemOperands(Inst, 5);
    647     EmitInstruction(Out, Inst);
    648   }
    649 
    650   EmitInstruction(
    651       Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
    652   MCSymbol *DoneSym = Ctx.createTempSymbol();
    653   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
    654   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
    655 
    656   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
    657                            AddressRegI32));
    658   EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
    659                            .addReg(ScratchRegI32)
    660                            .addReg(ScratchRegI32)
    661                            .addImm(7));
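  // ScratchReg now holds the offset of the access within its 8-byte shadow
  // granule; the switch below advances it to the offset of the last accessed
  // byte of the access (AccessSize - 1 more).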
    662 
    663   switch (AccessSize) {
    664   default: llvm_unreachable("Incorrect access size");
    665   case 1:
    666     break;
    667   case 2: {
    668     const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
    669     std::unique_ptr<X86Operand> Op(
    670         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
    671                               SMLoc(), SMLoc()));
    672     EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
    673     break;
    674   }
    675   case 4:
    676     EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
    677                              .addReg(ScratchRegI32)
    678                              .addReg(ScratchRegI32)
    679                              .addImm(3));
    680     break;
    681   }
    682 
    683   EmitInstruction(
    684       Out,
    685       MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
    686   EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
    687                            ShadowRegI32));
    688   EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));
    689 
    690   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
    691   EmitLabel(Out, DoneSym);
    692 }
    693 
    694 void X86AddressSanitizer32::InstrumentMemOperandLarge(
    695     X86Operand &Op, unsigned AccessSize, bool IsWrite,
    696     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
    697   unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
    698   unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);
    699 
    700   ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out);
    701 
    702   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
    703                            AddressRegI32));
    704   EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
    705                            .addReg(ShadowRegI32)
    706                            .addReg(ShadowRegI32)
    707                            .addImm(3));
    708   {
    709     MCInst Inst;
    710     switch (AccessSize) {
    711     default: llvm_unreachable("Incorrect access size");
    712     case 8:
    713       Inst.setOpcode(X86::CMP8mi);
    714       break;
    715     case 16:
    716       Inst.setOpcode(X86::CMP16mi);
    717       break;
    718     }
    719     const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    720     std::unique_ptr<X86Operand> Op(
    721         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
    722                               SMLoc(), SMLoc()));
    723     Op->addMemOperands(Inst, 5);
    724     Inst.addOperand(MCOperand::createImm(0));
    725     EmitInstruction(Out, Inst);
    726   }
    727   MCSymbol *DoneSym = Ctx.createTempSymbol();
    728   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
    729   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
    730 
    731   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
    732   EmitLabel(Out, DoneSym);
    733 }
    734 
    735 void X86AddressSanitizer32::InstrumentMOVSImpl(unsigned AccessSize,
    736                                                MCContext &Ctx,
    737                                                MCStreamer &Out) {
    738   StoreFlags(Out);
    739 
  // No need to test when ECX equals zero.
    741   MCSymbol *DoneSym = Ctx.createTempSymbol();
    742   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
    743   EmitInstruction(
    744       Out, MCInstBuilder(X86::TEST32rr).addReg(X86::ECX).addReg(X86::ECX));
    745   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
    746 
    747   // Instrument first and last elements in src and dst range.
    748   InstrumentMOVSBase(X86::EDI /* DstReg */, X86::ESI /* SrcReg */,
    749                      X86::ECX /* CntReg */, AccessSize, Ctx, Out);
    750 
    751   EmitLabel(Out, DoneSym);
    752   RestoreFlags(Out);
    753 }
    754 
    755 class X86AddressSanitizer64 : public X86AddressSanitizer {
    756 public:
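  // Shadow mapping used below: ShadowAddr = (Addr >> 3) + kShadowOffset.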
    757   static const long kShadowOffset = 0x7fff8000;
    758 
    759   X86AddressSanitizer64(const MCSubtargetInfo *&STI)
    760       : X86AddressSanitizer(STI) {}
    761 
    762   ~X86AddressSanitizer64() override {}
    763 
    764   unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
    765     unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
    766     if (FrameReg == X86::NoRegister)
    767       return FrameReg;
    768     return getX86SubSuperRegister(FrameReg, MVT::i64);
    769   }
    770 
    771   void SpillReg(MCStreamer &Out, unsigned Reg) {
    772     EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(Reg));
    773     OrigSPOffset -= 8;
    774   }
    775 
    776   void RestoreReg(MCStreamer &Out, unsigned Reg) {
    777     EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(Reg));
    778     OrigSPOffset += 8;
    779   }
    780 
    781   void StoreFlags(MCStreamer &Out) {
    782     EmitInstruction(Out, MCInstBuilder(X86::PUSHF64));
    783     OrigSPOffset -= 8;
    784   }
    785 
    786   void RestoreFlags(MCStreamer &Out) {
    787     EmitInstruction(Out, MCInstBuilder(X86::POPF64));
    788     OrigSPOffset += 8;
    789   }
    790 
    791   void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
    792                                     MCContext &Ctx,
    793                                     MCStreamer &Out) override {
    794     unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i64);
    795     assert(LocalFrameReg != X86::NoRegister);
    796 
    797     const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
    798     unsigned FrameReg = GetFrameReg(Ctx, Out);
    799     if (MRI && FrameReg != X86::NoRegister) {
    800       SpillReg(Out, X86::RBP);
    801       if (FrameReg == X86::RSP) {
    802         Out.EmitCFIAdjustCfaOffset(8 /* byte size of the LocalFrameReg */);
    803         Out.EmitCFIRelOffset(
    804             MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
    805       }
    806       EmitInstruction(
    807           Out,
    808           MCInstBuilder(X86::MOV64rr).addReg(LocalFrameReg).addReg(FrameReg));
    809       Out.EmitCFIRememberState();
    810       Out.EmitCFIDefCfaRegister(
    811           MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
    812     }
    813 
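    // Move RSP below the 128-byte red zone (x86-64 SysV ABI) so that the
    // spills and flag stores below do not clobber data the surrounding code
    // may keep there.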
    814     EmitAdjustRSP(Ctx, Out, -128);
    815     SpillReg(Out, RegCtx.ShadowReg(MVT::i64));
    816     SpillReg(Out, RegCtx.AddressReg(MVT::i64));
    817     if (RegCtx.ScratchReg(MVT::i64) != X86::NoRegister)
    818       SpillReg(Out, RegCtx.ScratchReg(MVT::i64));
    819     StoreFlags(Out);
    820   }
    821 
    822   void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
    823                                     MCContext &Ctx,
    824                                     MCStreamer &Out) override {
    825     unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i64);
    826     assert(LocalFrameReg != X86::NoRegister);
    827 
    828     RestoreFlags(Out);
    829     if (RegCtx.ScratchReg(MVT::i64) != X86::NoRegister)
    830       RestoreReg(Out, RegCtx.ScratchReg(MVT::i64));
    831     RestoreReg(Out, RegCtx.AddressReg(MVT::i64));
    832     RestoreReg(Out, RegCtx.ShadowReg(MVT::i64));
    833     EmitAdjustRSP(Ctx, Out, 128);
    834 
    835     unsigned FrameReg = GetFrameReg(Ctx, Out);
    836     if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
    837       RestoreReg(Out, LocalFrameReg);
    838       Out.EmitCFIRestoreState();
    839       if (FrameReg == X86::RSP)
    840         Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the LocalFrameReg */);
    841     }
    842   }
    843 
    844   void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
    845                                  bool IsWrite,
    846                                  const RegisterContext &RegCtx,
    847                                  MCContext &Ctx,
    848                                  MCStreamer &Out) override;
    849   void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
    850                                  bool IsWrite,
    851                                  const RegisterContext &RegCtx,
    852                                  MCContext &Ctx,
    853                                  MCStreamer &Out) override;
    854   void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
    855                           MCStreamer &Out) override;
    856 
    857 private:
    858   void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) {
    859     const MCExpr *Disp = MCConstantExpr::create(Offset, Ctx);
    860     std::unique_ptr<X86Operand> Op(
    861         X86Operand::CreateMem(getPointerWidth(), 0, Disp, X86::RSP, 0, 1,
    862                               SMLoc(), SMLoc()));
    863     EmitLEA(*Op, MVT::i64, X86::RSP, Out);
    864     OrigSPOffset += Offset;
    865   }
    866 
    867   void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
    868                           MCStreamer &Out, const RegisterContext &RegCtx) {
    869     EmitInstruction(Out, MCInstBuilder(X86::CLD));
    870     EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));
    871 
    872     EmitInstruction(Out, MCInstBuilder(X86::AND64ri8)
    873                              .addReg(X86::RSP)
    874                              .addReg(X86::RSP)
    875                              .addImm(-16));
    876 
    877     if (RegCtx.AddressReg(MVT::i64) != X86::RDI) {
    878       EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RDI).addReg(
    879                                RegCtx.AddressReg(MVT::i64)));
    880     }
    881     MCSymbol *FnSym = Ctx.getOrCreateSymbol(llvm::Twine("__asan_report_") +
    882                                             (IsWrite ? "store" : "load") +
    883                                             llvm::Twine(AccessSize));
    884     const MCSymbolRefExpr *FnExpr =
    885         MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
    886     EmitInstruction(Out, MCInstBuilder(X86::CALL64pcrel32).addExpr(FnExpr));
    887   }
    888 };
    889 
    890 void X86AddressSanitizer64::InstrumentMemOperandSmall(
    891     X86Operand &Op, unsigned AccessSize, bool IsWrite,
    892     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
    893   unsigned AddressRegI64 = RegCtx.AddressReg(MVT::i64);
    894   unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
    895   unsigned ShadowRegI64 = RegCtx.ShadowReg(MVT::i64);
    896   unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);
    897   unsigned ShadowRegI8 = RegCtx.ShadowReg(MVT::i8);
    898 
    899   assert(RegCtx.ScratchReg(MVT::i32) != X86::NoRegister);
    900   unsigned ScratchRegI32 = RegCtx.ScratchReg(MVT::i32);
    901 
    902   ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out);
    903 
    904   EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
    905                            AddressRegI64));
    906   EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
    907                            .addReg(ShadowRegI64)
    908                            .addReg(ShadowRegI64)
    909                            .addImm(3));
    910   {
    911     MCInst Inst;
    912     Inst.setOpcode(X86::MOV8rm);
    913     Inst.addOperand(MCOperand::createReg(ShadowRegI8));
    914     const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    915     std::unique_ptr<X86Operand> Op(
    916         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
    917                               SMLoc(), SMLoc()));
    918     Op->addMemOperands(Inst, 5);
    919     EmitInstruction(Out, Inst);
    920   }
    921 
    922   EmitInstruction(
    923       Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
    924   MCSymbol *DoneSym = Ctx.createTempSymbol();
    925   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
    926   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
    927 
    928   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
    929                            AddressRegI32));
    930   EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
    931                            .addReg(ScratchRegI32)
    932                            .addReg(ScratchRegI32)
    933                            .addImm(7));
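  // As in the 32-bit variant, ScratchReg holds Addr & 7 and is advanced below
  // to the offset of the last accessed byte.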
    934 
    935   switch (AccessSize) {
    936   default: llvm_unreachable("Incorrect access size");
    937   case 1:
    938     break;
    939   case 2: {
    940     const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
    941     std::unique_ptr<X86Operand> Op(
    942         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
    943                               SMLoc(), SMLoc()));
    944     EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
    945     break;
    946   }
    947   case 4:
    948     EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
    949                              .addReg(ScratchRegI32)
    950                              .addReg(ScratchRegI32)
    951                              .addImm(3));
    952     break;
    953   }
    954 
    955   EmitInstruction(
    956       Out,
    957       MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
    958   EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
    959                            ShadowRegI32));
    960   EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));
    961 
    962   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
    963   EmitLabel(Out, DoneSym);
    964 }
    965 
    966 void X86AddressSanitizer64::InstrumentMemOperandLarge(
    967     X86Operand &Op, unsigned AccessSize, bool IsWrite,
    968     const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
    969   unsigned AddressRegI64 = RegCtx.AddressReg(MVT::i64);
    970   unsigned ShadowRegI64 = RegCtx.ShadowReg(MVT::i64);
    971 
    972   ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out);
    973 
    974   EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
    975                            AddressRegI64));
    976   EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
    977                            .addReg(ShadowRegI64)
    978                            .addReg(ShadowRegI64)
    979                            .addImm(3));
    980   {
    981     MCInst Inst;
    982     switch (AccessSize) {
    983     default: llvm_unreachable("Incorrect access size");
    984     case 8:
    985       Inst.setOpcode(X86::CMP8mi);
    986       break;
    987     case 16:
    988       Inst.setOpcode(X86::CMP16mi);
    989       break;
    990     }
    991     const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    992     std::unique_ptr<X86Operand> Op(
    993         X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
    994                               SMLoc(), SMLoc()));
    995     Op->addMemOperands(Inst, 5);
    996     Inst.addOperand(MCOperand::createImm(0));
    997     EmitInstruction(Out, Inst);
    998   }
    999 
   1000   MCSymbol *DoneSym = Ctx.createTempSymbol();
   1001   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
   1002   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
   1003 
   1004   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
   1005   EmitLabel(Out, DoneSym);
   1006 }
   1007 
   1008 void X86AddressSanitizer64::InstrumentMOVSImpl(unsigned AccessSize,
   1009                                                MCContext &Ctx,
   1010                                                MCStreamer &Out) {
   1011   StoreFlags(Out);
   1012 
  // No need to test when RCX equals zero.
   1014   MCSymbol *DoneSym = Ctx.createTempSymbol();
   1015   const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
   1016   EmitInstruction(
   1017       Out, MCInstBuilder(X86::TEST64rr).addReg(X86::RCX).addReg(X86::RCX));
   1018   EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
   1019 
   1020   // Instrument first and last elements in src and dst range.
   1021   InstrumentMOVSBase(X86::RDI /* DstReg */, X86::RSI /* SrcReg */,
   1022                      X86::RCX /* CntReg */, AccessSize, Ctx, Out);
   1023 
   1024   EmitLabel(Out, DoneSym);
   1025   RestoreFlags(Out);
   1026 }
   1027 
   1028 } // End anonymous namespace
   1029 
   1030 X86AsmInstrumentation::X86AsmInstrumentation(const MCSubtargetInfo *&STI)
   1031     : STI(STI), InitialFrameReg(0) {}
   1032 
   1033 X86AsmInstrumentation::~X86AsmInstrumentation() {}
   1034 
   1035 void X86AsmInstrumentation::InstrumentAndEmitInstruction(
   1036     const MCInst &Inst, OperandVector &Operands, MCContext &Ctx,
   1037     const MCInstrInfo &MII, MCStreamer &Out) {
   1038   EmitInstruction(Out, Inst);
   1039 }
   1040 
   1041 void X86AsmInstrumentation::EmitInstruction(MCStreamer &Out,
   1042                                             const MCInst &Inst) {
   1043   Out.EmitInstruction(Inst, *STI);
   1044 }
   1045 
   1046 unsigned X86AsmInstrumentation::GetFrameRegGeneric(const MCContext &Ctx,
   1047                                                    MCStreamer &Out) {
   1048   if (!Out.getNumFrameInfos()) // No active dwarf frame
   1049     return X86::NoRegister;
   1050   const MCDwarfFrameInfo &Frame = Out.getDwarfFrameInfos().back();
   1051   if (Frame.End) // Active dwarf frame is closed
   1052     return X86::NoRegister;
   1053   const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
   1054   if (!MRI) // No register info
   1055     return X86::NoRegister;
   1056 
   1057   if (InitialFrameReg) {
    // FrameReg is set explicitly; we're instrumenting a MachineFunction.
   1059     return InitialFrameReg;
   1060   }
   1061 
   1062   return MRI->getLLVMRegNum(Frame.CurrentCfaRegister, true /* IsEH */);
   1063 }
   1064 
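// Factory used by the X86 asm parser. An ASan-instrumenting implementation is
// returned only when -asan-instrument-assembly is set, the target OS has
// compiler-rt support and address sanitizing is enabled in MCOptions;
// otherwise a pass-through X86AsmInstrumentation is returned.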
   1065 X86AsmInstrumentation *
   1066 CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
   1067                             const MCContext &Ctx, const MCSubtargetInfo *&STI) {
   1068   Triple T(STI->getTargetTriple());
   1069   const bool hasCompilerRTSupport = T.isOSLinux();
   1070   if (ClAsanInstrumentAssembly && hasCompilerRTSupport &&
   1071       MCOptions.SanitizeAddress) {
   1072     if (STI->getFeatureBits()[X86::Mode32Bit] != 0)
   1073       return new X86AddressSanitizer32(STI);
   1074     if (STI->getFeatureBits()[X86::Mode64Bit] != 0)
   1075       return new X86AddressSanitizer64(STI);
   1076   }
   1077   return new X86AsmInstrumentation(STI);
   1078 }
   1079 
   1080 } // end llvm namespace
   1081