      1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 
     10 #include "MCTargetDesc/AArch64AddressingModes.h"
     11 #include "MCTargetDesc/AArch64MCExpr.h"
     12 #include "MCTargetDesc/AArch64TargetStreamer.h"
     13 #include "Utils/AArch64BaseInfo.h"
     14 #include "llvm/ADT/APInt.h"
     15 #include "llvm/ADT/STLExtras.h"
     16 #include "llvm/ADT/SmallVector.h"
     17 #include "llvm/ADT/StringSwitch.h"
     18 #include "llvm/ADT/Twine.h"
     19 #include "llvm/MC/MCContext.h"
     20 #include "llvm/MC/MCExpr.h"
     21 #include "llvm/MC/MCInst.h"
     22 #include "llvm/MC/MCObjectFileInfo.h"
     23 #include "llvm/MC/MCParser/MCAsmLexer.h"
     24 #include "llvm/MC/MCParser/MCAsmParser.h"
     25 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
     26 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
     27 #include "llvm/MC/MCRegisterInfo.h"
     28 #include "llvm/MC/MCStreamer.h"
     29 #include "llvm/MC/MCSubtargetInfo.h"
     30 #include "llvm/MC/MCSymbol.h"
     31 #include "llvm/Support/ErrorHandling.h"
     32 #include "llvm/Support/SourceMgr.h"
     33 #include "llvm/Support/TargetParser.h"
     34 #include "llvm/Support/TargetRegistry.h"
     35 #include "llvm/Support/raw_ostream.h"
     36 #include <cstdio>
     37 using namespace llvm;
     38 
     39 namespace {
     40 
     41 class AArch64Operand;
     42 
     43 class AArch64AsmParser : public MCTargetAsmParser {
     44 private:
     45   StringRef Mnemonic; ///< Instruction mnemonic.
     46 
      47   // Map of register aliases registered via the .req directive.
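           // Each entry maps an alias name to (IsVector, RegisterNumber).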
     48   StringMap<std::pair<bool, unsigned> > RegisterReqs;
     49 
     50   AArch64TargetStreamer &getTargetStreamer() {
     51     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
     52     return static_cast<AArch64TargetStreamer &>(TS);
     53   }
     54 
     55   SMLoc getLoc() const { return getParser().getTok().getLoc(); }
     56 
     57   bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
     58   AArch64CC::CondCode parseCondCodeString(StringRef Cond);
     59   bool parseCondCode(OperandVector &Operands, bool invertCondCode);
     60   unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
     61   int tryParseRegister();
     62   int tryMatchVectorRegister(StringRef &Kind, bool expected);
     63   bool parseRegister(OperandVector &Operands);
     64   bool parseSymbolicImmVal(const MCExpr *&ImmVal);
     65   bool parseVectorList(OperandVector &Operands);
     66   bool parseOperand(OperandVector &Operands, bool isCondCode,
     67                     bool invertCondCode);
     68 
     69   void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
     70   bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
     71   bool showMatchError(SMLoc Loc, unsigned ErrCode);
     72 
     73   bool parseDirectiveArch(SMLoc L);
     74   bool parseDirectiveCPU(SMLoc L);
     75   bool parseDirectiveWord(unsigned Size, SMLoc L);
     76   bool parseDirectiveInst(SMLoc L);
     77 
     78   bool parseDirectiveTLSDescCall(SMLoc L);
     79 
     80   bool parseDirectiveLOH(StringRef LOH, SMLoc L);
     81   bool parseDirectiveLtorg(SMLoc L);
     82 
     83   bool parseDirectiveReq(StringRef Name, SMLoc L);
     84   bool parseDirectiveUnreq(SMLoc L);
     85 
     86   bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
     87   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
     88                                OperandVector &Operands, MCStreamer &Out,
     89                                uint64_t &ErrorInfo,
     90                                bool MatchingInlineAsm) override;
     91 /// @name Auto-generated Match Functions
     92 /// {
     93 
     94 #define GET_ASSEMBLER_HEADER
     95 #include "AArch64GenAsmMatcher.inc"
     96 
     97   /// }
     98 
     99   OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
    100   OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
    101   OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
    102   OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
    103   OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
    104   OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
    105   OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
    106   OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
    107   OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
    108   OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
    109   OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
    110   OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
    111   bool tryParseVectorRegister(OperandVector &Operands);
    112   OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
    113 
    114 public:
    115   enum AArch64MatchResultTy {
    116     Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
    117 #define GET_OPERAND_DIAGNOSTIC_TYPES
    118 #include "AArch64GenAsmMatcher.inc"
    119   };
    120   AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
    121                    const MCInstrInfo &MII, const MCTargetOptions &Options)
    122     : MCTargetAsmParser(Options, STI) {
    123     MCAsmParserExtension::Initialize(Parser);
    124     MCStreamer &S = getParser().getStreamer();
    125     if (S.getTargetStreamer() == nullptr)
    126       new AArch64TargetStreamer(S);
    127 
    128     // Initialize the set of available features.
    129     setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
    130   }
    131 
    132   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
    133                         SMLoc NameLoc, OperandVector &Operands) override;
    134   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
    135   bool ParseDirective(AsmToken DirectiveID) override;
    136   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
    137                                       unsigned Kind) override;
    138 
    139   static bool classifySymbolRef(const MCExpr *Expr,
    140                                 AArch64MCExpr::VariantKind &ELFRefKind,
    141                                 MCSymbolRefExpr::VariantKind &DarwinRefKind,
    142                                 int64_t &Addend);
    143 };
    144 } // end anonymous namespace
    145 
    146 namespace {
    147 
    148 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
    149 /// instruction.
    150 class AArch64Operand : public MCParsedAsmOperand {
    151 private:
    152   enum KindTy {
    153     k_Immediate,
    154     k_ShiftedImm,
    155     k_CondCode,
    156     k_Register,
    157     k_VectorList,
    158     k_VectorIndex,
    159     k_Token,
    160     k_SysReg,
    161     k_SysCR,
    162     k_Prefetch,
    163     k_ShiftExtend,
    164     k_FPImm,
    165     k_Barrier,
    166     k_PSBHint,
    167   } Kind;
    168 
    169   SMLoc StartLoc, EndLoc;
    170 
    171   struct TokOp {
    172     const char *Data;
    173     unsigned Length;
    174     bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
    175   };
    176 
    177   struct RegOp {
    178     unsigned RegNum;
    179     bool isVector;
    180   };
    181 
    182   struct VectorListOp {
    183     unsigned RegNum;
    184     unsigned Count;
    185     unsigned NumElements;
    186     unsigned ElementKind;
    187   };
    188 
    189   struct VectorIndexOp {
    190     unsigned Val;
    191   };
    192 
    193   struct ImmOp {
    194     const MCExpr *Val;
    195   };
    196 
    197   struct ShiftedImmOp {
    198     const MCExpr *Val;
    199     unsigned ShiftAmount;
    200   };
    201 
    202   struct CondCodeOp {
    203     AArch64CC::CondCode Code;
    204   };
    205 
    206   struct FPImmOp {
    207     unsigned Val; // Encoded 8-bit representation.
    208   };
    209 
    210   struct BarrierOp {
    211     unsigned Val; // Not the enum since not all values have names.
    212     const char *Data;
    213     unsigned Length;
    214   };
    215 
    216   struct SysRegOp {
    217     const char *Data;
    218     unsigned Length;
    219     uint32_t MRSReg;
    220     uint32_t MSRReg;
    221     uint32_t PStateField;
    222   };
    223 
    224   struct SysCRImmOp {
    225     unsigned Val;
    226   };
    227 
    228   struct PrefetchOp {
    229     unsigned Val;
    230     const char *Data;
    231     unsigned Length;
    232   };
    233 
    234   struct PSBHintOp {
    235     unsigned Val;
    236     const char *Data;
    237     unsigned Length;
    238   };
    239 
    240   struct ShiftExtendOp {
    241     AArch64_AM::ShiftExtendType Type;
    242     unsigned Amount;
    243     bool HasExplicitAmount;
    244   };
    245 
    246   struct ExtendOp {
    247     unsigned Val;
    248   };
    249 
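           // Only the member matching Kind is valid at any time; the accessors
           // below assert the Kind before reading from the union.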
    250   union {
    251     struct TokOp Tok;
    252     struct RegOp Reg;
    253     struct VectorListOp VectorList;
    254     struct VectorIndexOp VectorIndex;
    255     struct ImmOp Imm;
    256     struct ShiftedImmOp ShiftedImm;
    257     struct CondCodeOp CondCode;
    258     struct FPImmOp FPImm;
    259     struct BarrierOp Barrier;
    260     struct SysRegOp SysReg;
    261     struct SysCRImmOp SysCRImm;
    262     struct PrefetchOp Prefetch;
    263     struct PSBHintOp PSBHint;
    264     struct ShiftExtendOp ShiftExtend;
    265   };
    266 
     267   // Keep the MCContext around as the MCExprs may need to be manipulated during
    268   // the add<>Operands() calls.
    269   MCContext &Ctx;
    270 
    271 public:
    272   AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
    273 
    274   AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    275     Kind = o.Kind;
    276     StartLoc = o.StartLoc;
    277     EndLoc = o.EndLoc;
    278     switch (Kind) {
    279     case k_Token:
    280       Tok = o.Tok;
    281       break;
    282     case k_Immediate:
    283       Imm = o.Imm;
    284       break;
    285     case k_ShiftedImm:
    286       ShiftedImm = o.ShiftedImm;
    287       break;
    288     case k_CondCode:
    289       CondCode = o.CondCode;
    290       break;
    291     case k_FPImm:
    292       FPImm = o.FPImm;
    293       break;
    294     case k_Barrier:
    295       Barrier = o.Barrier;
    296       break;
    297     case k_Register:
    298       Reg = o.Reg;
    299       break;
    300     case k_VectorList:
    301       VectorList = o.VectorList;
    302       break;
    303     case k_VectorIndex:
    304       VectorIndex = o.VectorIndex;
    305       break;
    306     case k_SysReg:
    307       SysReg = o.SysReg;
    308       break;
    309     case k_SysCR:
    310       SysCRImm = o.SysCRImm;
    311       break;
    312     case k_Prefetch:
    313       Prefetch = o.Prefetch;
    314       break;
    315     case k_PSBHint:
    316       PSBHint = o.PSBHint;
    317       break;
    318     case k_ShiftExtend:
    319       ShiftExtend = o.ShiftExtend;
    320       break;
    321     }
    322   }
    323 
    324   /// getStartLoc - Get the location of the first token of this operand.
    325   SMLoc getStartLoc() const override { return StartLoc; }
    326   /// getEndLoc - Get the location of the last token of this operand.
    327   SMLoc getEndLoc() const override { return EndLoc; }
    328 
    329   StringRef getToken() const {
    330     assert(Kind == k_Token && "Invalid access!");
    331     return StringRef(Tok.Data, Tok.Length);
    332   }
    333 
    334   bool isTokenSuffix() const {
    335     assert(Kind == k_Token && "Invalid access!");
    336     return Tok.IsSuffix;
    337   }
    338 
    339   const MCExpr *getImm() const {
    340     assert(Kind == k_Immediate && "Invalid access!");
    341     return Imm.Val;
    342   }
    343 
    344   const MCExpr *getShiftedImmVal() const {
    345     assert(Kind == k_ShiftedImm && "Invalid access!");
    346     return ShiftedImm.Val;
    347   }
    348 
    349   unsigned getShiftedImmShift() const {
    350     assert(Kind == k_ShiftedImm && "Invalid access!");
    351     return ShiftedImm.ShiftAmount;
    352   }
    353 
    354   AArch64CC::CondCode getCondCode() const {
    355     assert(Kind == k_CondCode && "Invalid access!");
    356     return CondCode.Code;
    357   }
    358 
    359   unsigned getFPImm() const {
    360     assert(Kind == k_FPImm && "Invalid access!");
    361     return FPImm.Val;
    362   }
    363 
    364   unsigned getBarrier() const {
    365     assert(Kind == k_Barrier && "Invalid access!");
    366     return Barrier.Val;
    367   }
    368 
    369   StringRef getBarrierName() const {
    370     assert(Kind == k_Barrier && "Invalid access!");
    371     return StringRef(Barrier.Data, Barrier.Length);
    372   }
    373 
    374   unsigned getReg() const override {
    375     assert(Kind == k_Register && "Invalid access!");
    376     return Reg.RegNum;
    377   }
    378 
    379   unsigned getVectorListStart() const {
    380     assert(Kind == k_VectorList && "Invalid access!");
    381     return VectorList.RegNum;
    382   }
    383 
    384   unsigned getVectorListCount() const {
    385     assert(Kind == k_VectorList && "Invalid access!");
    386     return VectorList.Count;
    387   }
    388 
    389   unsigned getVectorIndex() const {
    390     assert(Kind == k_VectorIndex && "Invalid access!");
    391     return VectorIndex.Val;
    392   }
    393 
    394   StringRef getSysReg() const {
    395     assert(Kind == k_SysReg && "Invalid access!");
    396     return StringRef(SysReg.Data, SysReg.Length);
    397   }
    398 
    399   unsigned getSysCR() const {
    400     assert(Kind == k_SysCR && "Invalid access!");
    401     return SysCRImm.Val;
    402   }
    403 
    404   unsigned getPrefetch() const {
    405     assert(Kind == k_Prefetch && "Invalid access!");
    406     return Prefetch.Val;
    407   }
    408 
    409   unsigned getPSBHint() const {
    410     assert(Kind == k_PSBHint && "Invalid access!");
    411     return PSBHint.Val;
    412   }
    413 
    414   StringRef getPSBHintName() const {
    415     assert(Kind == k_PSBHint && "Invalid access!");
    416     return StringRef(PSBHint.Data, PSBHint.Length);
    417   }
    418 
    419   StringRef getPrefetchName() const {
    420     assert(Kind == k_Prefetch && "Invalid access!");
    421     return StringRef(Prefetch.Data, Prefetch.Length);
    422   }
    423 
    424   AArch64_AM::ShiftExtendType getShiftExtendType() const {
    425     assert(Kind == k_ShiftExtend && "Invalid access!");
    426     return ShiftExtend.Type;
    427   }
    428 
    429   unsigned getShiftExtendAmount() const {
    430     assert(Kind == k_ShiftExtend && "Invalid access!");
    431     return ShiftExtend.Amount;
    432   }
    433 
    434   bool hasShiftExtendAmount() const {
    435     assert(Kind == k_ShiftExtend && "Invalid access!");
    436     return ShiftExtend.HasExplicitAmount;
    437   }
    438 
    439   bool isImm() const override { return Kind == k_Immediate; }
    440   bool isMem() const override { return false; }
    441   bool isSImm9() const {
    442     if (!isImm())
    443       return false;
    444     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    445     if (!MCE)
    446       return false;
    447     int64_t Val = MCE->getValue();
    448     return (Val >= -256 && Val < 256);
    449   }
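           // The SImm7sN predicates below accept the signed 7-bit immediate
           // offsets used by LDP/STP, scaled by the access size N in bytes,
           // e.g. ldp x0, x1, [sp, #-16] uses a valid SImm7s8 offset.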
    450   bool isSImm7s4() const {
    451     if (!isImm())
    452       return false;
    453     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    454     if (!MCE)
    455       return false;
    456     int64_t Val = MCE->getValue();
    457     return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
    458   }
    459   bool isSImm7s8() const {
    460     if (!isImm())
    461       return false;
    462     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    463     if (!MCE)
    464       return false;
    465     int64_t Val = MCE->getValue();
    466     return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
    467   }
    468   bool isSImm7s16() const {
    469     if (!isImm())
    470       return false;
    471     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    472     if (!MCE)
    473       return false;
    474     int64_t Val = MCE->getValue();
    475     return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
    476   }
    477 
    478   bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
    479     AArch64MCExpr::VariantKind ELFRefKind;
    480     MCSymbolRefExpr::VariantKind DarwinRefKind;
    481     int64_t Addend;
    482     if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
    483                                            Addend)) {
    484       // If we don't understand the expression, assume the best and
    485       // let the fixup and relocation code deal with it.
    486       return true;
    487     }
    488 
    489     if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
    490         ELFRefKind == AArch64MCExpr::VK_LO12 ||
    491         ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
    492         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
    493         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
    494         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
    495         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
    496         ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
    497         ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
    498       // Note that we don't range-check the addend. It's adjusted modulo page
    499       // size when converted, so there is no "out of range" condition when using
    500       // @pageoff.
    501       return Addend >= 0 && (Addend % Scale) == 0;
    502     } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
    503                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
    504       // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
    505       return Addend == 0;
    506     }
    507 
    508     return false;
    509   }
    510 
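           // Unsigned, scaled 12-bit offset used by the LDR/STR immediate forms;
           // e.g. with Scale == 8 (64-bit loads) valid offsets are 0..32760 in
           // steps of 8.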
    511   template <int Scale> bool isUImm12Offset() const {
    512     if (!isImm())
    513       return false;
    514 
    515     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    516     if (!MCE)
    517       return isSymbolicUImm12Offset(getImm(), Scale);
    518 
    519     int64_t Val = MCE->getValue();
    520     return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
    521   }
    522 
    523   bool isImm0_1() const {
    524     if (!isImm())
    525       return false;
    526     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    527     if (!MCE)
    528       return false;
    529     int64_t Val = MCE->getValue();
    530     return (Val >= 0 && Val < 2);
    531   }
    532   bool isImm0_7() const {
    533     if (!isImm())
    534       return false;
    535     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    536     if (!MCE)
    537       return false;
    538     int64_t Val = MCE->getValue();
    539     return (Val >= 0 && Val < 8);
    540   }
    541   bool isImm1_8() const {
    542     if (!isImm())
    543       return false;
    544     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    545     if (!MCE)
    546       return false;
    547     int64_t Val = MCE->getValue();
    548     return (Val > 0 && Val < 9);
    549   }
    550   bool isImm0_15() const {
    551     if (!isImm())
    552       return false;
    553     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    554     if (!MCE)
    555       return false;
    556     int64_t Val = MCE->getValue();
    557     return (Val >= 0 && Val < 16);
    558   }
    559   bool isImm1_16() const {
    560     if (!isImm())
    561       return false;
    562     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    563     if (!MCE)
    564       return false;
    565     int64_t Val = MCE->getValue();
    566     return (Val > 0 && Val < 17);
    567   }
    568   bool isImm0_31() const {
    569     if (!isImm())
    570       return false;
    571     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    572     if (!MCE)
    573       return false;
    574     int64_t Val = MCE->getValue();
    575     return (Val >= 0 && Val < 32);
    576   }
    577   bool isImm1_31() const {
    578     if (!isImm())
    579       return false;
    580     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    581     if (!MCE)
    582       return false;
    583     int64_t Val = MCE->getValue();
    584     return (Val >= 1 && Val < 32);
    585   }
    586   bool isImm1_32() const {
    587     if (!isImm())
    588       return false;
    589     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    590     if (!MCE)
    591       return false;
    592     int64_t Val = MCE->getValue();
    593     return (Val >= 1 && Val < 33);
    594   }
    595   bool isImm0_63() const {
    596     if (!isImm())
    597       return false;
    598     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    599     if (!MCE)
    600       return false;
    601     int64_t Val = MCE->getValue();
    602     return (Val >= 0 && Val < 64);
    603   }
    604   bool isImm1_63() const {
    605     if (!isImm())
    606       return false;
    607     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    608     if (!MCE)
    609       return false;
    610     int64_t Val = MCE->getValue();
    611     return (Val >= 1 && Val < 64);
    612   }
    613   bool isImm1_64() const {
    614     if (!isImm())
    615       return false;
    616     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    617     if (!MCE)
    618       return false;
    619     int64_t Val = MCE->getValue();
    620     return (Val >= 1 && Val < 65);
    621   }
    622   bool isImm0_127() const {
    623     if (!isImm())
    624       return false;
    625     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    626     if (!MCE)
    627       return false;
    628     int64_t Val = MCE->getValue();
    629     return (Val >= 0 && Val < 128);
    630   }
    631   bool isImm0_255() const {
    632     if (!isImm())
    633       return false;
    634     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    635     if (!MCE)
    636       return false;
    637     int64_t Val = MCE->getValue();
    638     return (Val >= 0 && Val < 256);
    639   }
    640   bool isImm0_65535() const {
    641     if (!isImm())
    642       return false;
    643     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    644     if (!MCE)
    645       return false;
    646     int64_t Val = MCE->getValue();
    647     return (Val >= 0 && Val < 65536);
    648   }
    649   bool isImm32_63() const {
    650     if (!isImm())
    651       return false;
    652     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    653     if (!MCE)
    654       return false;
    655     int64_t Val = MCE->getValue();
    656     return (Val >= 32 && Val < 64);
    657   }
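           // Logical immediates must be encodable as a repeating bitmask pattern
           // (the N:immr:imms encoding); e.g. 0x00ff00ff is a valid 32-bit
           // logical immediate while 0x12345678 is not.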
    658   bool isLogicalImm32() const {
    659     if (!isImm())
    660       return false;
    661     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    662     if (!MCE)
    663       return false;
    664     int64_t Val = MCE->getValue();
    665     if (Val >> 32 != 0 && Val >> 32 != ~0LL)
    666       return false;
    667     Val &= 0xFFFFFFFF;
    668     return AArch64_AM::isLogicalImmediate(Val, 32);
    669   }
    670   bool isLogicalImm64() const {
    671     if (!isImm())
    672       return false;
    673     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    674     if (!MCE)
    675       return false;
    676     return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
    677   }
    678   bool isLogicalImm32Not() const {
    679     if (!isImm())
    680       return false;
    681     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    682     if (!MCE)
    683       return false;
    684     int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    685     return AArch64_AM::isLogicalImmediate(Val, 32);
    686   }
    687   bool isLogicalImm64Not() const {
    688     if (!isImm())
    689       return false;
    690     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    691     if (!MCE)
    692       return false;
    693     return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
    694   }
    695   bool isShiftedImm() const { return Kind == k_ShiftedImm; }
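           // ADD/SUB immediates are unsigned 12-bit values, optionally shifted
           // left by 12, e.g. add x0, x1, #1, lsl #12, or a :lo12:-style
           // symbolic reference.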
    696   bool isAddSubImm() const {
    697     if (!isShiftedImm() && !isImm())
    698       return false;
    699 
    700     const MCExpr *Expr;
    701 
    702     // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    703     if (isShiftedImm()) {
    704       unsigned Shift = ShiftedImm.ShiftAmount;
    705       Expr = ShiftedImm.Val;
    706       if (Shift != 0 && Shift != 12)
    707         return false;
    708     } else {
    709       Expr = getImm();
    710     }
    711 
    712     AArch64MCExpr::VariantKind ELFRefKind;
    713     MCSymbolRefExpr::VariantKind DarwinRefKind;
    714     int64_t Addend;
    715     if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
    716                                           DarwinRefKind, Addend)) {
    717       return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
    718           || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
    719           || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
    720           || ELFRefKind == AArch64MCExpr::VK_LO12
    721           || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
    722           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
    723           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
    724           || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
    725           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
    726           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
    727           || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
    728     }
    729 
    730     // Otherwise it should be a real immediate in range:
    731     const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
    732     return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
    733   }
    734   bool isAddSubImmNeg() const {
    735     if (!isShiftedImm() && !isImm())
    736       return false;
    737 
    738     const MCExpr *Expr;
    739 
    740     // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    741     if (isShiftedImm()) {
    742       unsigned Shift = ShiftedImm.ShiftAmount;
    743       Expr = ShiftedImm.Val;
    744       if (Shift != 0 && Shift != 12)
    745         return false;
    746     } else
    747       Expr = getImm();
    748 
     749     // It should be a real negative immediate in range:
    750     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    751     return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
    752   }
    753   bool isCondCode() const { return Kind == k_CondCode; }
    754   bool isSIMDImmType10() const {
    755     if (!isImm())
    756       return false;
    757     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    758     if (!MCE)
    759       return false;
    760     return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
    761   }
    762   bool isBranchTarget26() const {
    763     if (!isImm())
    764       return false;
    765     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    766     if (!MCE)
    767       return true;
    768     int64_t Val = MCE->getValue();
    769     if (Val & 0x3)
    770       return false;
    771     return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
    772   }
    773   bool isPCRelLabel19() const {
    774     if (!isImm())
    775       return false;
    776     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    777     if (!MCE)
    778       return true;
    779     int64_t Val = MCE->getValue();
    780     if (Val & 0x3)
    781       return false;
    782     return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
    783   }
    784   bool isBranchTarget14() const {
    785     if (!isImm())
    786       return false;
    787     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    788     if (!MCE)
    789       return true;
    790     int64_t Val = MCE->getValue();
    791     if (Val & 0x3)
    792       return false;
    793     return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
    794   }
    795 
    796   bool
    797   isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
    798     if (!isImm())
    799       return false;
    800 
    801     AArch64MCExpr::VariantKind ELFRefKind;
    802     MCSymbolRefExpr::VariantKind DarwinRefKind;
    803     int64_t Addend;
    804     if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
    805                                              DarwinRefKind, Addend)) {
    806       return false;
    807     }
    808     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
    809       return false;
    810 
    811     for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
    812       if (ELFRefKind == AllowedModifiers[i])
    813         return Addend == 0;
    814     }
    815 
    816     return false;
    817   }
    818 
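           // The isMov[ZK]SymbolG[0-3] predicates accept MOVZ/MOVK operands that
           // select a 16-bit fragment of a symbol, e.g. movz x0, #:abs_g2:sym.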
    819   bool isMovZSymbolG3() const {
    820     return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
    821   }
    822 
    823   bool isMovZSymbolG2() const {
    824     return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
    825                          AArch64MCExpr::VK_TPREL_G2,
    826                          AArch64MCExpr::VK_DTPREL_G2});
    827   }
    828 
    829   bool isMovZSymbolG1() const {
    830     return isMovWSymbol({
    831         AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
    832         AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
    833         AArch64MCExpr::VK_DTPREL_G1,
    834     });
    835   }
    836 
    837   bool isMovZSymbolG0() const {
    838     return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
    839                          AArch64MCExpr::VK_TPREL_G0,
    840                          AArch64MCExpr::VK_DTPREL_G0});
    841   }
    842 
    843   bool isMovKSymbolG3() const {
    844     return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
    845   }
    846 
    847   bool isMovKSymbolG2() const {
    848     return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
    849   }
    850 
    851   bool isMovKSymbolG1() const {
    852     return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
    853                          AArch64MCExpr::VK_TPREL_G1_NC,
    854                          AArch64MCExpr::VK_DTPREL_G1_NC});
    855   }
    856 
    857   bool isMovKSymbolG0() const {
    858     return isMovWSymbol(
    859         {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
    860          AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
    861   }
    862 
    863   template<int RegWidth, int Shift>
    864   bool isMOVZMovAlias() const {
    865     if (!isImm()) return false;
    866 
    867     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    868     if (!CE) return false;
    869     uint64_t Value = CE->getValue();
    870 
    871     return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
    872   }
    873 
    874   template<int RegWidth, int Shift>
    875   bool isMOVNMovAlias() const {
    876     if (!isImm()) return false;
    877 
    878     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    879     if (!CE) return false;
    880     uint64_t Value = CE->getValue();
    881 
    882     return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
    883   }
    884 
    885   bool isFPImm() const { return Kind == k_FPImm; }
    886   bool isBarrier() const { return Kind == k_Barrier; }
    887   bool isSysReg() const { return Kind == k_SysReg; }
    888   bool isMRSSystemRegister() const {
    889     if (!isSysReg()) return false;
    890 
    891     return SysReg.MRSReg != -1U;
    892   }
    893   bool isMSRSystemRegister() const {
    894     if (!isSysReg()) return false;
    895     return SysReg.MSRReg != -1U;
    896   }
    897   bool isSystemPStateFieldWithImm0_1() const {
    898     if (!isSysReg()) return false;
    899     return (SysReg.PStateField == AArch64PState::PAN ||
    900             SysReg.PStateField == AArch64PState::UAO);
    901   }
    902   bool isSystemPStateFieldWithImm0_15() const {
    903     if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
    904     return SysReg.PStateField != -1U;
    905   }
    906   bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
    907   bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
    908   bool isVectorRegLo() const {
    909     return Kind == k_Register && Reg.isVector &&
    910            AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
    911                Reg.RegNum);
    912   }
    913   bool isGPR32as64() const {
    914     return Kind == k_Register && !Reg.isVector &&
    915       AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
    916   }
    917   bool isWSeqPair() const {
    918     return Kind == k_Register && !Reg.isVector &&
    919            AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
    920                Reg.RegNum);
    921   }
    922   bool isXSeqPair() const {
    923     return Kind == k_Register && !Reg.isVector &&
    924            AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
    925                Reg.RegNum);
    926   }
    927 
    928   bool isGPR64sp0() const {
    929     return Kind == k_Register && !Reg.isVector &&
    930       AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
    931   }
    932 
    933   /// Is this a vector list with the type implicit (presumably attached to the
    934   /// instruction itself)?
    935   template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
    936     return Kind == k_VectorList && VectorList.Count == NumRegs &&
    937            !VectorList.ElementKind;
    938   }
    939 
    940   template <unsigned NumRegs, unsigned NumElements, char ElementKind>
    941   bool isTypedVectorList() const {
    942     if (Kind != k_VectorList)
    943       return false;
    944     if (VectorList.Count != NumRegs)
    945       return false;
    946     if (VectorList.ElementKind != ElementKind)
    947       return false;
    948     return VectorList.NumElements == NumElements;
    949   }
    950 
    951   bool isVectorIndex1() const {
    952     return Kind == k_VectorIndex && VectorIndex.Val == 1;
    953   }
    954   bool isVectorIndexB() const {
    955     return Kind == k_VectorIndex && VectorIndex.Val < 16;
    956   }
    957   bool isVectorIndexH() const {
    958     return Kind == k_VectorIndex && VectorIndex.Val < 8;
    959   }
    960   bool isVectorIndexS() const {
    961     return Kind == k_VectorIndex && VectorIndex.Val < 4;
    962   }
    963   bool isVectorIndexD() const {
    964     return Kind == k_VectorIndex && VectorIndex.Val < 2;
    965   }
    966   bool isToken() const override { return Kind == k_Token; }
    967   bool isTokenEqual(StringRef Str) const {
    968     return Kind == k_Token && getToken() == Str;
    969   }
    970   bool isSysCR() const { return Kind == k_SysCR; }
    971   bool isPrefetch() const { return Kind == k_Prefetch; }
    972   bool isPSBHint() const { return Kind == k_PSBHint; }
    973   bool isShiftExtend() const { return Kind == k_ShiftExtend; }
    974   bool isShifter() const {
    975     if (!isShiftExtend())
    976       return false;
    977 
    978     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    979     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
    980             ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
    981             ST == AArch64_AM::MSL);
    982   }
    983   bool isExtend() const {
    984     if (!isShiftExtend())
    985       return false;
    986 
    987     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    988     return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
    989             ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
    990             ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
    991             ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
    992             ET == AArch64_AM::LSL) &&
    993            getShiftExtendAmount() <= 4;
    994   }
    995 
    996   bool isExtend64() const {
    997     if (!isExtend())
    998       return false;
    999     // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
   1000     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1001     return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
   1002   }
   1003   bool isExtendLSL64() const {
   1004     if (!isExtend())
   1005       return false;
   1006     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1007     return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
   1008             ET == AArch64_AM::LSL) &&
   1009            getShiftExtendAmount() <= 4;
   1010   }
   1011 
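           // Register-offset addressing extends: the shift amount must be either
           // 0 or log2 of the access width in bytes, e.g. ldr x0, [x1, x2, lsl #3]
           // for an 8-byte load.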
   1012   template<int Width> bool isMemXExtend() const {
   1013     if (!isExtend())
   1014       return false;
   1015     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1016     return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
   1017            (getShiftExtendAmount() == Log2_32(Width / 8) ||
   1018             getShiftExtendAmount() == 0);
   1019   }
   1020 
   1021   template<int Width> bool isMemWExtend() const {
   1022     if (!isExtend())
   1023       return false;
   1024     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1025     return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
   1026            (getShiftExtendAmount() == Log2_32(Width / 8) ||
   1027             getShiftExtendAmount() == 0);
   1028   }
   1029 
   1030   template <unsigned width>
   1031   bool isArithmeticShifter() const {
   1032     if (!isShifter())
   1033       return false;
   1034 
   1035     // An arithmetic shifter is LSL, LSR, or ASR.
   1036     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
   1037     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
   1038             ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
   1039   }
   1040 
   1041   template <unsigned width>
   1042   bool isLogicalShifter() const {
   1043     if (!isShifter())
   1044       return false;
   1045 
   1046     // A logical shifter is LSL, LSR, ASR or ROR.
   1047     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
   1048     return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
   1049             ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
   1050            getShiftExtendAmount() < width;
   1051   }
   1052 
   1053   bool isMovImm32Shifter() const {
   1054     if (!isShifter())
   1055       return false;
   1056 
    1057     // A 32-bit MOVi shifter is LSL of 0 or 16.
   1058     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
   1059     if (ST != AArch64_AM::LSL)
   1060       return false;
   1061     uint64_t Val = getShiftExtendAmount();
   1062     return (Val == 0 || Val == 16);
   1063   }
   1064 
   1065   bool isMovImm64Shifter() const {
   1066     if (!isShifter())
   1067       return false;
   1068 
    1069     // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
   1070     AArch64_AM::ShiftExtendType ST = getShiftExtendType();
   1071     if (ST != AArch64_AM::LSL)
   1072       return false;
   1073     uint64_t Val = getShiftExtendAmount();
   1074     return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
   1075   }
   1076 
   1077   bool isLogicalVecShifter() const {
   1078     if (!isShifter())
   1079       return false;
   1080 
   1081     // A logical vector shifter is a left shift by 0, 8, 16, or 24.
   1082     unsigned Shift = getShiftExtendAmount();
   1083     return getShiftExtendType() == AArch64_AM::LSL &&
   1084            (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
   1085   }
   1086 
   1087   bool isLogicalVecHalfWordShifter() const {
   1088     if (!isLogicalVecShifter())
   1089       return false;
   1090 
    1091     // A logical vector halfword shifter is a left shift by 0 or 8.
   1092     unsigned Shift = getShiftExtendAmount();
   1093     return getShiftExtendType() == AArch64_AM::LSL &&
   1094            (Shift == 0 || Shift == 8);
   1095   }
   1096 
   1097   bool isMoveVecShifter() const {
   1098     if (!isShiftExtend())
   1099       return false;
   1100 
    1101     // A move vector shifter is an MSL shift by 8 or 16.
   1102     unsigned Shift = getShiftExtendAmount();
   1103     return getShiftExtendType() == AArch64_AM::MSL &&
   1104            (Shift == 8 || Shift == 16);
   1105   }
   1106 
   1107   // Fallback unscaled operands are for aliases of LDR/STR that fall back
   1108   // to LDUR/STUR when the offset is not legal for the former but is for
   1109   // the latter. As such, in addition to checking for being a legal unscaled
   1110   // address, also check that it is not a legal scaled address. This avoids
   1111   // ambiguity in the matcher.
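           // For example, ldr x0, [x1, #1] is rejected by the scaled form (the
           // offset is not a multiple of 8) but matches here and is emitted as
           // LDUR.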
   1112   template<int Width>
   1113   bool isSImm9OffsetFB() const {
   1114     return isSImm9() && !isUImm12Offset<Width / 8>();
   1115   }
   1116 
   1117   bool isAdrpLabel() const {
   1118     // Validation was handled during parsing, so we just sanity check that
   1119     // something didn't go haywire.
   1120     if (!isImm())
    1121       return false;
   1122 
   1123     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
   1124       int64_t Val = CE->getValue();
   1125       int64_t Min = - (4096 * (1LL << (21 - 1)));
   1126       int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
   1127       return (Val % 4096) == 0 && Val >= Min && Val <= Max;
   1128     }
   1129 
   1130     return true;
   1131   }
   1132 
   1133   bool isAdrLabel() const {
   1134     // Validation was handled during parsing, so we just sanity check that
   1135     // something didn't go haywire.
   1136     if (!isImm())
    1137       return false;
   1138 
   1139     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
   1140       int64_t Val = CE->getValue();
   1141       int64_t Min = - (1LL << (21 - 1));
   1142       int64_t Max = ((1LL << (21 - 1)) - 1);
   1143       return Val >= Min && Val <= Max;
   1144     }
   1145 
   1146     return true;
   1147   }
   1148 
   1149   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
   1150     // Add as immediates when possible.  Null MCExpr = 0.
   1151     if (!Expr)
   1152       Inst.addOperand(MCOperand::createImm(0));
   1153     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
   1154       Inst.addOperand(MCOperand::createImm(CE->getValue()));
   1155     else
   1156       Inst.addOperand(MCOperand::createExpr(Expr));
   1157   }
   1158 
   1159   void addRegOperands(MCInst &Inst, unsigned N) const {
   1160     assert(N == 1 && "Invalid number of operands!");
   1161     Inst.addOperand(MCOperand::createReg(getReg()));
   1162   }
   1163 
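           // Converts the parsed 64-bit X register to the W register with the
           // same encoding (e.g. X3 -> W3) for operands encoded as 32-bit
           // registers.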
   1164   void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
   1165     assert(N == 1 && "Invalid number of operands!");
   1166     assert(
   1167         AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
   1168 
   1169     const MCRegisterInfo *RI = Ctx.getRegisterInfo();
   1170     uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
   1171         RI->getEncodingValue(getReg()));
   1172 
   1173     Inst.addOperand(MCOperand::createReg(Reg));
   1174   }
   1175 
   1176   void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
   1177     assert(N == 1 && "Invalid number of operands!");
   1178     assert(
   1179         AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
   1180     Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
   1181   }
   1182 
   1183   void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
   1184     assert(N == 1 && "Invalid number of operands!");
   1185     assert(
   1186         AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
   1187     Inst.addOperand(MCOperand::createReg(getReg()));
   1188   }
   1189 
   1190   void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
   1191     assert(N == 1 && "Invalid number of operands!");
   1192     Inst.addOperand(MCOperand::createReg(getReg()));
   1193   }
   1194 
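           // Vector lists are parsed with Q-register numbering; the helpers below
           // rebase the start register onto the D- or Q-register tuple classes.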
   1195   template <unsigned NumRegs>
   1196   void addVectorList64Operands(MCInst &Inst, unsigned N) const {
   1197     assert(N == 1 && "Invalid number of operands!");
   1198     static const unsigned FirstRegs[] = { AArch64::D0,
   1199                                           AArch64::D0_D1,
   1200                                           AArch64::D0_D1_D2,
   1201                                           AArch64::D0_D1_D2_D3 };
   1202     unsigned FirstReg = FirstRegs[NumRegs - 1];
   1203 
   1204     Inst.addOperand(
   1205         MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
   1206   }
   1207 
   1208   template <unsigned NumRegs>
   1209   void addVectorList128Operands(MCInst &Inst, unsigned N) const {
   1210     assert(N == 1 && "Invalid number of operands!");
   1211     static const unsigned FirstRegs[] = { AArch64::Q0,
   1212                                           AArch64::Q0_Q1,
   1213                                           AArch64::Q0_Q1_Q2,
   1214                                           AArch64::Q0_Q1_Q2_Q3 };
   1215     unsigned FirstReg = FirstRegs[NumRegs - 1];
   1216 
   1217     Inst.addOperand(
   1218         MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
   1219   }
   1220 
   1221   void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
   1222     assert(N == 1 && "Invalid number of operands!");
   1223     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
   1224   }
   1225 
   1226   void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
   1227     assert(N == 1 && "Invalid number of operands!");
   1228     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
   1229   }
   1230 
   1231   void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
   1232     assert(N == 1 && "Invalid number of operands!");
   1233     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
   1234   }
   1235 
   1236   void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
   1237     assert(N == 1 && "Invalid number of operands!");
   1238     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
   1239   }
   1240 
   1241   void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
   1242     assert(N == 1 && "Invalid number of operands!");
   1243     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
   1244   }
   1245 
   1246   void addImmOperands(MCInst &Inst, unsigned N) const {
   1247     assert(N == 1 && "Invalid number of operands!");
   1248     // If this is a pageoff symrefexpr with an addend, adjust the addend
   1249     // to be only the page-offset portion. Otherwise, just add the expr
   1250     // as-is.
   1251     addExpr(Inst, getImm());
   1252   }
   1253 
   1254   void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
   1255     assert(N == 2 && "Invalid number of operands!");
   1256     if (isShiftedImm()) {
   1257       addExpr(Inst, getShiftedImmVal());
   1258       Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
   1259     } else {
   1260       addExpr(Inst, getImm());
   1261       Inst.addOperand(MCOperand::createImm(0));
   1262     }
   1263   }
   1264 
   1265   void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
   1266     assert(N == 2 && "Invalid number of operands!");
   1267 
   1268     const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
   1269     const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
   1270     int64_t Val = -CE->getValue();
   1271     unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
   1272 
   1273     Inst.addOperand(MCOperand::createImm(Val));
   1274     Inst.addOperand(MCOperand::createImm(ShiftAmt));
   1275   }
   1276 
   1277   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
   1278     assert(N == 1 && "Invalid number of operands!");
   1279     Inst.addOperand(MCOperand::createImm(getCondCode()));
   1280   }
   1281 
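           // ADRP targets are page-granular: a resolved constant is encoded in
           // units of 4 KiB pages, hence the shift right by 12 below.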
   1282   void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
   1283     assert(N == 1 && "Invalid number of operands!");
   1284     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
   1285     if (!MCE)
   1286       addExpr(Inst, getImm());
   1287     else
   1288       Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
   1289   }
   1290 
   1291   void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
   1292     addImmOperands(Inst, N);
   1293   }
   1294 
   1295   template<int Scale>
   1296   void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
   1297     assert(N == 1 && "Invalid number of operands!");
   1298     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
   1299 
   1300     if (!MCE) {
   1301       Inst.addOperand(MCOperand::createExpr(getImm()));
   1302       return;
   1303     }
   1304     Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
   1305   }
   1306 
   1307   void addSImm9Operands(MCInst &Inst, unsigned N) const {
   1308     assert(N == 1 && "Invalid number of operands!");
   1309     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1310     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1311   }
   1312 
   1313   void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
   1314     assert(N == 1 && "Invalid number of operands!");
   1315     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1316     Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
   1317   }
   1318 
   1319   void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
   1320     assert(N == 1 && "Invalid number of operands!");
   1321     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1322     Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
   1323   }
   1324 
   1325   void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
   1326     assert(N == 1 && "Invalid number of operands!");
   1327     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1328     Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
   1329   }
   1330 
   1331   void addImm0_1Operands(MCInst &Inst, unsigned N) const {
   1332     assert(N == 1 && "Invalid number of operands!");
   1333     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1334     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1335   }
   1336 
   1337   void addImm0_7Operands(MCInst &Inst, unsigned N) const {
   1338     assert(N == 1 && "Invalid number of operands!");
   1339     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1340     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1341   }
   1342 
   1343   void addImm1_8Operands(MCInst &Inst, unsigned N) const {
   1344     assert(N == 1 && "Invalid number of operands!");
   1345     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1346     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1347   }
   1348 
   1349   void addImm0_15Operands(MCInst &Inst, unsigned N) const {
   1350     assert(N == 1 && "Invalid number of operands!");
   1351     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1352     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1353   }
   1354 
   1355   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
   1356     assert(N == 1 && "Invalid number of operands!");
   1357     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1358     assert(MCE && "Invalid constant immediate operand!");
   1359     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1360   }
   1361 
   1362   void addImm0_31Operands(MCInst &Inst, unsigned N) const {
   1363     assert(N == 1 && "Invalid number of operands!");
   1364     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1365     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1366   }
   1367 
   1368   void addImm1_31Operands(MCInst &Inst, unsigned N) const {
   1369     assert(N == 1 && "Invalid number of operands!");
   1370     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1371     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1372   }
   1373 
   1374   void addImm1_32Operands(MCInst &Inst, unsigned N) const {
   1375     assert(N == 1 && "Invalid number of operands!");
   1376     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1377     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1378   }
   1379 
   1380   void addImm0_63Operands(MCInst &Inst, unsigned N) const {
   1381     assert(N == 1 && "Invalid number of operands!");
   1382     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1383     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1384   }
   1385 
   1386   void addImm1_63Operands(MCInst &Inst, unsigned N) const {
   1387     assert(N == 1 && "Invalid number of operands!");
   1388     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1389     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1390   }
   1391 
   1392   void addImm1_64Operands(MCInst &Inst, unsigned N) const {
   1393     assert(N == 1 && "Invalid number of operands!");
   1394     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1395     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1396   }
   1397 
   1398   void addImm0_127Operands(MCInst &Inst, unsigned N) const {
   1399     assert(N == 1 && "Invalid number of operands!");
   1400     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1401     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1402   }
   1403 
   1404   void addImm0_255Operands(MCInst &Inst, unsigned N) const {
   1405     assert(N == 1 && "Invalid number of operands!");
   1406     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1407     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1408   }
   1409 
   1410   void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
   1411     assert(N == 1 && "Invalid number of operands!");
   1412     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1413     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1414   }
   1415 
   1416   void addImm32_63Operands(MCInst &Inst, unsigned N) const {
   1417     assert(N == 1 && "Invalid number of operands!");
   1418     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1419     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   1420   }
   1421 
   1422   void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
   1423     assert(N == 1 && "Invalid number of operands!");
   1424     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1425     uint64_t encoding =
   1426         AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
   1427     Inst.addOperand(MCOperand::createImm(encoding));
   1428   }
   1429 
   1430   void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
   1431     assert(N == 1 && "Invalid number of operands!");
   1432     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1433     uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
   1434     Inst.addOperand(MCOperand::createImm(encoding));
   1435   }
   1436 
   1437   void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
   1438     assert(N == 1 && "Invalid number of operands!");
   1439     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1440     int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
   1441     uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
   1442     Inst.addOperand(MCOperand::createImm(encoding));
   1443   }
   1444 
   1445   void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
   1446     assert(N == 1 && "Invalid number of operands!");
   1447     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1448     uint64_t encoding =
   1449         AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
   1450     Inst.addOperand(MCOperand::createImm(encoding));
   1451   }
   1452 
   1453   void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
   1454     assert(N == 1 && "Invalid number of operands!");
   1455     const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
   1456     uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
   1457     Inst.addOperand(MCOperand::createImm(encoding));
   1458   }
   1459 
   1460   void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
   1461     // Branch operands don't encode the low bits, so shift them off
   1462     // here. If it's a label, however, just put it on directly as there's
   1463     // not enough information now to do anything.
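             // (Instructions, and therefore branch targets, are 4-byte aligned, so a
             // resolved byte offset of +8 is emitted as an immediate of 2.)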
   1464     assert(N == 1 && "Invalid number of operands!");
   1465     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
   1466     if (!MCE) {
   1467       addExpr(Inst, getImm());
   1468       return;
   1469     }
   1470     assert(MCE && "Invalid constant immediate operand!");
   1471     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
   1472   }
   1473 
   1474   void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
   1475     // Branch operands don't encode the low bits, so shift them off
   1476     // here. If it's a label, however, just put it on directly as there's
   1477     // not enough information now to do anything.
   1478     assert(N == 1 && "Invalid number of operands!");
   1479     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
   1480     if (!MCE) {
   1481       addExpr(Inst, getImm());
   1482       return;
   1483     }
   1484     assert(MCE && "Invalid constant immediate operand!");
   1485     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
   1486   }
   1487 
   1488   void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
   1489     // Branch operands don't encode the low bits, so shift them off
   1490     // here. If it's a label, however, just put it on directly as there's
   1491     // not enough information now to do anything.
   1492     assert(N == 1 && "Invalid number of operands!");
   1493     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
   1494     if (!MCE) {
   1495       addExpr(Inst, getImm());
   1496       return;
   1497     }
   1498     assert(MCE && "Invalid constant immediate operand!");
   1499     Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
   1500   }
   1501 
   1502   void addFPImmOperands(MCInst &Inst, unsigned N) const {
   1503     assert(N == 1 && "Invalid number of operands!");
   1504     Inst.addOperand(MCOperand::createImm(getFPImm()));
   1505   }
   1506 
   1507   void addBarrierOperands(MCInst &Inst, unsigned N) const {
   1508     assert(N == 1 && "Invalid number of operands!");
   1509     Inst.addOperand(MCOperand::createImm(getBarrier()));
   1510   }
   1511 
   1512   void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
   1513     assert(N == 1 && "Invalid number of operands!");
   1514 
   1515     Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
   1516   }
   1517 
   1518   void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
   1519     assert(N == 1 && "Invalid number of operands!");
   1520 
   1521     Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
   1522   }
   1523 
   1524   void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
   1525     assert(N == 1 && "Invalid number of operands!");
   1526 
   1527     Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
   1528   }
   1529 
   1530   void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
   1531     assert(N == 1 && "Invalid number of operands!");
   1532 
   1533     Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
   1534   }
   1535 
   1536   void addSysCROperands(MCInst &Inst, unsigned N) const {
   1537     assert(N == 1 && "Invalid number of operands!");
   1538     Inst.addOperand(MCOperand::createImm(getSysCR()));
   1539   }
   1540 
   1541   void addPrefetchOperands(MCInst &Inst, unsigned N) const {
   1542     assert(N == 1 && "Invalid number of operands!");
   1543     Inst.addOperand(MCOperand::createImm(getPrefetch()));
   1544   }
   1545 
   1546   void addPSBHintOperands(MCInst &Inst, unsigned N) const {
   1547     assert(N == 1 && "Invalid number of operands!");
   1548     Inst.addOperand(MCOperand::createImm(getPSBHint()));
   1549   }
   1550 
   1551   void addShifterOperands(MCInst &Inst, unsigned N) const {
   1552     assert(N == 1 && "Invalid number of operands!");
   1553     unsigned Imm =
   1554         AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
   1555     Inst.addOperand(MCOperand::createImm(Imm));
   1556   }
   1557 
   1558   void addExtendOperands(MCInst &Inst, unsigned N) const {
   1559     assert(N == 1 && "Invalid number of operands!");
   1560     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1561     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
   1562     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
   1563     Inst.addOperand(MCOperand::createImm(Imm));
   1564   }
   1565 
   1566   void addExtend64Operands(MCInst &Inst, unsigned N) const {
   1567     assert(N == 1 && "Invalid number of operands!");
   1568     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1569     if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
   1570     unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
   1571     Inst.addOperand(MCOperand::createImm(Imm));
   1572   }
   1573 
   1574   void addMemExtendOperands(MCInst &Inst, unsigned N) const {
   1575     assert(N == 2 && "Invalid number of operands!");
   1576     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1577     bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
   1578     Inst.addOperand(MCOperand::createImm(IsSigned));
   1579     Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
   1580   }
   1581 
   1582   // For 8-bit load/store instructions with a register offset, both the
   1583   // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
   1584   // they're disambiguated by whether the shift was explicit or implicit rather
   1585   // than its size.
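           // For example, "ldrb w0, [x1, x2, lsl #0]" selects the "DoShift" form while
           // "ldrb w0, [x1, x2]" selects the "NoShift" form, even though both describe
           // a shift amount of 0.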
   1586   void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
   1587     assert(N == 2 && "Invalid number of operands!");
   1588     AArch64_AM::ShiftExtendType ET = getShiftExtendType();
   1589     bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
   1590     Inst.addOperand(MCOperand::createImm(IsSigned));
   1591     Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
   1592   }
   1593 
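           // The MOVZ/MOVN mov-aliases accept "mov Xd, #imm" when the immediate fits in
           // a single (possibly inverted) 16-bit chunk. For example, with Shift == 16,
           // "mov x0, #0x12340000" contributes the 16-bit operand 0x1234 here.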
   1594   template<int Shift>
   1595   void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
   1596     assert(N == 1 && "Invalid number of operands!");
   1597 
   1598     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
   1599     uint64_t Value = CE->getValue();
   1600     Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
   1601   }
   1602 
   1603   template<int Shift>
   1604   void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
   1605     assert(N == 1 && "Invalid number of operands!");
   1606 
   1607     const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
   1608     uint64_t Value = CE->getValue();
   1609     Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
   1610   }
   1611 
   1612   void print(raw_ostream &OS) const override;
   1613 
   1614   static std::unique_ptr<AArch64Operand>
   1615   CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
   1616     auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
   1617     Op->Tok.Data = Str.data();
   1618     Op->Tok.Length = Str.size();
   1619     Op->Tok.IsSuffix = IsSuffix;
   1620     Op->StartLoc = S;
   1621     Op->EndLoc = S;
   1622     return Op;
   1623   }
   1624 
   1625   static std::unique_ptr<AArch64Operand>
   1626   CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
   1627     auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
   1628     Op->Reg.RegNum = RegNum;
   1629     Op->Reg.isVector = isVector;
   1630     Op->StartLoc = S;
   1631     Op->EndLoc = E;
   1632     return Op;
   1633   }
   1634 
   1635   static std::unique_ptr<AArch64Operand>
   1636   CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
   1637                    char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
   1638     auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
   1639     Op->VectorList.RegNum = RegNum;
   1640     Op->VectorList.Count = Count;
   1641     Op->VectorList.NumElements = NumElements;
   1642     Op->VectorList.ElementKind = ElementKind;
   1643     Op->StartLoc = S;
   1644     Op->EndLoc = E;
   1645     return Op;
   1646   }
   1647 
   1648   static std::unique_ptr<AArch64Operand>
   1649   CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
   1650     auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
   1651     Op->VectorIndex.Val = Idx;
   1652     Op->StartLoc = S;
   1653     Op->EndLoc = E;
   1654     return Op;
   1655   }
   1656 
   1657   static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
   1658                                                    SMLoc E, MCContext &Ctx) {
   1659     auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
   1660     Op->Imm.Val = Val;
   1661     Op->StartLoc = S;
   1662     Op->EndLoc = E;
   1663     return Op;
   1664   }
   1665 
   1666   static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
   1667                                                           unsigned ShiftAmount,
   1668                                                           SMLoc S, SMLoc E,
   1669                                                           MCContext &Ctx) {
   1670     auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
    1671     Op->ShiftedImm.Val = Val;
   1672     Op->ShiftedImm.ShiftAmount = ShiftAmount;
   1673     Op->StartLoc = S;
   1674     Op->EndLoc = E;
   1675     return Op;
   1676   }
   1677 
   1678   static std::unique_ptr<AArch64Operand>
   1679   CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
   1680     auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
   1681     Op->CondCode.Code = Code;
   1682     Op->StartLoc = S;
   1683     Op->EndLoc = E;
   1684     return Op;
   1685   }
   1686 
   1687   static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
   1688                                                      MCContext &Ctx) {
   1689     auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
   1690     Op->FPImm.Val = Val;
   1691     Op->StartLoc = S;
   1692     Op->EndLoc = S;
   1693     return Op;
   1694   }
   1695 
   1696   static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
   1697                                                        StringRef Str,
   1698                                                        SMLoc S,
   1699                                                        MCContext &Ctx) {
   1700     auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
   1701     Op->Barrier.Val = Val;
   1702     Op->Barrier.Data = Str.data();
   1703     Op->Barrier.Length = Str.size();
   1704     Op->StartLoc = S;
   1705     Op->EndLoc = S;
   1706     return Op;
   1707   }
   1708 
   1709   static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
   1710                                                       uint32_t MRSReg,
   1711                                                       uint32_t MSRReg,
   1712                                                       uint32_t PStateField,
   1713                                                       MCContext &Ctx) {
   1714     auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
   1715     Op->SysReg.Data = Str.data();
   1716     Op->SysReg.Length = Str.size();
   1717     Op->SysReg.MRSReg = MRSReg;
   1718     Op->SysReg.MSRReg = MSRReg;
   1719     Op->SysReg.PStateField = PStateField;
   1720     Op->StartLoc = S;
   1721     Op->EndLoc = S;
   1722     return Op;
   1723   }
   1724 
   1725   static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
   1726                                                      SMLoc E, MCContext &Ctx) {
   1727     auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
   1728     Op->SysCRImm.Val = Val;
   1729     Op->StartLoc = S;
   1730     Op->EndLoc = E;
   1731     return Op;
   1732   }
   1733 
   1734   static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
   1735                                                         StringRef Str,
   1736                                                         SMLoc S,
   1737                                                         MCContext &Ctx) {
   1738     auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
   1739     Op->Prefetch.Val = Val;
   1740     Op->Barrier.Data = Str.data();
   1741     Op->Barrier.Length = Str.size();
   1742     Op->StartLoc = S;
   1743     Op->EndLoc = S;
   1744     return Op;
   1745   }
   1746 
   1747   static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
   1748                                                        StringRef Str,
   1749                                                        SMLoc S,
   1750                                                        MCContext &Ctx) {
   1751     auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
   1752     Op->PSBHint.Val = Val;
   1753     Op->PSBHint.Data = Str.data();
   1754     Op->PSBHint.Length = Str.size();
   1755     Op->StartLoc = S;
   1756     Op->EndLoc = S;
   1757     return Op;
   1758   }
   1759 
   1760   static std::unique_ptr<AArch64Operand>
   1761   CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
   1762                     bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
   1763     auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
   1764     Op->ShiftExtend.Type = ShOp;
   1765     Op->ShiftExtend.Amount = Val;
   1766     Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
   1767     Op->StartLoc = S;
   1768     Op->EndLoc = E;
   1769     return Op;
   1770   }
   1771 };
   1772 
   1773 } // end anonymous namespace.
   1774 
   1775 void AArch64Operand::print(raw_ostream &OS) const {
   1776   switch (Kind) {
   1777   case k_FPImm:
   1778     OS << "<fpimm " << getFPImm() << "("
   1779        << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
   1780     break;
   1781   case k_Barrier: {
   1782     StringRef Name = getBarrierName();
   1783     if (!Name.empty())
   1784       OS << "<barrier " << Name << ">";
   1785     else
   1786       OS << "<barrier invalid #" << getBarrier() << ">";
   1787     break;
   1788   }
   1789   case k_Immediate:
   1790     OS << *getImm();
   1791     break;
   1792   case k_ShiftedImm: {
   1793     unsigned Shift = getShiftedImmShift();
   1794     OS << "<shiftedimm ";
   1795     OS << *getShiftedImmVal();
   1796     OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
   1797     break;
   1798   }
   1799   case k_CondCode:
   1800     OS << "<condcode " << getCondCode() << ">";
   1801     break;
   1802   case k_Register:
   1803     OS << "<register " << getReg() << ">";
   1804     break;
   1805   case k_VectorList: {
   1806     OS << "<vectorlist ";
   1807     unsigned Reg = getVectorListStart();
   1808     for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
   1809       OS << Reg + i << " ";
   1810     OS << ">";
   1811     break;
   1812   }
   1813   case k_VectorIndex:
   1814     OS << "<vectorindex " << getVectorIndex() << ">";
   1815     break;
   1816   case k_SysReg:
   1817     OS << "<sysreg: " << getSysReg() << '>';
   1818     break;
   1819   case k_Token:
   1820     OS << "'" << getToken() << "'";
   1821     break;
   1822   case k_SysCR:
   1823     OS << "c" << getSysCR();
   1824     break;
   1825   case k_Prefetch: {
   1826     StringRef Name = getPrefetchName();
   1827     if (!Name.empty())
   1828       OS << "<prfop " << Name << ">";
   1829     else
   1830       OS << "<prfop invalid #" << getPrefetch() << ">";
   1831     break;
   1832   }
   1833   case k_PSBHint: {
   1834     OS << getPSBHintName();
   1835     break;
   1836   }
   1837   case k_ShiftExtend: {
   1838     OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
   1839        << getShiftExtendAmount();
   1840     if (!hasShiftExtendAmount())
   1841       OS << "<imp>";
   1842     OS << '>';
   1843     break;
   1844   }
   1845   }
   1846 }
   1847 
   1848 /// @name Auto-generated Match Functions
   1849 /// {
   1850 
   1851 static unsigned MatchRegisterName(StringRef Name);
   1852 
   1853 /// }
   1854 
   1855 static unsigned matchVectorRegName(StringRef Name) {
   1856   return StringSwitch<unsigned>(Name.lower())
   1857       .Case("v0", AArch64::Q0)
   1858       .Case("v1", AArch64::Q1)
   1859       .Case("v2", AArch64::Q2)
   1860       .Case("v3", AArch64::Q3)
   1861       .Case("v4", AArch64::Q4)
   1862       .Case("v5", AArch64::Q5)
   1863       .Case("v6", AArch64::Q6)
   1864       .Case("v7", AArch64::Q7)
   1865       .Case("v8", AArch64::Q8)
   1866       .Case("v9", AArch64::Q9)
   1867       .Case("v10", AArch64::Q10)
   1868       .Case("v11", AArch64::Q11)
   1869       .Case("v12", AArch64::Q12)
   1870       .Case("v13", AArch64::Q13)
   1871       .Case("v14", AArch64::Q14)
   1872       .Case("v15", AArch64::Q15)
   1873       .Case("v16", AArch64::Q16)
   1874       .Case("v17", AArch64::Q17)
   1875       .Case("v18", AArch64::Q18)
   1876       .Case("v19", AArch64::Q19)
   1877       .Case("v20", AArch64::Q20)
   1878       .Case("v21", AArch64::Q21)
   1879       .Case("v22", AArch64::Q22)
   1880       .Case("v23", AArch64::Q23)
   1881       .Case("v24", AArch64::Q24)
   1882       .Case("v25", AArch64::Q25)
   1883       .Case("v26", AArch64::Q26)
   1884       .Case("v27", AArch64::Q27)
   1885       .Case("v28", AArch64::Q28)
   1886       .Case("v29", AArch64::Q29)
   1887       .Case("v30", AArch64::Q30)
   1888       .Case("v31", AArch64::Q31)
   1889       .Default(0);
   1890 }
   1891 
   1892 static bool isValidVectorKind(StringRef Name) {
   1893   return StringSwitch<bool>(Name.lower())
   1894       .Case(".8b", true)
   1895       .Case(".16b", true)
   1896       .Case(".4h", true)
   1897       .Case(".8h", true)
   1898       .Case(".2s", true)
   1899       .Case(".4s", true)
   1900       .Case(".1d", true)
   1901       .Case(".2d", true)
   1902       .Case(".1q", true)
    1903       // Accept the width-neutral ones, too, for verbose syntax. If those
    1904       // are used in the wrong places, the token operand won't match, so
    1905       // everything still works out.
   1906       .Case(".b", true)
   1907       .Case(".h", true)
   1908       .Case(".s", true)
   1909       .Case(".d", true)
   1910       // Needed for fp16 scalar pairwise reductions
   1911       .Case(".2h", true)
   1912       .Default(false);
   1913 }
   1914 
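         // For example, ".16b" gives NumElements == 16 and ElementKind == 'b', while a
         // width-neutral kind such as ".s" gives NumElements == 0 and ElementKind == 's'.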
   1915 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
   1916                                  char &ElementKind) {
   1917   assert(isValidVectorKind(Name));
   1918 
   1919   ElementKind = Name.lower()[Name.size() - 1];
   1920   NumElements = 0;
   1921 
   1922   if (Name.size() == 2)
   1923     return;
   1924 
   1925   // Parse the lane count
   1926   Name = Name.drop_front();
   1927   while (isdigit(Name.front())) {
   1928     NumElements = 10 * NumElements + (Name.front() - '0');
   1929     Name = Name.drop_front();
   1930   }
   1931 }
   1932 
   1933 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
   1934                                      SMLoc &EndLoc) {
   1935   StartLoc = getLoc();
   1936   RegNo = tryParseRegister();
   1937   EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   1938   return (RegNo == (unsigned)-1);
   1939 }
   1940 
   1941 // Matches a register name or register alias previously defined by '.req'
   1942 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
   1943                                                   bool isVector) {
   1944   unsigned RegNum = isVector ? matchVectorRegName(Name)
   1945                              : MatchRegisterName(Name);
   1946 
   1947   if (RegNum == 0) {
   1948     // Check for aliases registered via .req. Canonicalize to lower case.
   1949     // That's more consistent since register names are case insensitive, and
   1950     // it's how the original entry was passed in from MC/MCParser/AsmParser.
   1951     auto Entry = RegisterReqs.find(Name.lower());
   1952     if (Entry == RegisterReqs.end())
   1953       return 0;
   1954     // set RegNum if the match is the right kind of register
   1955     if (isVector == Entry->getValue().first)
   1956       RegNum = Entry->getValue().second;
   1957   }
   1958   return RegNum;
   1959 }
   1960 
   1961 /// tryParseRegister - Try to parse a register name. The token must be an
   1962 /// Identifier when called, and if it is a register name the token is eaten and
    1963 /// the register number is returned.
   1964 int AArch64AsmParser::tryParseRegister() {
   1965   MCAsmParser &Parser = getParser();
   1966   const AsmToken &Tok = Parser.getTok();
   1967   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
   1968 
   1969   std::string lowerCase = Tok.getString().lower();
   1970   unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
   1971   // Also handle a few aliases of registers.
   1972   if (RegNum == 0)
   1973     RegNum = StringSwitch<unsigned>(lowerCase)
   1974                  .Case("fp",  AArch64::FP)
   1975                  .Case("lr",  AArch64::LR)
   1976                  .Case("x31", AArch64::XZR)
   1977                  .Case("w31", AArch64::WZR)
   1978                  .Default(0);
   1979 
   1980   if (RegNum == 0)
   1981     return -1;
   1982 
   1983   Parser.Lex(); // Eat identifier token.
   1984   return RegNum;
   1985 }
   1986 
   1987 /// tryMatchVectorRegister - Try to parse a vector register name with optional
   1988 /// kind specifier. If it is a register specifier, eat the token and return it.
   1989 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
   1990   MCAsmParser &Parser = getParser();
   1991   if (Parser.getTok().isNot(AsmToken::Identifier)) {
   1992     TokError("vector register expected");
   1993     return -1;
   1994   }
   1995 
   1996   StringRef Name = Parser.getTok().getString();
   1997   // If there is a kind specifier, it's separated from the register name by
   1998   // a '.'.
   1999   size_t Start = 0, Next = Name.find('.');
   2000   StringRef Head = Name.slice(Start, Next);
   2001   unsigned RegNum = matchRegisterNameAlias(Head, true);
   2002 
   2003   if (RegNum) {
   2004     if (Next != StringRef::npos) {
   2005       Kind = Name.slice(Next, StringRef::npos);
   2006       if (!isValidVectorKind(Kind)) {
   2007         TokError("invalid vector kind qualifier");
   2008         return -1;
   2009       }
   2010     }
   2011     Parser.Lex(); // Eat the register token.
   2012     return RegNum;
   2013   }
   2014 
   2015   if (expected)
   2016     TokError("vector register expected");
   2017   return -1;
   2018 }
   2019 
   2020 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
   2021 AArch64AsmParser::OperandMatchResultTy
   2022 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
   2023   MCAsmParser &Parser = getParser();
   2024   SMLoc S = getLoc();
   2025 
   2026   if (Parser.getTok().isNot(AsmToken::Identifier)) {
   2027     Error(S, "Expected cN operand where 0 <= N <= 15");
   2028     return MatchOperand_ParseFail;
   2029   }
   2030 
   2031   StringRef Tok = Parser.getTok().getIdentifier();
   2032   if (Tok[0] != 'c' && Tok[0] != 'C') {
   2033     Error(S, "Expected cN operand where 0 <= N <= 15");
   2034     return MatchOperand_ParseFail;
   2035   }
   2036 
   2037   uint32_t CRNum;
   2038   bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
   2039   if (BadNum || CRNum > 15) {
   2040     Error(S, "Expected cN operand where 0 <= N <= 15");
   2041     return MatchOperand_ParseFail;
   2042   }
   2043 
   2044   Parser.Lex(); // Eat identifier token.
   2045   Operands.push_back(
   2046       AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
   2047   return MatchOperand_Success;
   2048 }
   2049 
   2050 /// tryParsePrefetch - Try to parse a prefetch operand.
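         /// Accepts either a named hint or a raw 5-bit immediate, e.g.
         /// "prfm pldl1keep, [x0]" or "prfm #5, [x8]".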
   2051 AArch64AsmParser::OperandMatchResultTy
   2052 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
   2053   MCAsmParser &Parser = getParser();
   2054   SMLoc S = getLoc();
   2055   const AsmToken &Tok = Parser.getTok();
   2056   // Either an identifier for named values or a 5-bit immediate.
   2057   bool Hash = Tok.is(AsmToken::Hash);
   2058   if (Hash || Tok.is(AsmToken::Integer)) {
   2059     if (Hash)
   2060       Parser.Lex(); // Eat hash token.
   2061     const MCExpr *ImmVal;
   2062     if (getParser().parseExpression(ImmVal))
   2063       return MatchOperand_ParseFail;
   2064 
   2065     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
   2066     if (!MCE) {
   2067       TokError("immediate value expected for prefetch operand");
   2068       return MatchOperand_ParseFail;
   2069     }
   2070     unsigned prfop = MCE->getValue();
   2071     if (prfop > 31) {
   2072       TokError("prefetch operand out of range, [0,31] expected");
   2073       return MatchOperand_ParseFail;
   2074     }
   2075 
   2076     auto PRFM = AArch64PRFM::lookupPRFMByEncoding(MCE->getValue());
   2077     Operands.push_back(AArch64Operand::CreatePrefetch(
   2078         prfop, PRFM ? PRFM->Name : "", S, getContext()));
   2079     return MatchOperand_Success;
   2080   }
   2081 
   2082   if (Tok.isNot(AsmToken::Identifier)) {
   2083     TokError("pre-fetch hint expected");
   2084     return MatchOperand_ParseFail;
   2085   }
   2086 
   2087   auto PRFM = AArch64PRFM::lookupPRFMByName(Tok.getString());
   2088   if (!PRFM) {
   2089     TokError("pre-fetch hint expected");
   2090     return MatchOperand_ParseFail;
   2091   }
   2092 
   2093   Parser.Lex(); // Eat identifier token.
   2094   Operands.push_back(AArch64Operand::CreatePrefetch(
   2095       PRFM->Encoding, Tok.getString(), S, getContext()));
   2096   return MatchOperand_Success;
   2097 }
   2098 
    2099 /// tryParsePSBHint - Try to parse a PSB hint operand (mapped onto the HINT instruction).
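         /// e.g. "psb csync" from the Statistical Profiling Extension.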
   2100 AArch64AsmParser::OperandMatchResultTy
   2101 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
   2102   MCAsmParser &Parser = getParser();
   2103   SMLoc S = getLoc();
   2104   const AsmToken &Tok = Parser.getTok();
   2105   if (Tok.isNot(AsmToken::Identifier)) {
   2106     TokError("invalid operand for instruction");
   2107     return MatchOperand_ParseFail;
   2108   }
   2109 
   2110   auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
   2111   if (!PSB) {
   2112     TokError("invalid operand for instruction");
   2113     return MatchOperand_ParseFail;
   2114   }
   2115 
   2116   Parser.Lex(); // Eat identifier token.
   2117   Operands.push_back(AArch64Operand::CreatePSBHint(
   2118       PSB->Encoding, Tok.getString(), S, getContext()));
   2119   return MatchOperand_Success;
   2120 }
   2121 
   2122 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
   2123 /// instruction.
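         /// e.g. "adrp x0, var", "adrp x0, :got:var" (ELF) or "adrp x0, _var@PAGE"
         /// (MachO).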
   2124 AArch64AsmParser::OperandMatchResultTy
   2125 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
   2126   MCAsmParser &Parser = getParser();
   2127   SMLoc S = getLoc();
   2128   const MCExpr *Expr;
   2129 
   2130   if (Parser.getTok().is(AsmToken::Hash)) {
   2131     Parser.Lex(); // Eat hash token.
   2132   }
   2133 
   2134   if (parseSymbolicImmVal(Expr))
   2135     return MatchOperand_ParseFail;
   2136 
   2137   AArch64MCExpr::VariantKind ELFRefKind;
   2138   MCSymbolRefExpr::VariantKind DarwinRefKind;
   2139   int64_t Addend;
   2140   if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
   2141     if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
   2142         ELFRefKind == AArch64MCExpr::VK_INVALID) {
   2143       // No modifier was specified at all; this is the syntax for an ELF basic
   2144       // ADRP relocation (unfortunately).
   2145       Expr =
   2146           AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
   2147     } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
   2148                 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
   2149                Addend != 0) {
   2150       Error(S, "gotpage label reference not allowed an addend");
   2151       return MatchOperand_ParseFail;
   2152     } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
   2153                DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
   2154                DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
   2155                ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
   2156                ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
   2157                ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
   2158       // The operand must be an @page or @gotpage qualified symbolref.
   2159       Error(S, "page or gotpage label reference expected");
   2160       return MatchOperand_ParseFail;
   2161     }
   2162   }
   2163 
    2164   // We have either a label reference (possibly with an addend) or an immediate.
    2165   // The addend is a raw value here; the linker will adjust it so that it
    2166   // references only the page.
   2167   SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   2168   Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
   2169 
   2170   return MatchOperand_Success;
   2171 }
   2172 
   2173 /// tryParseAdrLabel - Parse and validate a source label for the ADR
   2174 /// instruction.
   2175 AArch64AsmParser::OperandMatchResultTy
   2176 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
   2177   MCAsmParser &Parser = getParser();
   2178   SMLoc S = getLoc();
   2179   const MCExpr *Expr;
   2180 
   2181   if (Parser.getTok().is(AsmToken::Hash)) {
   2182     Parser.Lex(); // Eat hash token.
   2183   }
   2184 
   2185   if (getParser().parseExpression(Expr))
   2186     return MatchOperand_ParseFail;
   2187 
   2188   SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   2189   Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
   2190 
   2191   return MatchOperand_Success;
   2192 }
   2193 
   2194 /// tryParseFPImm - A floating point immediate expression operand.
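         /// e.g. "fmov d0, #1.0". The value must be representable as an 8-bit encoded
         /// floating-point immediate; +0.0 is also accepted and handled later by
         /// substituting the zero register.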
   2195 AArch64AsmParser::OperandMatchResultTy
   2196 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
   2197   MCAsmParser &Parser = getParser();
   2198   SMLoc S = getLoc();
   2199 
   2200   bool Hash = false;
   2201   if (Parser.getTok().is(AsmToken::Hash)) {
   2202     Parser.Lex(); // Eat '#'
   2203     Hash = true;
   2204   }
   2205 
   2206   // Handle negation, as that still comes through as a separate token.
   2207   bool isNegative = false;
   2208   if (Parser.getTok().is(AsmToken::Minus)) {
   2209     isNegative = true;
   2210     Parser.Lex();
   2211   }
   2212   const AsmToken &Tok = Parser.getTok();
   2213   if (Tok.is(AsmToken::Real)) {
   2214     APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
   2215     if (isNegative)
   2216       RealVal.changeSign();
   2217 
   2218     uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
   2219     int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
   2220     Parser.Lex(); // Eat the token.
   2221     // Check for out of range values. As an exception, we let Zero through,
   2222     // as we handle that special case in post-processing before matching in
   2223     // order to use the zero register for it.
   2224     if (Val == -1 && !RealVal.isPosZero()) {
   2225       TokError("expected compatible register or floating-point constant");
   2226       return MatchOperand_ParseFail;
   2227     }
   2228     Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
   2229     return MatchOperand_Success;
   2230   }
   2231   if (Tok.is(AsmToken::Integer)) {
   2232     int64_t Val;
   2233     if (!isNegative && Tok.getString().startswith("0x")) {
   2234       Val = Tok.getIntVal();
   2235       if (Val > 255 || Val < 0) {
   2236         TokError("encoded floating point value out of range");
   2237         return MatchOperand_ParseFail;
   2238       }
   2239     } else {
   2240       APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
   2241       uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
   2242       // If we had a '-' in front, toggle the sign bit.
   2243       IntVal ^= (uint64_t)isNegative << 63;
   2244       Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
   2245     }
   2246     Parser.Lex(); // Eat the token.
   2247     Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
   2248     return MatchOperand_Success;
   2249   }
   2250 
   2251   if (!Hash)
   2252     return MatchOperand_NoMatch;
   2253 
   2254   TokError("invalid floating point immediate");
   2255   return MatchOperand_ParseFail;
   2256 }
   2257 
   2258 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
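         /// Immediates larger than 12 bits whose low 12 bits are zero are accepted and
         /// re-encoded with a shift, e.g. "add x0, x1, #0x1000" becomes immediate #1
         /// with "lsl #12"; an explicit "add x0, x1, #1, lsl #12" is also accepted.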
   2259 AArch64AsmParser::OperandMatchResultTy
   2260 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
   2261   MCAsmParser &Parser = getParser();
   2262   SMLoc S = getLoc();
   2263 
   2264   if (Parser.getTok().is(AsmToken::Hash))
   2265     Parser.Lex(); // Eat '#'
   2266   else if (Parser.getTok().isNot(AsmToken::Integer))
    2267     // The operand must start with '#' or be an integer; otherwise it is not a match.
   2268     return MatchOperand_NoMatch;
   2269 
   2270   const MCExpr *Imm;
   2271   if (parseSymbolicImmVal(Imm))
   2272     return MatchOperand_ParseFail;
   2273   else if (Parser.getTok().isNot(AsmToken::Comma)) {
   2274     uint64_t ShiftAmount = 0;
   2275     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
   2276     if (MCE) {
   2277       int64_t Val = MCE->getValue();
   2278       if (Val > 0xfff && (Val & 0xfff) == 0) {
   2279         Imm = MCConstantExpr::create(Val >> 12, getContext());
   2280         ShiftAmount = 12;
   2281       }
   2282     }
   2283     SMLoc E = Parser.getTok().getLoc();
   2284     Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
   2285                                                         getContext()));
   2286     return MatchOperand_Success;
   2287   }
   2288 
   2289   // Eat ','
   2290   Parser.Lex();
   2291 
   2292   // The optional operand must be "lsl #N" where N is non-negative.
   2293   if (!Parser.getTok().is(AsmToken::Identifier) ||
   2294       !Parser.getTok().getIdentifier().equals_lower("lsl")) {
   2295     Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
   2296     return MatchOperand_ParseFail;
   2297   }
   2298 
   2299   // Eat 'lsl'
   2300   Parser.Lex();
   2301 
   2302   if (Parser.getTok().is(AsmToken::Hash)) {
   2303     Parser.Lex();
   2304   }
   2305 
   2306   if (Parser.getTok().isNot(AsmToken::Integer)) {
   2307     Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
   2308     return MatchOperand_ParseFail;
   2309   }
   2310 
   2311   int64_t ShiftAmount = Parser.getTok().getIntVal();
   2312 
   2313   if (ShiftAmount < 0) {
   2314     Error(Parser.getTok().getLoc(), "positive shift amount required");
   2315     return MatchOperand_ParseFail;
   2316   }
   2317   Parser.Lex(); // Eat the number
   2318 
   2319   SMLoc E = Parser.getTok().getLoc();
   2320   Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
   2321                                                       S, E, getContext()));
   2322   return MatchOperand_Success;
   2323 }
   2324 
   2325 /// parseCondCodeString - Parse a Condition Code string.
   2326 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
   2327   AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
   2328                     .Case("eq", AArch64CC::EQ)
   2329                     .Case("ne", AArch64CC::NE)
   2330                     .Case("cs", AArch64CC::HS)
   2331                     .Case("hs", AArch64CC::HS)
   2332                     .Case("cc", AArch64CC::LO)
   2333                     .Case("lo", AArch64CC::LO)
   2334                     .Case("mi", AArch64CC::MI)
   2335                     .Case("pl", AArch64CC::PL)
   2336                     .Case("vs", AArch64CC::VS)
   2337                     .Case("vc", AArch64CC::VC)
   2338                     .Case("hi", AArch64CC::HI)
   2339                     .Case("ls", AArch64CC::LS)
   2340                     .Case("ge", AArch64CC::GE)
   2341                     .Case("lt", AArch64CC::LT)
   2342                     .Case("gt", AArch64CC::GT)
   2343                     .Case("le", AArch64CC::LE)
   2344                     .Case("al", AArch64CC::AL)
   2345                     .Case("nv", AArch64CC::NV)
   2346                     .Default(AArch64CC::Invalid);
   2347   return CC;
   2348 }
   2349 
   2350 /// parseCondCode - Parse a Condition Code operand.
   2351 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
   2352                                      bool invertCondCode) {
   2353   MCAsmParser &Parser = getParser();
   2354   SMLoc S = getLoc();
   2355   const AsmToken &Tok = Parser.getTok();
   2356   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
   2357 
   2358   StringRef Cond = Tok.getString();
   2359   AArch64CC::CondCode CC = parseCondCodeString(Cond);
   2360   if (CC == AArch64CC::Invalid)
   2361     return TokError("invalid condition code");
   2362   Parser.Lex(); // Eat identifier token.
   2363 
   2364   if (invertCondCode) {
   2365     if (CC == AArch64CC::AL || CC == AArch64CC::NV)
   2366       return TokError("condition codes AL and NV are invalid for this instruction");
   2367     CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
   2368   }
   2369 
   2370   Operands.push_back(
   2371       AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
   2372   return false;
   2373 }
   2374 
    2375 /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
    2376 /// argument. Parse it if present.
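         /// e.g. "lsl #3", "uxtw #2", or a bare "sxtx" (extend operators without an
         /// explicit amount default to #0).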
   2377 AArch64AsmParser::OperandMatchResultTy
   2378 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
   2379   MCAsmParser &Parser = getParser();
   2380   const AsmToken &Tok = Parser.getTok();
   2381   std::string LowerID = Tok.getString().lower();
   2382   AArch64_AM::ShiftExtendType ShOp =
   2383       StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
   2384           .Case("lsl", AArch64_AM::LSL)
   2385           .Case("lsr", AArch64_AM::LSR)
   2386           .Case("asr", AArch64_AM::ASR)
   2387           .Case("ror", AArch64_AM::ROR)
   2388           .Case("msl", AArch64_AM::MSL)
   2389           .Case("uxtb", AArch64_AM::UXTB)
   2390           .Case("uxth", AArch64_AM::UXTH)
   2391           .Case("uxtw", AArch64_AM::UXTW)
   2392           .Case("uxtx", AArch64_AM::UXTX)
   2393           .Case("sxtb", AArch64_AM::SXTB)
   2394           .Case("sxth", AArch64_AM::SXTH)
   2395           .Case("sxtw", AArch64_AM::SXTW)
   2396           .Case("sxtx", AArch64_AM::SXTX)
   2397           .Default(AArch64_AM::InvalidShiftExtend);
   2398 
   2399   if (ShOp == AArch64_AM::InvalidShiftExtend)
   2400     return MatchOperand_NoMatch;
   2401 
   2402   SMLoc S = Tok.getLoc();
   2403   Parser.Lex();
   2404 
   2405   bool Hash = getLexer().is(AsmToken::Hash);
   2406   if (!Hash && getLexer().isNot(AsmToken::Integer)) {
   2407     if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
   2408         ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
   2409         ShOp == AArch64_AM::MSL) {
   2410       // We expect a number here.
   2411       TokError("expected #imm after shift specifier");
   2412       return MatchOperand_ParseFail;
   2413     }
   2414 
    2415     // "extend" type operations don't need an immediate, #0 is implicit.
   2416     SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   2417     Operands.push_back(
   2418         AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
   2419     return MatchOperand_Success;
   2420   }
   2421 
   2422   if (Hash)
   2423     Parser.Lex(); // Eat the '#'.
   2424 
   2425   // Make sure we do actually have a number or a parenthesized expression.
   2426   SMLoc E = Parser.getTok().getLoc();
   2427   if (!Parser.getTok().is(AsmToken::Integer) &&
   2428       !Parser.getTok().is(AsmToken::LParen)) {
   2429     Error(E, "expected integer shift amount");
   2430     return MatchOperand_ParseFail;
   2431   }
   2432 
   2433   const MCExpr *ImmVal;
   2434   if (getParser().parseExpression(ImmVal))
   2435     return MatchOperand_ParseFail;
   2436 
   2437   const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
   2438   if (!MCE) {
   2439     Error(E, "expected constant '#imm' after shift specifier");
   2440     return MatchOperand_ParseFail;
   2441   }
   2442 
   2443   E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   2444   Operands.push_back(AArch64Operand::CreateShiftExtend(
   2445       ShOp, MCE->getValue(), true, S, E, getContext()));
   2446   return MatchOperand_Success;
   2447 }
   2448 
   2449 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
   2450 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
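         /// For example, "dc zva, x0" is parsed as if it were written
         /// "sys #3, c7, c4, #1, x0".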
   2451 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
   2452                                    OperandVector &Operands) {
   2453   if (Name.find('.') != StringRef::npos)
   2454     return TokError("invalid operand");
   2455 
   2456   Mnemonic = Name;
   2457   Operands.push_back(
   2458       AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
   2459 
   2460   MCAsmParser &Parser = getParser();
   2461   const AsmToken &Tok = Parser.getTok();
   2462   StringRef Op = Tok.getString();
   2463   SMLoc S = Tok.getLoc();
   2464 
   2465   const MCExpr *Expr = nullptr;
   2466 
   2467 #define SYS_ALIAS(op1, Cn, Cm, op2)                                            \
   2468   do {                                                                         \
   2469     Expr = MCConstantExpr::create(op1, getContext());                          \
   2470     Operands.push_back(                                                        \
   2471         AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
   2472     Operands.push_back(                                                        \
   2473         AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));           \
   2474     Operands.push_back(                                                        \
   2475         AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));           \
   2476     Expr = MCConstantExpr::create(op2, getContext());                          \
   2477     Operands.push_back(                                                        \
   2478         AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
   2479   } while (0)
   2480 
   2481   if (Mnemonic == "ic") {
   2482     if (!Op.compare_lower("ialluis")) {
   2483       // SYS #0, C7, C1, #0
   2484       SYS_ALIAS(0, 7, 1, 0);
   2485     } else if (!Op.compare_lower("iallu")) {
   2486       // SYS #0, C7, C5, #0
   2487       SYS_ALIAS(0, 7, 5, 0);
   2488     } else if (!Op.compare_lower("ivau")) {
   2489       // SYS #3, C7, C5, #1
   2490       SYS_ALIAS(3, 7, 5, 1);
   2491     } else {
   2492       return TokError("invalid operand for IC instruction");
   2493     }
   2494   } else if (Mnemonic == "dc") {
   2495     if (!Op.compare_lower("zva")) {
   2496       // SYS #3, C7, C4, #1
   2497       SYS_ALIAS(3, 7, 4, 1);
   2498     } else if (!Op.compare_lower("ivac")) {
    2499       // SYS #0, C7, C6, #1
   2500       SYS_ALIAS(0, 7, 6, 1);
   2501     } else if (!Op.compare_lower("isw")) {
   2502       // SYS #0, C7, C6, #2
   2503       SYS_ALIAS(0, 7, 6, 2);
   2504     } else if (!Op.compare_lower("cvac")) {
   2505       // SYS #3, C7, C10, #1
   2506       SYS_ALIAS(3, 7, 10, 1);
   2507     } else if (!Op.compare_lower("csw")) {
   2508       // SYS #0, C7, C10, #2
   2509       SYS_ALIAS(0, 7, 10, 2);
   2510     } else if (!Op.compare_lower("cvau")) {
   2511       // SYS #3, C7, C11, #1
   2512       SYS_ALIAS(3, 7, 11, 1);
   2513     } else if (!Op.compare_lower("civac")) {
   2514       // SYS #3, C7, C14, #1
   2515       SYS_ALIAS(3, 7, 14, 1);
   2516     } else if (!Op.compare_lower("cisw")) {
   2517       // SYS #0, C7, C14, #2
   2518       SYS_ALIAS(0, 7, 14, 2);
   2519     } else if (!Op.compare_lower("cvap")) {
   2520       if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
   2521         // SYS #3, C7, C12, #1
   2522         SYS_ALIAS(3, 7, 12, 1);
   2523       } else {
   2524         return TokError("DC CVAP requires ARMv8.2a");
   2525       }
   2526     } else {
   2527       return TokError("invalid operand for DC instruction");
   2528     }
   2529   } else if (Mnemonic == "at") {
   2530     if (!Op.compare_lower("s1e1r")) {
   2531       // SYS #0, C7, C8, #0
   2532       SYS_ALIAS(0, 7, 8, 0);
   2533     } else if (!Op.compare_lower("s1e2r")) {
   2534       // SYS #4, C7, C8, #0
   2535       SYS_ALIAS(4, 7, 8, 0);
   2536     } else if (!Op.compare_lower("s1e3r")) {
   2537       // SYS #6, C7, C8, #0
   2538       SYS_ALIAS(6, 7, 8, 0);
   2539     } else if (!Op.compare_lower("s1e1w")) {
   2540       // SYS #0, C7, C8, #1
   2541       SYS_ALIAS(0, 7, 8, 1);
   2542     } else if (!Op.compare_lower("s1e2w")) {
   2543       // SYS #4, C7, C8, #1
   2544       SYS_ALIAS(4, 7, 8, 1);
   2545     } else if (!Op.compare_lower("s1e3w")) {
   2546       // SYS #6, C7, C8, #1
   2547       SYS_ALIAS(6, 7, 8, 1);
   2548     } else if (!Op.compare_lower("s1e0r")) {
    2549       // SYS #0, C7, C8, #2
   2550       SYS_ALIAS(0, 7, 8, 2);
   2551     } else if (!Op.compare_lower("s1e0w")) {
   2552       // SYS #0, C7, C8, #3
   2553       SYS_ALIAS(0, 7, 8, 3);
   2554     } else if (!Op.compare_lower("s12e1r")) {
   2555       // SYS #4, C7, C8, #4
   2556       SYS_ALIAS(4, 7, 8, 4);
   2557     } else if (!Op.compare_lower("s12e1w")) {
   2558       // SYS #4, C7, C8, #5
   2559       SYS_ALIAS(4, 7, 8, 5);
   2560     } else if (!Op.compare_lower("s12e0r")) {
   2561       // SYS #4, C7, C8, #6
   2562       SYS_ALIAS(4, 7, 8, 6);
   2563     } else if (!Op.compare_lower("s12e0w")) {
   2564       // SYS #4, C7, C8, #7
   2565       SYS_ALIAS(4, 7, 8, 7);
   2566     } else if (!Op.compare_lower("s1e1rp")) {
   2567       if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
   2568         // SYS #0, C7, C9, #0
   2569         SYS_ALIAS(0, 7, 9, 0);
   2570       } else {
   2571         return TokError("AT S1E1RP requires ARMv8.2a");
   2572       }
   2573     } else if (!Op.compare_lower("s1e1wp")) {
   2574       if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
   2575         // SYS #0, C7, C9, #1
   2576         SYS_ALIAS(0, 7, 9, 1);
   2577       } else {
   2578         return TokError("AT S1E1WP requires ARMv8.2a");
   2579       }
   2580     } else {
   2581       return TokError("invalid operand for AT instruction");
   2582     }
   2583   } else if (Mnemonic == "tlbi") {
   2584     if (!Op.compare_lower("vmalle1is")) {
   2585       // SYS #0, C8, C3, #0
   2586       SYS_ALIAS(0, 8, 3, 0);
   2587     } else if (!Op.compare_lower("alle2is")) {
   2588       // SYS #4, C8, C3, #0
   2589       SYS_ALIAS(4, 8, 3, 0);
   2590     } else if (!Op.compare_lower("alle3is")) {
   2591       // SYS #6, C8, C3, #0
   2592       SYS_ALIAS(6, 8, 3, 0);
   2593     } else if (!Op.compare_lower("vae1is")) {
   2594       // SYS #0, C8, C3, #1
   2595       SYS_ALIAS(0, 8, 3, 1);
   2596     } else if (!Op.compare_lower("vae2is")) {
   2597       // SYS #4, C8, C3, #1
   2598       SYS_ALIAS(4, 8, 3, 1);
   2599     } else if (!Op.compare_lower("vae3is")) {
   2600       // SYS #6, C8, C3, #1
   2601       SYS_ALIAS(6, 8, 3, 1);
   2602     } else if (!Op.compare_lower("aside1is")) {
   2603       // SYS #0, C8, C3, #2
   2604       SYS_ALIAS(0, 8, 3, 2);
   2605     } else if (!Op.compare_lower("vaae1is")) {
   2606       // SYS #0, C8, C3, #3
   2607       SYS_ALIAS(0, 8, 3, 3);
   2608     } else if (!Op.compare_lower("alle1is")) {
   2609       // SYS #4, C8, C3, #4
   2610       SYS_ALIAS(4, 8, 3, 4);
   2611     } else if (!Op.compare_lower("vale1is")) {
   2612       // SYS #0, C8, C3, #5
   2613       SYS_ALIAS(0, 8, 3, 5);
   2614     } else if (!Op.compare_lower("vaale1is")) {
   2615       // SYS #0, C8, C3, #7
   2616       SYS_ALIAS(0, 8, 3, 7);
   2617     } else if (!Op.compare_lower("vmalle1")) {
   2618       // SYS #0, C8, C7, #0
   2619       SYS_ALIAS(0, 8, 7, 0);
   2620     } else if (!Op.compare_lower("alle2")) {
   2621       // SYS #4, C8, C7, #0
   2622       SYS_ALIAS(4, 8, 7, 0);
   2623     } else if (!Op.compare_lower("vale2is")) {
   2624       // SYS #4, C8, C3, #5
   2625       SYS_ALIAS(4, 8, 3, 5);
   2626     } else if (!Op.compare_lower("vale3is")) {
   2627       // SYS #6, C8, C3, #5
   2628       SYS_ALIAS(6, 8, 3, 5);
   2629     } else if (!Op.compare_lower("alle3")) {
   2630       // SYS #6, C8, C7, #0
   2631       SYS_ALIAS(6, 8, 7, 0);
   2632     } else if (!Op.compare_lower("vae1")) {
   2633       // SYS #0, C8, C7, #1
   2634       SYS_ALIAS(0, 8, 7, 1);
   2635     } else if (!Op.compare_lower("vae2")) {
   2636       // SYS #4, C8, C7, #1
   2637       SYS_ALIAS(4, 8, 7, 1);
   2638     } else if (!Op.compare_lower("vae3")) {
   2639       // SYS #6, C8, C7, #1
   2640       SYS_ALIAS(6, 8, 7, 1);
   2641     } else if (!Op.compare_lower("aside1")) {
   2642       // SYS #0, C8, C7, #2
   2643       SYS_ALIAS(0, 8, 7, 2);
   2644     } else if (!Op.compare_lower("vaae1")) {
   2645       // SYS #0, C8, C7, #3
   2646       SYS_ALIAS(0, 8, 7, 3);
   2647     } else if (!Op.compare_lower("alle1")) {
   2648       // SYS #4, C8, C7, #4
   2649       SYS_ALIAS(4, 8, 7, 4);
   2650     } else if (!Op.compare_lower("vale1")) {
   2651       // SYS #0, C8, C7, #5
   2652       SYS_ALIAS(0, 8, 7, 5);
   2653     } else if (!Op.compare_lower("vale2")) {
   2654       // SYS #4, C8, C7, #5
   2655       SYS_ALIAS(4, 8, 7, 5);
   2656     } else if (!Op.compare_lower("vale3")) {
   2657       // SYS #6, C8, C7, #5
   2658       SYS_ALIAS(6, 8, 7, 5);
   2659     } else if (!Op.compare_lower("vaale1")) {
   2660       // SYS #0, C8, C7, #7
   2661       SYS_ALIAS(0, 8, 7, 7);
   2662     } else if (!Op.compare_lower("ipas2e1")) {
   2663       // SYS #4, C8, C4, #1
   2664       SYS_ALIAS(4, 8, 4, 1);
   2665     } else if (!Op.compare_lower("ipas2le1")) {
   2666       // SYS #4, C8, C4, #5
   2667       SYS_ALIAS(4, 8, 4, 5);
   2668     } else if (!Op.compare_lower("ipas2e1is")) {
    2669       // SYS #4, C8, C0, #1
   2670       SYS_ALIAS(4, 8, 0, 1);
   2671     } else if (!Op.compare_lower("ipas2le1is")) {
    2672       // SYS #4, C8, C0, #5
   2673       SYS_ALIAS(4, 8, 0, 5);
   2674     } else if (!Op.compare_lower("vmalls12e1")) {
   2675       // SYS #4, C8, C7, #6
   2676       SYS_ALIAS(4, 8, 7, 6);
   2677     } else if (!Op.compare_lower("vmalls12e1is")) {
   2678       // SYS #4, C8, C3, #6
   2679       SYS_ALIAS(4, 8, 3, 6);
   2680     } else {
   2681       return TokError("invalid operand for TLBI instruction");
   2682     }
   2683   }
   2684 
   2685 #undef SYS_ALIAS
   2686 
   2687   Parser.Lex(); // Eat operand.
   2688 
   2689   bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
   2690   bool HasRegister = false;
   2691 
   2692   // Check for the optional register operand.
   2693   if (getLexer().is(AsmToken::Comma)) {
   2694     Parser.Lex(); // Eat comma.
   2695 
   2696     if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
   2697       return TokError("expected register operand");
   2698 
   2699     HasRegister = true;
   2700   }
   2701 
   2702   if (getLexer().isNot(AsmToken::EndOfStatement)) {
   2703     Parser.eatToEndOfStatement();
   2704     return TokError("unexpected token in argument list");
   2705   }
   2706 
   2707   if (ExpectRegister && !HasRegister) {
   2708     return TokError("specified " + Mnemonic + " op requires a register");
   2709   }
   2710   else if (!ExpectRegister && HasRegister) {
   2711     return TokError("specified " + Mnemonic + " op does not use a register");
   2712   }
   2713 
   2714   Parser.Lex(); // Consume the EndOfStatement
   2715   return false;
   2716 }
   2717 
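         /// tryParseBarrierOperand - Parse a DMB/DSB/ISB barrier operand, either a
         /// named option or a 4-bit immediate, e.g. "dmb ish" or "dmb #11". For ISB
         /// the only valid named option is "sy".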
   2718 AArch64AsmParser::OperandMatchResultTy
   2719 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
   2720   MCAsmParser &Parser = getParser();
   2721   const AsmToken &Tok = Parser.getTok();
   2722 
   2723   // Can be either a #imm style literal or an option name
   2724   bool Hash = Tok.is(AsmToken::Hash);
   2725   if (Hash || Tok.is(AsmToken::Integer)) {
   2726     // Immediate operand.
   2727     if (Hash)
   2728       Parser.Lex(); // Eat the '#'
   2729     const MCExpr *ImmVal;
   2730     SMLoc ExprLoc = getLoc();
   2731     if (getParser().parseExpression(ImmVal))
   2732       return MatchOperand_ParseFail;
   2733     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
   2734     if (!MCE) {
   2735       Error(ExprLoc, "immediate value expected for barrier operand");
   2736       return MatchOperand_ParseFail;
   2737     }
   2738     if (MCE->getValue() < 0 || MCE->getValue() > 15) {
   2739       Error(ExprLoc, "barrier operand out of range");
   2740       return MatchOperand_ParseFail;
   2741     }
   2742     auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
   2743     Operands.push_back(AArch64Operand::CreateBarrier(
   2744         MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
   2745     return MatchOperand_Success;
   2746   }
   2747 
   2748   if (Tok.isNot(AsmToken::Identifier)) {
   2749     TokError("invalid operand for instruction");
   2750     return MatchOperand_ParseFail;
   2751   }
   2752 
   2753   auto DB = AArch64DB::lookupDBByName(Tok.getString());
   2754   if (!DB) {
   2755     TokError("invalid barrier option name");
   2756     return MatchOperand_ParseFail;
   2757   }
   2758 
   2759   // The only valid named option for ISB is 'sy'
   2760   if (Mnemonic == "isb" && DB->Encoding != AArch64DB::sy) {
   2761     TokError("'sy' or #imm operand expected");
   2762     return MatchOperand_ParseFail;
   2763   }
   2764 
   2765   Operands.push_back(AArch64Operand::CreateBarrier(
   2766       DB->Encoding, Tok.getString(), getLoc(), getContext()));
   2767   Parser.Lex(); // Consume the option
   2768 
   2769   return MatchOperand_Success;
   2770 }
   2771 
   2772 AArch64AsmParser::OperandMatchResultTy
   2773 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
   2774   MCAsmParser &Parser = getParser();
   2775   const AsmToken &Tok = Parser.getTok();
   2776 
   2777   if (Tok.isNot(AsmToken::Identifier))
   2778     return MatchOperand_NoMatch;
   2779 
   2780   int MRSReg, MSRReg;
   2781   auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
   2782   if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
   2783     MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
   2784     MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
   2785   } else
   2786     MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
   2787 
   2788   auto PState = AArch64PState::lookupPStateByName(Tok.getString());
   2789   unsigned PStateImm = -1;
   2790   if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
   2791     PStateImm = PState->Encoding;
   2792 
   2793   Operands.push_back(
   2794       AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
   2795                                    PStateImm, getContext()));
   2796   Parser.Lex(); // Eat identifier
   2797 
   2798   return MatchOperand_Success;
   2799 }
   2800 
   2801 /// tryParseVectorRegister - Parse a vector register operand.
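        /// e.g. "v0" or "v3.8b", optionally followed by a lane index such as "[2]".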
   2802 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
   2803   MCAsmParser &Parser = getParser();
   2804   if (Parser.getTok().isNot(AsmToken::Identifier))
   2805     return true;
   2806 
   2807   SMLoc S = getLoc();
   2808   // Check for a vector register specifier first.
   2809   StringRef Kind;
   2810   int64_t Reg = tryMatchVectorRegister(Kind, false);
   2811   if (Reg == -1)
   2812     return true;
   2813   Operands.push_back(
   2814       AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
   2815   // If there was an explicit qualifier, that goes on as a literal text
   2816   // operand.
   2817   if (!Kind.empty())
   2818     Operands.push_back(
   2819         AArch64Operand::CreateToken(Kind, false, S, getContext()));
   2820 
   2821   // If there is an index specifier following the register, parse that too.
   2822   if (Parser.getTok().is(AsmToken::LBrac)) {
   2823     SMLoc SIdx = getLoc();
   2824     Parser.Lex(); // Eat left bracket token.
   2825 
   2826     const MCExpr *ImmVal;
   2827     if (getParser().parseExpression(ImmVal))
   2828       return false;
   2829     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
   2830     if (!MCE) {
   2831       TokError("immediate value expected for vector index");
   2832       return false;
   2833     }
   2834 
   2835     SMLoc E = getLoc();
   2836     if (Parser.getTok().isNot(AsmToken::RBrac)) {
   2837       Error(E, "']' expected");
   2838       return false;
   2839     }
   2840 
   2841     Parser.Lex(); // Eat right bracket token.
   2842 
   2843     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
   2844                                                          E, getContext()));
   2845   }
   2846 
   2847   return false;
   2848 }
   2849 
   2850 /// parseRegister - Parse a non-vector register operand.
   2851 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
   2852   MCAsmParser &Parser = getParser();
   2853   SMLoc S = getLoc();
   2854   // Try for a vector register.
   2855   if (!tryParseVectorRegister(Operands))
   2856     return false;
   2857 
   2858   // Try for a scalar register.
   2859   int64_t Reg = tryParseRegister();
   2860   if (Reg == -1)
   2861     return true;
   2862   Operands.push_back(
   2863       AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
   2864 
   2865   // A small number of instructions (FMOVXDhighr, for example) have "[1]"
   2866   // as a string token in the instruction itself.
   2867   if (getLexer().getKind() == AsmToken::LBrac) {
   2868     SMLoc LBracS = getLoc();
   2869     Parser.Lex();
   2870     const AsmToken &Tok = Parser.getTok();
   2871     if (Tok.is(AsmToken::Integer)) {
   2872       SMLoc IntS = getLoc();
   2873       int64_t Val = Tok.getIntVal();
   2874       if (Val == 1) {
   2875         Parser.Lex();
   2876         if (getLexer().getKind() == AsmToken::RBrac) {
   2877           SMLoc RBracS = getLoc();
   2878           Parser.Lex();
   2879           Operands.push_back(
   2880               AArch64Operand::CreateToken("[", false, LBracS, getContext()));
   2881           Operands.push_back(
   2882               AArch64Operand::CreateToken("1", false, IntS, getContext()));
   2883           Operands.push_back(
   2884               AArch64Operand::CreateToken("]", false, RBracS, getContext()));
   2885           return false;
   2886         }
   2887       }
   2888     }
   2889   }
   2890 
   2891   return false;
   2892 }
   2893 
   2894 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
   2895   MCAsmParser &Parser = getParser();
   2896   bool HasELFModifier = false;
   2897   AArch64MCExpr::VariantKind RefKind;
   2898 
   2899   if (Parser.getTok().is(AsmToken::Colon)) {
   2900     Parser.Lex(); // Eat ':'
   2901     HasELFModifier = true;
   2902 
   2903     if (Parser.getTok().isNot(AsmToken::Identifier)) {
   2904       Error(Parser.getTok().getLoc(),
   2905             "expect relocation specifier in operand after ':'");
   2906       return true;
   2907     }
   2908 
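            // The names below are used in ":specifier:" syntax before an expression,
            // e.g. "add x0, x0, :lo12:sym" or "movz x1, #:abs_g1:sym".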
   2909     std::string LowerCase = Parser.getTok().getIdentifier().lower();
   2910     RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
   2911                   .Case("lo12", AArch64MCExpr::VK_LO12)
   2912                   .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
   2913                   .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
   2914                   .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
   2915                   .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
   2916                   .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
   2917                   .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
   2918                   .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
   2919                   .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
   2920                   .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
   2921                   .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
   2922                   .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
   2923                   .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
   2924                   .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
   2925                   .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
   2926                   .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
   2927                   .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
   2928                   .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
   2929                   .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
   2930                   .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
   2931                   .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
   2932                   .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
   2933                   .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
   2934                   .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
   2935                   .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
   2936                   .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
   2937                   .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
   2938                   .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
   2939                   .Case("got", AArch64MCExpr::VK_GOT_PAGE)
   2940                   .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
   2941                   .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
   2942                   .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
   2943                   .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
   2944                   .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
   2945                   .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
   2946                   .Default(AArch64MCExpr::VK_INVALID);
   2947 
   2948     if (RefKind == AArch64MCExpr::VK_INVALID) {
   2949       Error(Parser.getTok().getLoc(),
   2950             "expect relocation specifier in operand after ':'");
   2951       return true;
   2952     }
   2953 
   2954     Parser.Lex(); // Eat identifier
   2955 
   2956     if (Parser.getTok().isNot(AsmToken::Colon)) {
   2957       Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
   2958       return true;
   2959     }
   2960     Parser.Lex(); // Eat ':'
   2961   }
   2962 
   2963   if (getParser().parseExpression(ImmVal))
   2964     return true;
   2965 
   2966   if (HasELFModifier)
   2967     ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
   2968 
   2969   return false;
   2970 }
   2971 
   2972 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
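        /// e.g. "{ v0.8b, v1.8b, v2.8b }" or the ranged form "{ v0.2d - v2.2d }"; the
        /// list holds at most four registers with consecutive numbers (mod 32) and a
        /// common size suffix.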
   2973 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
   2974   MCAsmParser &Parser = getParser();
   2975   assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a '{'");
   2976   SMLoc S = getLoc();
   2977   Parser.Lex(); // Eat the '{' token.
   2978   StringRef Kind;
   2979   int64_t FirstReg = tryMatchVectorRegister(Kind, true);
   2980   if (FirstReg == -1)
   2981     return true;
   2982   int64_t PrevReg = FirstReg;
   2983   unsigned Count = 1;
   2984 
   2985   if (Parser.getTok().is(AsmToken::Minus)) {
   2986     Parser.Lex(); // Eat the minus.
   2987 
   2988     SMLoc Loc = getLoc();
   2989     StringRef NextKind;
   2990     int64_t Reg = tryMatchVectorRegister(NextKind, true);
   2991     if (Reg == -1)
   2992       return true;
   2993     // Any kind suffix must match on all regs in the list.
   2994     if (Kind != NextKind)
   2995       return Error(Loc, "mismatched register size suffix");
   2996 
   2997     unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
   2998 
   2999     if (Space == 0 || Space > 3) {
   3000       return Error(Loc, "invalid number of vectors");
   3001     }
   3002 
   3003     Count += Space;
   3004   }
   3005   else {
   3006     while (Parser.getTok().is(AsmToken::Comma)) {
   3007       Parser.Lex(); // Eat the comma token.
   3008 
   3009       SMLoc Loc = getLoc();
   3010       StringRef NextKind;
   3011       int64_t Reg = tryMatchVectorRegister(NextKind, true);
   3012       if (Reg == -1)
   3013         return true;
   3014       // Any kind suffix must match on all regs in the list.
   3015       if (Kind != NextKind)
   3016         return Error(Loc, "mismatched register size suffix");
   3017 
   3018       // Registers must be sequential (with wraparound at 31)
   3019       if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
   3020           (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
   3021        return Error(Loc, "registers must be sequential");
   3022 
   3023       PrevReg = Reg;
   3024       ++Count;
   3025     }
   3026   }
   3027 
   3028   if (Parser.getTok().isNot(AsmToken::RCurly))
   3029     return Error(getLoc(), "'}' expected");
   3030   Parser.Lex(); // Eat the '}' token.
   3031 
   3032   if (Count > 4)
   3033     return Error(S, "invalid number of vectors");
   3034 
   3035   unsigned NumElements = 0;
   3036   char ElementKind = 0;
   3037   if (!Kind.empty())
   3038     parseValidVectorKind(Kind, NumElements, ElementKind);
   3039 
   3040   Operands.push_back(AArch64Operand::CreateVectorList(
   3041       FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
   3042 
   3043   // If there is an index specifier following the list, parse that too.
   3044   if (Parser.getTok().is(AsmToken::LBrac)) {
   3045     SMLoc SIdx = getLoc();
   3046     Parser.Lex(); // Eat left bracket token.
   3047 
   3048     const MCExpr *ImmVal;
   3049     if (getParser().parseExpression(ImmVal))
   3050       return false;
   3051     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
   3052     if (!MCE) {
   3053       TokError("immediate value expected for vector index");
   3054       return false;
   3055     }
   3056 
   3057     SMLoc E = getLoc();
   3058     if (Parser.getTok().isNot(AsmToken::RBrac)) {
   3059       Error(E, "']' expected");
   3060       return false;
   3061     }
   3062 
   3063     Parser.Lex(); // Eat right bracket token.
   3064 
   3065     Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
   3066                                                          E, getContext()));
   3067   }
   3068   return false;
   3069 }
   3070 
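        /// tryParseGPR64sp0Operand - Parse a 64-bit GPR or SP, optionally followed by
        /// ", #0"; any other trailing index is rejected.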
   3071 AArch64AsmParser::OperandMatchResultTy
   3072 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
   3073   MCAsmParser &Parser = getParser();
   3074   const AsmToken &Tok = Parser.getTok();
   3075   if (!Tok.is(AsmToken::Identifier))
   3076     return MatchOperand_NoMatch;
   3077 
   3078   unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
   3079 
   3080   MCContext &Ctx = getContext();
   3081   const MCRegisterInfo *RI = Ctx.getRegisterInfo();
   3082   if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
   3083     return MatchOperand_NoMatch;
   3084 
   3085   SMLoc S = getLoc();
   3086   Parser.Lex(); // Eat register
   3087 
   3088   if (Parser.getTok().isNot(AsmToken::Comma)) {
   3089     Operands.push_back(
   3090         AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
   3091     return MatchOperand_Success;
   3092   }
   3093   Parser.Lex(); // Eat comma.
   3094 
   3095   if (Parser.getTok().is(AsmToken::Hash))
   3096     Parser.Lex(); // Eat hash
   3097 
   3098   if (Parser.getTok().isNot(AsmToken::Integer)) {
   3099     Error(getLoc(), "index must be absent or #0");
   3100     return MatchOperand_ParseFail;
   3101   }
   3102 
   3103   const MCExpr *ImmVal;
   3104   if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
   3105       cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
   3106     Error(getLoc(), "index must be absent or #0");
   3107     return MatchOperand_ParseFail;
   3108   }
   3109 
   3110   Operands.push_back(
   3111       AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
   3112   return MatchOperand_Success;
   3113 }
   3114 
   3115 /// parseOperand - Parse an AArch64 instruction operand.  For now this parses the
   3116 /// operand regardless of the mnemonic.
   3117 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
   3118                                   bool invertCondCode) {
   3119   MCAsmParser &Parser = getParser();
   3120   // Check if the current operand has a custom associated parser, if so, try to
   3121   // custom parse the operand, or fallback to the general approach.
   3122   OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
   3123   if (ResTy == MatchOperand_Success)
   3124     return false;
   3125   // If there wasn't a custom match, try the generic matcher below. Otherwise,
   3126   // there was a match, but an error occurred, in which case, just return that
   3127   // the operand parsing failed.
   3128   if (ResTy == MatchOperand_ParseFail)
   3129     return true;
   3130 
   3131   // Nothing custom, so do general case parsing.
   3132   SMLoc S, E;
   3133   switch (getLexer().getKind()) {
   3134   default: {
   3135     SMLoc S = getLoc();
   3136     const MCExpr *Expr;
   3137     if (parseSymbolicImmVal(Expr))
   3138       return Error(S, "invalid operand");
   3139 
   3140     SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   3141     Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
   3142     return false;
   3143   }
   3144   case AsmToken::LBrac: {
   3145     SMLoc Loc = Parser.getTok().getLoc();
   3146     Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
   3147                                                    getContext()));
   3148     Parser.Lex(); // Eat '['
   3149 
   3150     // There's no comma after a '[', so we can parse the next operand
   3151     // immediately.
   3152     return parseOperand(Operands, false, false);
   3153   }
   3154   case AsmToken::LCurly:
   3155     return parseVectorList(Operands);
   3156   case AsmToken::Identifier: {
   3157     // If we're expecting a Condition Code operand, then just parse that.
   3158     if (isCondCode)
   3159       return parseCondCode(Operands, invertCondCode);
   3160 
   3161     // If it's a register name, parse it.
   3162     if (!parseRegister(Operands))
   3163       return false;
   3164 
   3165     // This could be an optional "shift" or "extend" operand.
   3166     OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
   3167     // We can only continue if no tokens were eaten.
   3168     if (GotShift != MatchOperand_NoMatch)
   3169       return GotShift;
   3170 
   3171     // This was not a register so parse other operands that start with an
   3172     // identifier (like labels) as expressions and create them as immediates.
   3173     const MCExpr *IdVal;
   3174     S = getLoc();
   3175     if (getParser().parseExpression(IdVal))
   3176       return true;
   3177 
   3178     E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   3179     Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
   3180     return false;
   3181   }
   3182   case AsmToken::Integer:
   3183   case AsmToken::Real:
   3184   case AsmToken::Hash: {
   3185     // #42 -> immediate.
   3186     S = getLoc();
   3187     if (getLexer().is(AsmToken::Hash))
   3188       Parser.Lex();
   3189 
   3190     // Parse a negative sign
   3191     bool isNegative = false;
   3192     if (Parser.getTok().is(AsmToken::Minus)) {
   3193       isNegative = true;
   3194       // We need to consume this token only when we have a Real, otherwise
   3195       // we let parseSymbolicImmVal take care of it
   3196       if (Parser.getLexer().peekTok().is(AsmToken::Real))
   3197         Parser.Lex();
   3198     }
   3199 
   3200     // The only Real that should come through here is a literal #0.0 for
   3201     // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
   3202     // so convert the value.
   3203     const AsmToken &Tok = Parser.getTok();
   3204     if (Tok.is(AsmToken::Real)) {
   3205       APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
   3206       uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
   3207       if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
   3208           Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
   3209           Mnemonic != "fcmlt")
   3210         return TokError("unexpected floating point literal");
   3211       else if (IntVal != 0 || isNegative)
   3212         return TokError("expected floating-point constant #0.0");
   3213       Parser.Lex(); // Eat the token.
   3214 
   3215       Operands.push_back(
   3216           AArch64Operand::CreateToken("#0", false, S, getContext()));
   3217       Operands.push_back(
   3218           AArch64Operand::CreateToken(".0", false, S, getContext()));
   3219       return false;
   3220     }
   3221 
   3222     const MCExpr *ImmVal;
   3223     if (parseSymbolicImmVal(ImmVal))
   3224       return true;
   3225 
   3226     E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
   3227     Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
   3228     return false;
   3229   }
   3230   case AsmToken::Equal: {
   3231     SMLoc Loc = Parser.getTok().getLoc();
   3232     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr x0, =val)
   3233       return Error(Loc, "unexpected token in operand");
   3234     Parser.Lex(); // Eat '='
   3235     const MCExpr *SubExprVal;
   3236     if (getParser().parseExpression(SubExprVal))
   3237       return true;
   3238 
   3239     if (Operands.size() < 2 ||
   3240         !static_cast<AArch64Operand &>(*Operands[1]).isReg())
   3241       return Error(Loc, "Only valid when first operand is register");
   3242 
   3243     bool IsXReg =
   3244         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
   3245             Operands[1]->getReg());
   3246 
   3247     MCContext& Ctx = getContext();
   3248     E = SMLoc::getFromPointer(Loc.getPointer() - 1);
   3249     // If the op is an imm and fits into a movz, then replace ldr with movz.
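            // e.g. "ldr x0, =0x12340000" is emitted as "movz x0, #0x1234, lsl #16".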
   3250     if (isa<MCConstantExpr>(SubExprVal)) {
   3251       uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
   3252       uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
   3253       while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
   3254         ShiftAmt += 16;
   3255         Imm >>= 16;
   3256       }
   3257       if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
   3258           Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
   3259           Operands.push_back(AArch64Operand::CreateImm(
   3260                      MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
   3261         if (ShiftAmt)
   3262           Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
   3263                      ShiftAmt, true, S, E, Ctx));
   3264         return false;
   3265       }
   3266       APInt Simm = APInt(64, Imm << ShiftAmt);
   3267       // check if the immediate is an unsigned or signed 32-bit int for W regs
   3268       if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
   3269         return Error(Loc, "Immediate too large for register");
   3270     }
   3271     // If it is a label or an imm that cannot fit in a movz, put it into CP.
   3272     const MCExpr *CPLoc =
   3273         getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
   3274     Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
   3275     return false;
   3276   }
   3277   }
   3278 }
   3279 
   3280 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
   3281 /// operands.
   3282 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
   3283                                         StringRef Name, SMLoc NameLoc,
   3284                                         OperandVector &Operands) {
   3285   MCAsmParser &Parser = getParser();
   3286   Name = StringSwitch<StringRef>(Name.lower())
   3287              .Case("beq", "b.eq")
   3288              .Case("bne", "b.ne")
   3289              .Case("bhs", "b.hs")
   3290              .Case("bcs", "b.cs")
   3291              .Case("blo", "b.lo")
   3292              .Case("bcc", "b.cc")
   3293              .Case("bmi", "b.mi")
   3294              .Case("bpl", "b.pl")
   3295              .Case("bvs", "b.vs")
   3296              .Case("bvc", "b.vc")
   3297              .Case("bhi", "b.hi")
   3298              .Case("bls", "b.ls")
   3299              .Case("bge", "b.ge")
   3300              .Case("blt", "b.lt")
   3301              .Case("bgt", "b.gt")
   3302              .Case("ble", "b.le")
   3303              .Case("bal", "b.al")
   3304              .Case("bnv", "b.nv")
   3305              .Default(Name);
   3306 
   3307   // First check for the AArch64-specific .req directive.
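          // e.g. "foo .req x4" makes "foo" usable as an alias for x4 from here on.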
   3308   if (Parser.getTok().is(AsmToken::Identifier) &&
   3309       Parser.getTok().getIdentifier() == ".req") {
   3310     parseDirectiveReq(Name, NameLoc);
   3311     // We always return 'error' for this, as we're done with this
   3312     // statement and don't need to match the instruction.
   3313     return true;
   3314   }
   3315 
   3316   // Create the leading tokens for the mnemonic, split by '.' characters.
   3317   size_t Start = 0, Next = Name.find('.');
   3318   StringRef Head = Name.slice(Start, Next);
   3319 
   3320   // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
   3321   if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
   3322     bool IsError = parseSysAlias(Head, NameLoc, Operands);
   3323     if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
   3324       Parser.eatToEndOfStatement();
   3325     return IsError;
   3326   }
   3327 
   3328   Operands.push_back(
   3329       AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
   3330   Mnemonic = Head;
   3331 
   3332   // Handle condition codes for a branch mnemonic
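          // e.g. "b.eq" is split into a "b" token, a "." suffix token and an EQ
          // condition-code operand.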
   3333   if (Head == "b" && Next != StringRef::npos) {
   3334     Start = Next;
   3335     Next = Name.find('.', Start + 1);
   3336     Head = Name.slice(Start + 1, Next);
   3337 
   3338     SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
   3339                                             (Head.data() - Name.data()));
   3340     AArch64CC::CondCode CC = parseCondCodeString(Head);
   3341     if (CC == AArch64CC::Invalid)
   3342       return Error(SuffixLoc, "invalid condition code");
   3343     Operands.push_back(
   3344         AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
   3345     Operands.push_back(
   3346         AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
   3347   }
   3348 
   3349   // Add the remaining tokens in the mnemonic.
   3350   while (Next != StringRef::npos) {
   3351     Start = Next;
   3352     Next = Name.find('.', Start + 1);
   3353     Head = Name.slice(Start, Next);
   3354     SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
   3355                                             (Head.data() - Name.data()) + 1);
   3356     Operands.push_back(
   3357         AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
   3358   }
   3359 
   3360   // Conditional compare instructions have a Condition Code operand, which needs
   3361   // to be parsed and an immediate operand created.
   3362   bool condCodeFourthOperand =
   3363       (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
   3364        Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
   3365        Head == "csinc" || Head == "csinv" || Head == "csneg");
   3366 
   3367   // These instructions are aliases to some of the conditional select
   3368   // instructions. However, the condition code is inverted in the aliased
   3369   // instruction.
   3370   //
   3371   // FIXME: Is this the correct way to handle these? Or should the parser
   3372   //        generate the aliased instructions directly?
   3373   bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
   3374   bool condCodeThirdOperand =
   3375       (Head == "cinc" || Head == "cinv" || Head == "cneg");
   3376 
   3377   // Read the remaining operands.
   3378   if (getLexer().isNot(AsmToken::EndOfStatement)) {
   3379     // Read the first operand.
   3380     if (parseOperand(Operands, false, false)) {
   3381       Parser.eatToEndOfStatement();
   3382       return true;
   3383     }
   3384 
   3385     unsigned N = 2;
   3386     while (getLexer().is(AsmToken::Comma)) {
   3387       Parser.Lex(); // Eat the comma.
   3388 
   3389       // Parse and remember the operand.
   3390       if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
   3391                                      (N == 3 && condCodeThirdOperand) ||
   3392                                      (N == 2 && condCodeSecondOperand),
   3393                        condCodeSecondOperand || condCodeThirdOperand)) {
   3394         Parser.eatToEndOfStatement();
   3395         return true;
   3396       }
   3397 
   3398       // After successfully parsing some operands there are two special cases to
   3399       // consider (i.e. notional operands not separated by commas). Both are due
   3400       // to memory specifiers:
   3401       //  + An RBrac will end an address for load/store/prefetch
   3402       //  + An '!' will indicate a pre-indexed operation.
   3403       //
   3404       // It's someone else's responsibility to make sure these tokens are sane
   3405       // in the given context!
   3406       if (Parser.getTok().is(AsmToken::RBrac)) {
   3407         SMLoc Loc = Parser.getTok().getLoc();
   3408         Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
   3409                                                        getContext()));
   3410         Parser.Lex();
   3411       }
   3412 
   3413       if (Parser.getTok().is(AsmToken::Exclaim)) {
   3414         SMLoc Loc = Parser.getTok().getLoc();
   3415         Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
   3416                                                        getContext()));
   3417         Parser.Lex();
   3418       }
   3419 
   3420       ++N;
   3421     }
   3422   }
   3423 
   3424   if (getLexer().isNot(AsmToken::EndOfStatement)) {
   3425     SMLoc Loc = Parser.getTok().getLoc();
   3426     Parser.eatToEndOfStatement();
   3427     return Error(Loc, "unexpected token in argument list");
   3428   }
   3429 
   3430   Parser.Lex(); // Consume the EndOfStatement
   3431   return false;
   3432 }
   3433 
   3434 // FIXME: This entire function is a giant hack to provide us with decent
   3435 // operand range validation/diagnostics until TableGen/MC can be extended
   3436 // to support autogeneration of this kind of validation.
   3437 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
   3438                                          SmallVectorImpl<SMLoc> &Loc) {
   3439   const MCRegisterInfo *RI = getContext().getRegisterInfo();
   3440   // Check for indexed addressing modes w/ the base register being the
   3441   // same as a destination/source register or pair load where
   3442   // the Rt == Rt2. All of those are undefined behaviour.
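          // e.g. "ldp x0, x0, [x1]" and "ldp x0, x1, [x0], #16" are both diagnosed.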
   3443   switch (Inst.getOpcode()) {
   3444   case AArch64::LDPSWpre:
   3445   case AArch64::LDPWpost:
   3446   case AArch64::LDPWpre:
   3447   case AArch64::LDPXpost:
   3448   case AArch64::LDPXpre: {
   3449     unsigned Rt = Inst.getOperand(1).getReg();
   3450     unsigned Rt2 = Inst.getOperand(2).getReg();
   3451     unsigned Rn = Inst.getOperand(3).getReg();
   3452     if (RI->isSubRegisterEq(Rn, Rt))
   3453       return Error(Loc[0], "unpredictable LDP instruction, writeback base "
   3454                            "is also a destination");
   3455     if (RI->isSubRegisterEq(Rn, Rt2))
   3456       return Error(Loc[1], "unpredictable LDP instruction, writeback base "
   3457                            "is also a destination");
   3458     // FALLTHROUGH
   3459   }
   3460   case AArch64::LDPDi:
   3461   case AArch64::LDPQi:
   3462   case AArch64::LDPSi:
   3463   case AArch64::LDPSWi:
   3464   case AArch64::LDPWi:
   3465   case AArch64::LDPXi: {
   3466     unsigned Rt = Inst.getOperand(0).getReg();
   3467     unsigned Rt2 = Inst.getOperand(1).getReg();
   3468     if (Rt == Rt2)
   3469       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
   3470     break;
   3471   }
   3472   case AArch64::LDPDpost:
   3473   case AArch64::LDPDpre:
   3474   case AArch64::LDPQpost:
   3475   case AArch64::LDPQpre:
   3476   case AArch64::LDPSpost:
   3477   case AArch64::LDPSpre:
   3478   case AArch64::LDPSWpost: {
   3479     unsigned Rt = Inst.getOperand(1).getReg();
   3480     unsigned Rt2 = Inst.getOperand(2).getReg();
   3481     if (Rt == Rt2)
   3482       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
   3483     break;
   3484   }
   3485   case AArch64::STPDpost:
   3486   case AArch64::STPDpre:
   3487   case AArch64::STPQpost:
   3488   case AArch64::STPQpre:
   3489   case AArch64::STPSpost:
   3490   case AArch64::STPSpre:
   3491   case AArch64::STPWpost:
   3492   case AArch64::STPWpre:
   3493   case AArch64::STPXpost:
   3494   case AArch64::STPXpre: {
   3495     unsigned Rt = Inst.getOperand(1).getReg();
   3496     unsigned Rt2 = Inst.getOperand(2).getReg();
   3497     unsigned Rn = Inst.getOperand(3).getReg();
   3498     if (RI->isSubRegisterEq(Rn, Rt))
   3499       return Error(Loc[0], "unpredictable STP instruction, writeback base "
   3500                            "is also a source");
   3501     if (RI->isSubRegisterEq(Rn, Rt2))
   3502       return Error(Loc[1], "unpredictable STP instruction, writeback base "
   3503                            "is also a source");
   3504     break;
   3505   }
   3506   case AArch64::LDRBBpre:
   3507   case AArch64::LDRBpre:
   3508   case AArch64::LDRHHpre:
   3509   case AArch64::LDRHpre:
   3510   case AArch64::LDRSBWpre:
   3511   case AArch64::LDRSBXpre:
   3512   case AArch64::LDRSHWpre:
   3513   case AArch64::LDRSHXpre:
   3514   case AArch64::LDRSWpre:
   3515   case AArch64::LDRWpre:
   3516   case AArch64::LDRXpre:
   3517   case AArch64::LDRBBpost:
   3518   case AArch64::LDRBpost:
   3519   case AArch64::LDRHHpost:
   3520   case AArch64::LDRHpost:
   3521   case AArch64::LDRSBWpost:
   3522   case AArch64::LDRSBXpost:
   3523   case AArch64::LDRSHWpost:
   3524   case AArch64::LDRSHXpost:
   3525   case AArch64::LDRSWpost:
   3526   case AArch64::LDRWpost:
   3527   case AArch64::LDRXpost: {
   3528     unsigned Rt = Inst.getOperand(1).getReg();
   3529     unsigned Rn = Inst.getOperand(2).getReg();
   3530     if (RI->isSubRegisterEq(Rn, Rt))
   3531       return Error(Loc[0], "unpredictable LDR instruction, writeback base "
   3532                            "is also a source");
   3533     break;
   3534   }
   3535   case AArch64::STRBBpost:
   3536   case AArch64::STRBpost:
   3537   case AArch64::STRHHpost:
   3538   case AArch64::STRHpost:
   3539   case AArch64::STRWpost:
   3540   case AArch64::STRXpost:
   3541   case AArch64::STRBBpre:
   3542   case AArch64::STRBpre:
   3543   case AArch64::STRHHpre:
   3544   case AArch64::STRHpre:
   3545   case AArch64::STRWpre:
   3546   case AArch64::STRXpre: {
   3547     unsigned Rt = Inst.getOperand(1).getReg();
   3548     unsigned Rn = Inst.getOperand(2).getReg();
   3549     if (RI->isSubRegisterEq(Rn, Rt))
   3550       return Error(Loc[0], "unpredictable STR instruction, writeback base "
   3551                            "is also a source");
   3552     break;
   3553   }
   3554   }
   3555 
   3556   // Now check immediate ranges. Separate from the above as there is overlap
   3557   // in the instructions being checked and this keeps the nested conditionals
   3558   // to a minimum.
   3559   switch (Inst.getOpcode()) {
   3560   case AArch64::ADDSWri:
   3561   case AArch64::ADDSXri:
   3562   case AArch64::ADDWri:
   3563   case AArch64::ADDXri:
   3564   case AArch64::SUBSWri:
   3565   case AArch64::SUBSXri:
   3566   case AArch64::SUBWri:
   3567   case AArch64::SUBXri: {
   3568     // Annoyingly we can't do this in the isAddSubImm predicate, so there is
   3569     // some slight duplication here.
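            // e.g. "add x0, x1, :lo12:sym" is accepted below, whereas ":got:sym" or
            // using such a specifier with "sub"/"adds" is rejected.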
   3570     if (Inst.getOperand(2).isExpr()) {
   3571       const MCExpr *Expr = Inst.getOperand(2).getExpr();
   3572       AArch64MCExpr::VariantKind ELFRefKind;
   3573       MCSymbolRefExpr::VariantKind DarwinRefKind;
   3574       int64_t Addend;
   3575       if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
   3576         return Error(Loc[2], "invalid immediate expression");
   3577       }
   3578 
   3579       // Only allow these with ADDXri.
   3580       if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
   3581           DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
   3582           Inst.getOpcode() == AArch64::ADDXri)
   3583         return false;
   3584 
   3585       // Only allow these with ADDXri/ADDWri
   3586       if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
   3587           ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
   3588           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
   3589           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
   3590           ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
   3591           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
   3592           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
   3593           ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
   3594           (Inst.getOpcode() == AArch64::ADDXri ||
   3595           Inst.getOpcode() == AArch64::ADDWri))
   3596         return false;
   3597 
   3598       // Don't allow expressions in the immediate field otherwise
   3599       return Error(Loc[2], "invalid immediate expression");
   3600     }
   3601     return false;
   3602   }
   3603   default:
   3604     return false;
   3605   }
   3606 }
   3607 
   3608 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
   3609   switch (ErrCode) {
   3610   case Match_MissingFeature:
   3611     return Error(Loc,
   3612                  "instruction requires a CPU feature not currently enabled");
   3613   case Match_InvalidOperand:
   3614     return Error(Loc, "invalid operand for instruction");
   3615   case Match_InvalidSuffix:
   3616     return Error(Loc, "invalid type suffix for instruction");
   3617   case Match_InvalidCondCode:
   3618     return Error(Loc, "expected AArch64 condition code");
   3619   case Match_AddSubRegExtendSmall:
   3620     return Error(Loc,
   3621       "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
   3622   case Match_AddSubRegExtendLarge:
   3623     return Error(Loc,
   3624       "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
   3625   case Match_AddSubSecondSource:
   3626     return Error(Loc,
   3627       "expected compatible register, symbol or integer in range [0, 4095]");
   3628   case Match_LogicalSecondSource:
   3629     return Error(Loc, "expected compatible register or logical immediate");
   3630   case Match_InvalidMovImm32Shift:
   3631     return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
   3632   case Match_InvalidMovImm64Shift:
   3633     return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
   3634   case Match_AddSubRegShift32:
   3635     return Error(Loc,
   3636        "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
   3637   case Match_AddSubRegShift64:
   3638     return Error(Loc,
   3639        "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
   3640   case Match_InvalidFPImm:
   3641     return Error(Loc,
   3642                  "expected compatible register or floating-point constant");
   3643   case Match_InvalidMemoryIndexedSImm9:
   3644     return Error(Loc, "index must be an integer in range [-256, 255].");
   3645   case Match_InvalidMemoryIndexed4SImm7:
   3646     return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
   3647   case Match_InvalidMemoryIndexed8SImm7:
   3648     return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
   3649   case Match_InvalidMemoryIndexed16SImm7:
   3650     return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
   3651   case Match_InvalidMemoryWExtend8:
   3652     return Error(Loc,
   3653                  "expected 'uxtw' or 'sxtw' with optional shift of #0");
   3654   case Match_InvalidMemoryWExtend16:
   3655     return Error(Loc,
   3656                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
   3657   case Match_InvalidMemoryWExtend32:
   3658     return Error(Loc,
   3659                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
   3660   case Match_InvalidMemoryWExtend64:
   3661     return Error(Loc,
   3662                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
   3663   case Match_InvalidMemoryWExtend128:
   3664     return Error(Loc,
   3665                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
   3666   case Match_InvalidMemoryXExtend8:
   3667     return Error(Loc,
   3668                  "expected 'lsl' or 'sxtx' with optional shift of #0");
   3669   case Match_InvalidMemoryXExtend16:
   3670     return Error(Loc,
   3671                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
   3672   case Match_InvalidMemoryXExtend32:
   3673     return Error(Loc,
   3674                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
   3675   case Match_InvalidMemoryXExtend64:
   3676     return Error(Loc,
   3677                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
   3678   case Match_InvalidMemoryXExtend128:
   3679     return Error(Loc,
   3680                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
   3681   case Match_InvalidMemoryIndexed1:
   3682     return Error(Loc, "index must be an integer in range [0, 4095].");
   3683   case Match_InvalidMemoryIndexed2:
   3684     return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
   3685   case Match_InvalidMemoryIndexed4:
   3686     return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
   3687   case Match_InvalidMemoryIndexed8:
   3688     return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
   3689   case Match_InvalidMemoryIndexed16:
   3690     return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
   3691   case Match_InvalidImm0_1:
   3692     return Error(Loc, "immediate must be an integer in range [0, 1].");
   3693   case Match_InvalidImm0_7:
   3694     return Error(Loc, "immediate must be an integer in range [0, 7].");
   3695   case Match_InvalidImm0_15:
   3696     return Error(Loc, "immediate must be an integer in range [0, 15].");
   3697   case Match_InvalidImm0_31:
   3698     return Error(Loc, "immediate must be an integer in range [0, 31].");
   3699   case Match_InvalidImm0_63:
   3700     return Error(Loc, "immediate must be an integer in range [0, 63].");
   3701   case Match_InvalidImm0_127:
   3702     return Error(Loc, "immediate must be an integer in range [0, 127].");
   3703   case Match_InvalidImm0_65535:
   3704     return Error(Loc, "immediate must be an integer in range [0, 65535].");
   3705   case Match_InvalidImm1_8:
   3706     return Error(Loc, "immediate must be an integer in range [1, 8].");
   3707   case Match_InvalidImm1_16:
   3708     return Error(Loc, "immediate must be an integer in range [1, 16].");
   3709   case Match_InvalidImm1_32:
   3710     return Error(Loc, "immediate must be an integer in range [1, 32].");
   3711   case Match_InvalidImm1_64:
   3712     return Error(Loc, "immediate must be an integer in range [1, 64].");
   3713   case Match_InvalidIndex1:
   3714     return Error(Loc, "expected lane specifier '[1]'");
   3715   case Match_InvalidIndexB:
   3716     return Error(Loc, "vector lane must be an integer in range [0, 15].");
   3717   case Match_InvalidIndexH:
   3718     return Error(Loc, "vector lane must be an integer in range [0, 7].");
   3719   case Match_InvalidIndexS:
   3720     return Error(Loc, "vector lane must be an integer in range [0, 3].");
   3721   case Match_InvalidIndexD:
   3722     return Error(Loc, "vector lane must be an integer in range [0, 1].");
   3723   case Match_InvalidLabel:
   3724     return Error(Loc, "expected label or encodable integer pc offset");
   3725   case Match_MRS:
   3726     return Error(Loc, "expected readable system register");
   3727   case Match_MSR:
   3728     return Error(Loc, "expected writable system register or pstate");
   3729   case Match_MnemonicFail:
   3730     return Error(Loc, "unrecognized instruction mnemonic");
   3731   default:
   3732     llvm_unreachable("unexpected error code!");
   3733   }
   3734 }
   3735 
   3736 static const char *getSubtargetFeatureName(uint64_t Val);
   3737 
   3738 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
   3739                                                OperandVector &Operands,
   3740                                                MCStreamer &Out,
   3741                                                uint64_t &ErrorInfo,
   3742                                                bool MatchingInlineAsm) {
   3743   assert(!Operands.empty() && "Unexpected empty operand list!");
   3744   AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
   3745   assert(Op.isToken() && "Leading operand should always be a mnemonic!");
   3746 
   3747   StringRef Tok = Op.getToken();
   3748   unsigned NumOperands = Operands.size();
   3749 
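          // Hand-expand the "lsl Rd, Rn, #imm" alias into UBFM here;
          // e.g. "lsl x0, x1, #3" becomes "ubfm x0, x1, #61, #60".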
   3750   if (NumOperands == 4 && Tok == "lsl") {
   3751     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
   3752     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
   3753     if (Op2.isReg() && Op3.isImm()) {
   3754       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
   3755       if (Op3CE) {
   3756         uint64_t Op3Val = Op3CE->getValue();
   3757         uint64_t NewOp3Val = 0;
   3758         uint64_t NewOp4Val = 0;
   3759         if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
   3760                 Op2.getReg())) {
   3761           NewOp3Val = (32 - Op3Val) & 0x1f;
   3762           NewOp4Val = 31 - Op3Val;
   3763         } else {
   3764           NewOp3Val = (64 - Op3Val) & 0x3f;
   3765           NewOp4Val = 63 - Op3Val;
   3766         }
   3767 
   3768         const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
   3769         const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
   3770 
   3771         Operands[0] = AArch64Operand::CreateToken(
   3772             "ubfm", false, Op.getStartLoc(), getContext());
   3773         Operands.push_back(AArch64Operand::CreateImm(
   3774             NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
   3775         Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
   3776                                                 Op3.getEndLoc(), getContext());
   3777       }
   3778     }
   3779   } else if (NumOperands == 4 && Tok == "bfc") {
   3780     // FIXME: Horrible hack to handle BFC->BFM alias.
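            // e.g. "bfc x0, #4, #8" is rewritten to "bfm x0, xzr, #60, #7".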
   3781     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
   3782     AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
   3783     AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
   3784 
   3785     if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
   3786       const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
   3787       const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
   3788 
   3789       if (LSBCE && WidthCE) {
   3790         uint64_t LSB = LSBCE->getValue();
   3791         uint64_t Width = WidthCE->getValue();
   3792 
   3793         uint64_t RegWidth = 0;
   3794         if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
   3795                 Op1.getReg()))
   3796           RegWidth = 64;
   3797         else
   3798           RegWidth = 32;
   3799 
   3800         if (LSB >= RegWidth)
   3801           return Error(LSBOp.getStartLoc(),
   3802                        "expected integer in range [0, 31]");
   3803         if (Width < 1 || Width > RegWidth)
   3804           return Error(WidthOp.getStartLoc(),
   3805                        "expected integer in range [1, 32]");
   3806 
   3807         uint64_t ImmR = 0;
   3808         if (RegWidth == 32)
   3809           ImmR = (32 - LSB) & 0x1f;
   3810         else
   3811           ImmR = (64 - LSB) & 0x3f;
   3812 
   3813         uint64_t ImmS = Width - 1;
   3814 
   3815         if (ImmR != 0 && ImmS >= ImmR)
   3816           return Error(WidthOp.getStartLoc(),
   3817                        "requested insert overflows register");
   3818 
   3819         const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
   3820         const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
   3821         Operands[0] = AArch64Operand::CreateToken(
   3822               "bfm", false, Op.getStartLoc(), getContext());
   3823         Operands[2] = AArch64Operand::CreateReg(
   3824             RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
   3825             SMLoc(), getContext());
   3826         Operands[3] = AArch64Operand::CreateImm(
   3827             ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
   3828         Operands.emplace_back(
   3829             AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
   3830                                       WidthOp.getEndLoc(), getContext()));
   3831       }
   3832     }
   3833   } else if (NumOperands == 5) {
   3834     // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
   3835     // UBFIZ -> UBFM aliases.
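            // e.g. "sbfiz x0, x1, #2, #3" becomes "sbfm x0, x1, #62, #2".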
   3836     if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
   3837       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
   3838       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
   3839       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
   3840 
   3841       if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
   3842         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
   3843         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
   3844 
   3845         if (Op3CE && Op4CE) {
   3846           uint64_t Op3Val = Op3CE->getValue();
   3847           uint64_t Op4Val = Op4CE->getValue();
   3848 
   3849           uint64_t RegWidth = 0;
   3850           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
   3851                   Op1.getReg()))
   3852             RegWidth = 64;
   3853           else
   3854             RegWidth = 32;
   3855 
   3856           if (Op3Val >= RegWidth)
   3857             return Error(Op3.getStartLoc(),
   3858                          "expected integer in range [0, 31]");
   3859           if (Op4Val < 1 || Op4Val > RegWidth)
   3860             return Error(Op4.getStartLoc(),
   3861                          "expected integer in range [1, 32]");
   3862 
   3863           uint64_t NewOp3Val = 0;
   3864           if (RegWidth == 32)
   3865             NewOp3Val = (32 - Op3Val) & 0x1f;
   3866           else
   3867             NewOp3Val = (64 - Op3Val) & 0x3f;
   3868 
   3869           uint64_t NewOp4Val = Op4Val - 1;
   3870 
   3871           if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
   3872             return Error(Op4.getStartLoc(),
   3873                          "requested insert overflows register");
   3874 
   3875           const MCExpr *NewOp3 =
   3876               MCConstantExpr::create(NewOp3Val, getContext());
   3877           const MCExpr *NewOp4 =
   3878               MCConstantExpr::create(NewOp4Val, getContext());
   3879           Operands[3] = AArch64Operand::CreateImm(
   3880               NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
   3881           Operands[4] = AArch64Operand::CreateImm(
   3882               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
   3883           if (Tok == "bfi")
   3884             Operands[0] = AArch64Operand::CreateToken(
   3885                 "bfm", false, Op.getStartLoc(), getContext());
   3886           else if (Tok == "sbfiz")
   3887             Operands[0] = AArch64Operand::CreateToken(
   3888                 "sbfm", false, Op.getStartLoc(), getContext());
   3889           else if (Tok == "ubfiz")
   3890             Operands[0] = AArch64Operand::CreateToken(
   3891                 "ubfm", false, Op.getStartLoc(), getContext());
   3892           else
   3893             llvm_unreachable("No valid mnemonic for alias?");
   3894         }
   3895       }
   3896 
   3897       // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
   3898       // UBFX -> UBFM aliases.
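              // e.g. "ubfx x0, x1, #8, #16" becomes "ubfm x0, x1, #8, #23".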
   3899     } else if (NumOperands == 5 &&
   3900                (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
   3901       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
   3902       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
   3903       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
   3904 
   3905       if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
   3906         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
   3907         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
   3908 
   3909         if (Op3CE && Op4CE) {
   3910           uint64_t Op3Val = Op3CE->getValue();
   3911           uint64_t Op4Val = Op4CE->getValue();
   3912 
   3913           uint64_t RegWidth = 0;
   3914           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
   3915                   Op1.getReg()))
   3916             RegWidth = 64;
   3917           else
   3918             RegWidth = 32;
   3919 
   3920           if (Op3Val >= RegWidth)
   3921             return Error(Op3.getStartLoc(),
   3922                          "expected integer in range [0, 31]");
   3923           if (Op4Val < 1 || Op4Val > RegWidth)
   3924             return Error(Op4.getStartLoc(),
   3925                          "expected integer in range [1, 32]");
   3926 
   3927           uint64_t NewOp4Val = Op3Val + Op4Val - 1;
   3928 
   3929           if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
   3930             return Error(Op4.getStartLoc(),
   3931                          "requested extract overflows register");
   3932 
   3933           const MCExpr *NewOp4 =
   3934               MCConstantExpr::create(NewOp4Val, getContext());
   3935           Operands[4] = AArch64Operand::CreateImm(
   3936               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
   3937           if (Tok == "bfxil")
   3938             Operands[0] = AArch64Operand::CreateToken(
   3939                 "bfm", false, Op.getStartLoc(), getContext());
   3940           else if (Tok == "sbfx")
   3941             Operands[0] = AArch64Operand::CreateToken(
   3942                 "sbfm", false, Op.getStartLoc(), getContext());
   3943           else if (Tok == "ubfx")
   3944             Operands[0] = AArch64Operand::CreateToken(
   3945                 "ubfm", false, Op.getStartLoc(), getContext());
   3946           else
   3947             llvm_unreachable("No valid mnemonic for alias?");
   3948         }
   3949       }
   3950     }
   3951   }
   3952   // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
   3953   //        InstAlias can't quite handle this since the reg classes aren't
   3954   //        subclasses.
   3955   if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
   3956     // The source register can be Wn here, but the matcher expects a
   3957     // GPR64. Twiddle it here if necessary.
   3958     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
   3959     if (Op.isReg()) {
   3960       unsigned Reg = getXRegFromWReg(Op.getReg());
   3961       Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
   3962                                               Op.getEndLoc(), getContext());
   3963     }
   3964   }
   3965   // FIXME: Likewise for sxt[bh] with an Xd dst operand
   3966   else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
   3967     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
   3968     if (Op.isReg() &&
   3969         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
   3970             Op.getReg())) {
   3971       // The source register can be Wn here, but the matcher expects a
   3972       // GPR64. Twiddle it here if necessary.
   3973       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
   3974       if (Op.isReg()) {
   3975         unsigned Reg = getXRegFromWReg(Op.getReg());
   3976         Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
   3977                                                 Op.getEndLoc(), getContext());
   3978       }
   3979     }
   3980   }
   3981   // FIXME: Likewise for uxt[bh] with a Xd dst operand
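          // For example, "uxtb x0, w1": here it is the 64-bit destination that
          // is narrowed to its W alias, since the canonical form of the
          // instruction is 32-bit.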
   3982   else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
   3983     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
   3984     if (Op.isReg() &&
   3985         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
   3986             Op.getReg())) {
   3987       // The source register can be Wn here, but the matcher expects a
   3988       // GPR32. Twiddle it here if necessary.
   3989       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
   3990       if (Op.isReg()) {
   3991         unsigned Reg = getWRegFromXReg(Op.getReg());
   3992         Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
   3993                                                 Op.getEndLoc(), getContext());
   3994       }
   3995     }
   3996   }
   3997 
   3998   // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
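          // For example, "fmov d0, #0.0" is rewritten to "fmov d0, xzr" and
          // "fmov s0, #0.0" to "fmov s0, wzr"; an FP-immediate value of -1 is
          // the parser's marker for 0.0, which has no FMOV immediate encoding.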
   3999   if (NumOperands == 3 && Tok == "fmov") {
   4000     AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
   4001     AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
   4002     if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
   4003       unsigned zreg =
   4004           !AArch64MCRegisterClasses[AArch64::FPR64RegClassID].contains(
   4005               RegOp.getReg())
   4006               ? AArch64::WZR
   4007               : AArch64::XZR;
   4008       Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
   4009                                               Op.getEndLoc(), getContext());
   4010     }
   4011   }
   4012 
   4013   MCInst Inst;
   4014   // First try to match against the secondary set of tables containing the
   4015   // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
   4016   unsigned MatchResult =
   4017       MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
   4018 
   4019   // If that fails, try against the alternate table containing long-form NEON:
   4020   // "fadd v0.2s, v1.2s, v2.2s"
   4021   if (MatchResult != Match_Success) {
   4022     // But first, save the short-form match result: we can use it in case the
   4023     // long-form match also fails.
   4024     auto ShortFormNEONErrorInfo = ErrorInfo;
   4025     auto ShortFormNEONMatchResult = MatchResult;
   4026 
   4027     MatchResult =
   4028         MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
   4029 
   4030     // If both matches failed and the long-form match failed on the mnemonic
   4031     // suffix token operand, the short-form match failure is probably more
   4032     // relevant: use it instead.
   4033     if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
   4034         Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
   4035         ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
   4036       MatchResult = ShortFormNEONMatchResult;
   4037       ErrorInfo = ShortFormNEONErrorInfo;
   4038     }
   4039   }
   4040 
   4041 
   4042   switch (MatchResult) {
   4043   case Match_Success: {
   4044     // Perform range checking and other semantic validations
   4045     SmallVector<SMLoc, 8> OperandLocs;
   4046     NumOperands = Operands.size();
   4047     for (unsigned i = 1; i < NumOperands; ++i)
   4048       OperandLocs.push_back(Operands[i]->getStartLoc());
   4049     if (validateInstruction(Inst, OperandLocs))
   4050       return true;
   4051 
   4052     Inst.setLoc(IDLoc);
   4053     Out.EmitInstruction(Inst, getSTI());
   4054     return false;
   4055   }
   4056   case Match_MissingFeature: {
   4057     assert(ErrorInfo && "Unknown missing feature!");
   4058     // Special case the error message for the very common case where only
   4059     // a single subtarget feature is missing (e.g. NEON).
   4060     std::string Msg = "instruction requires:";
   4061     uint64_t Mask = 1;
   4062     for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
   4063       if (ErrorInfo & Mask) {
   4064         Msg += " ";
   4065         Msg += getSubtargetFeatureName(ErrorInfo & Mask);
   4066       }
   4067       Mask <<= 1;
   4068     }
   4069     return Error(IDLoc, Msg);
   4070   }
   4071   case Match_MnemonicFail:
   4072     return showMatchError(IDLoc, MatchResult);
   4073   case Match_InvalidOperand: {
   4074     SMLoc ErrorLoc = IDLoc;
   4075 
   4076     if (ErrorInfo != ~0ULL) {
   4077       if (ErrorInfo >= Operands.size())
   4078         return Error(IDLoc, "too few operands for instruction");
   4079 
   4080       ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
   4081       if (ErrorLoc == SMLoc())
   4082         ErrorLoc = IDLoc;
   4083     }
   4084     // If the match failed on a suffix token operand, tweak the diagnostic
   4085     // accordingly.
   4086     if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
   4087         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
   4088       MatchResult = Match_InvalidSuffix;
   4089 
   4090     return showMatchError(ErrorLoc, MatchResult);
   4091   }
   4092   case Match_InvalidMemoryIndexed1:
   4093   case Match_InvalidMemoryIndexed2:
   4094   case Match_InvalidMemoryIndexed4:
   4095   case Match_InvalidMemoryIndexed8:
   4096   case Match_InvalidMemoryIndexed16:
   4097   case Match_InvalidCondCode:
   4098   case Match_AddSubRegExtendSmall:
   4099   case Match_AddSubRegExtendLarge:
   4100   case Match_AddSubSecondSource:
   4101   case Match_LogicalSecondSource:
   4102   case Match_AddSubRegShift32:
   4103   case Match_AddSubRegShift64:
   4104   case Match_InvalidMovImm32Shift:
   4105   case Match_InvalidMovImm64Shift:
   4106   case Match_InvalidFPImm:
   4107   case Match_InvalidMemoryWExtend8:
   4108   case Match_InvalidMemoryWExtend16:
   4109   case Match_InvalidMemoryWExtend32:
   4110   case Match_InvalidMemoryWExtend64:
   4111   case Match_InvalidMemoryWExtend128:
   4112   case Match_InvalidMemoryXExtend8:
   4113   case Match_InvalidMemoryXExtend16:
   4114   case Match_InvalidMemoryXExtend32:
   4115   case Match_InvalidMemoryXExtend64:
   4116   case Match_InvalidMemoryXExtend128:
   4117   case Match_InvalidMemoryIndexed4SImm7:
   4118   case Match_InvalidMemoryIndexed8SImm7:
   4119   case Match_InvalidMemoryIndexed16SImm7:
   4120   case Match_InvalidMemoryIndexedSImm9:
   4121   case Match_InvalidImm0_1:
   4122   case Match_InvalidImm0_7:
   4123   case Match_InvalidImm0_15:
   4124   case Match_InvalidImm0_31:
   4125   case Match_InvalidImm0_63:
   4126   case Match_InvalidImm0_127:
   4127   case Match_InvalidImm0_65535:
   4128   case Match_InvalidImm1_8:
   4129   case Match_InvalidImm1_16:
   4130   case Match_InvalidImm1_32:
   4131   case Match_InvalidImm1_64:
   4132   case Match_InvalidIndex1:
   4133   case Match_InvalidIndexB:
   4134   case Match_InvalidIndexH:
   4135   case Match_InvalidIndexS:
   4136   case Match_InvalidIndexD:
   4137   case Match_InvalidLabel:
   4138   case Match_MSR:
   4139   case Match_MRS: {
   4140     if (ErrorInfo >= Operands.size())
   4141       return Error(IDLoc, "too few operands for instruction");
   4142     // Any time we get here, there's nothing fancy to do. Just get the
   4143     // operand SMLoc and display the diagnostic.
   4144     SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
   4145     if (ErrorLoc == SMLoc())
   4146       ErrorLoc = IDLoc;
   4147     return showMatchError(ErrorLoc, MatchResult);
   4148   }
   4149   }
   4150 
   4151   llvm_unreachable("Implement any new match types added!");
   4152 }
   4153 
   4154 /// ParseDirective parses the AArch64-specific directives
   4155 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
   4156   const MCObjectFileInfo::Environment Format =
   4157     getContext().getObjectFileInfo()->getObjectFileType();
   4158   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
   4159   bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
   4160 
   4161   StringRef IDVal = DirectiveID.getIdentifier();
   4162   SMLoc Loc = DirectiveID.getLoc();
   4163   if (IDVal == ".arch")
   4164     return parseDirectiveArch(Loc);
   4165   if (IDVal == ".cpu")
   4166     return parseDirectiveCPU(Loc);
   4167   if (IDVal == ".hword")
   4168     return parseDirectiveWord(2, Loc);
   4169   if (IDVal == ".word")
   4170     return parseDirectiveWord(4, Loc);
   4171   if (IDVal == ".xword")
   4172     return parseDirectiveWord(8, Loc);
   4173   if (IDVal == ".tlsdesccall")
   4174     return parseDirectiveTLSDescCall(Loc);
   4175   if (IDVal == ".ltorg" || IDVal == ".pool")
   4176     return parseDirectiveLtorg(Loc);
   4177   if (IDVal == ".unreq")
   4178     return parseDirectiveUnreq(Loc);
   4179 
   4180   if (!IsMachO && !IsCOFF) {
   4181     if (IDVal == ".inst")
   4182       return parseDirectiveInst(Loc);
   4183   }
   4184 
   4185   return parseDirectiveLOH(IDVal, Loc);
   4186 }
   4187 
   4188 static const struct {
   4189   const char *Name;
   4190   const FeatureBitset Features;
   4191 } ExtensionMap[] = {
   4192   { "crc", {AArch64::FeatureCRC} },
   4193   { "crypto", {AArch64::FeatureCrypto} },
   4194   { "fp", {AArch64::FeatureFPARMv8} },
   4195   { "simd", {AArch64::FeatureNEON} },
   4196 
   4197   // FIXME: Unsupported extensions
   4198   { "lse", {} },
   4199   { "pan", {} },
   4200   { "lor", {} },
   4201   { "rdma", {} },
   4202   { "profile", {} },
   4203 };
   4204 
   4205 /// parseDirectiveArch
   4206 ///   ::= .arch token
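        ///   e.g. ".arch armv8.1-a+crc": everything after the first '+' is
        ///   passed through as additional feature strings.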
   4207 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
   4208   SMLoc ArchLoc = getLoc();
   4209 
   4210   StringRef Arch, ExtensionString;
   4211   std::tie(Arch, ExtensionString) =
   4212       getParser().parseStringToEndOfStatement().trim().split('+');
   4213 
   4214   unsigned ID = AArch64::parseArch(Arch);
   4215   if (ID == static_cast<unsigned>(AArch64::ArchKind::AK_INVALID)) {
   4216     Error(ArchLoc, "unknown arch name");
   4217     return false;
   4218   }
   4219 
   4220   MCSubtargetInfo &STI = copySTI();
   4221   STI.setDefaultFeatures("", "");
   4222   if (!ExtensionString.empty())
   4223     STI.setDefaultFeatures("", ("+" + ExtensionString).str());
   4224   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
   4225 
   4226   return false;
   4227 }
   4228 
   4229 /// parseDirectiveCPU
   4230 ///   ::= .cpu id
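        ///   e.g. ".cpu cortex-a57+crc+nofp": extensions may be enabled or,
        ///   with a "no" prefix, disabled on top of the CPU's defaults.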
   4231 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
   4232   SMLoc CPULoc = getLoc();
   4233 
   4234   StringRef CPU, ExtensionString;
   4235   std::tie(CPU, ExtensionString) =
   4236       getParser().parseStringToEndOfStatement().trim().split('+');
   4237 
   4238   SmallVector<StringRef, 4> RequestedExtensions;
   4239   if (!ExtensionString.empty())
   4240     ExtensionString.split(RequestedExtensions, '+');
   4241 
   4242   // FIXME This is using tablegen data, but should be moved to ARMTargetParser
   4243   // once that is tablegen'ed
   4244   if (!getSTI().isCPUStringValid(CPU)) {
   4245     Error(CPULoc, "unknown CPU name");
   4246     return false;
   4247   }
   4248 
   4249   MCSubtargetInfo &STI = copySTI();
   4250   STI.setDefaultFeatures(CPU, "");
   4251 
   4252   FeatureBitset Features = STI.getFeatureBits();
   4253   for (auto Name : RequestedExtensions) {
   4254     bool EnableFeature = true;
   4255 
   4256     if (Name.startswith_lower("no")) {
   4257       EnableFeature = false;
   4258       Name = Name.substr(2);
   4259     }
   4260 
   4261     for (const auto &Extension : ExtensionMap) {
   4262       if (Extension.Name != Name)
   4263         continue;
   4264 
   4265       if (Extension.Features.none())
   4266         report_fatal_error("unsupported architectural extension: " + Name);
   4267 
   4268       FeatureBitset ToggleFeatures = EnableFeature
   4269                                          ? (~Features & Extension.Features)
   4270                                          : ( Features & Extension.Features);
   4271       uint64_t Features =
   4272           ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
   4273       setAvailableFeatures(Features);
   4274 
   4275       break;
   4276     }
   4277   }
   4278   return false;
   4279 }
   4280 
   4281 /// parseDirectiveWord
   4282 ///  ::= .word [ expression (, expression)* ]
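        ///  e.g. ".word 0x11223344" emits a 4-byte value; per ParseDirective
        ///  above, .hword and .xword emit 2- and 8-byte values respectively.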
   4283 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
   4284   MCAsmParser &Parser = getParser();
   4285   if (getLexer().isNot(AsmToken::EndOfStatement)) {
   4286     for (;;) {
   4287       const MCExpr *Value;
   4288       if (getParser().parseExpression(Value))
   4289         return true;
   4290 
   4291       getParser().getStreamer().EmitValue(Value, Size, L);
   4292 
   4293       if (getLexer().is(AsmToken::EndOfStatement))
   4294         break;
   4295 
   4296       // FIXME: Improve diagnostic.
   4297       if (getLexer().isNot(AsmToken::Comma))
   4298         return Error(L, "unexpected token in directive");
   4299       Parser.Lex();
   4300     }
   4301   }
   4302 
   4303   Parser.Lex();
   4304   return false;
   4305 }
   4306 
   4307 /// parseDirectiveInst
   4308 ///  ::= .inst opcode [, ...]
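        ///  e.g. ".inst 0xd503201f" emits that 32-bit word directly as an
        ///  instruction (here a NOP encoding).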
   4309 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
   4310   MCAsmParser &Parser = getParser();
   4311   if (getLexer().is(AsmToken::EndOfStatement)) {
   4312     Parser.eatToEndOfStatement();
   4313     Error(Loc, "expected expression following directive");
   4314     return false;
   4315   }
   4316 
   4317   for (;;) {
   4318     const MCExpr *Expr;
   4319 
   4320     if (getParser().parseExpression(Expr)) {
   4321       Error(Loc, "expected expression");
   4322       return false;
   4323     }
   4324 
   4325     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
   4326     if (!Value) {
   4327       Error(Loc, "expected constant expression");
   4328       return false;
   4329     }
   4330 
   4331     getTargetStreamer().emitInst(Value->getValue());
   4332 
   4333     if (getLexer().is(AsmToken::EndOfStatement))
   4334       break;
   4335 
   4336     if (getLexer().isNot(AsmToken::Comma)) {
   4337       Error(Loc, "unexpected token in directive");
   4338       return false;
   4339     }
   4340 
   4341     Parser.Lex(); // Eat comma.
   4342   }
   4343 
   4344   Parser.Lex();
   4345   return false;
   4346 }
   4347 
   4348 // parseDirectiveTLSDescCall:
   4349 //   ::= .tlsdesccall symbol
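        //   e.g. ".tlsdesccall var" emits no code of its own; it produces the
        //   TLSDESCCALL pseudo-instruction, marking this point of a TLS
        //   descriptor call sequence for later relaxation.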
   4350 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
   4351   StringRef Name;
   4352   if (getParser().parseIdentifier(Name))
   4353     return Error(L, "expected symbol after directive");
   4354 
   4355   MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
   4356   const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
   4357   Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
   4358 
   4359   MCInst Inst;
   4360   Inst.setOpcode(AArch64::TLSDESCCALL);
   4361   Inst.addOperand(MCOperand::createExpr(Expr));
   4362 
   4363   getParser().getStreamer().EmitInstruction(Inst, getSTI());
   4364   return false;
   4365 }
   4366 
   4367 /// ::= .loh <lohName | lohId> label1, ..., labelN
   4368 /// The number of arguments depends on the loh identifier.
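        /// e.g. ".loh AdrpAdd Lloh0, Lloh1" (an assumed two-argument example;
        /// the LOH kind may also be given numerically).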
   4369 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
   4370   if (IDVal != MCLOHDirectiveName())
   4371     return true;
   4372   MCLOHType Kind;
   4373   if (getParser().getTok().isNot(AsmToken::Identifier)) {
   4374     if (getParser().getTok().isNot(AsmToken::Integer))
   4375       return TokError("expected an identifier or a number in directive");
   4376     // We successfully got a numeric value for the identifier;
   4377     // check that it is valid.
   4378     int64_t Id = getParser().getTok().getIntVal();
   4379     if (Id <= -1U && !isValidMCLOHType(Id))
   4380       return TokError("invalid numeric identifier in directive");
   4381     Kind = (MCLOHType)Id;
   4382   } else {
   4383     StringRef Name = getTok().getIdentifier();
   4384     // We successfully parsed an identifier;
   4385     // check that it is a recognized one.
   4386     int Id = MCLOHNameToId(Name);
   4387 
   4388     if (Id == -1)
   4389       return TokError("invalid identifier in directive");
   4390     Kind = (MCLOHType)Id;
   4391   }
   4392   // Consume the identifier.
   4393   Lex();
   4394   // Get the number of arguments of this LOH.
   4395   int NbArgs = MCLOHIdToNbArgs(Kind);
   4396 
   4397   assert(NbArgs != -1 && "Invalid number of arguments");
   4398 
   4399   SmallVector<MCSymbol *, 3> Args;
   4400   for (int Idx = 0; Idx < NbArgs; ++Idx) {
   4401     StringRef Name;
   4402     if (getParser().parseIdentifier(Name))
   4403       return TokError("expected identifier in directive");
   4404     Args.push_back(getContext().getOrCreateSymbol(Name));
   4405 
   4406     if (Idx + 1 == NbArgs)
   4407       break;
   4408     if (getLexer().isNot(AsmToken::Comma))
   4409       return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
   4410     Lex();
   4411   }
   4412   if (getLexer().isNot(AsmToken::EndOfStatement))
   4413     return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
   4414 
   4415   getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
   4416   return false;
   4417 }
   4418 
   4419 /// parseDirectiveLtorg
   4420 ///  ::= .ltorg | .pool
   4421 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
   4422   getTargetStreamer().emitCurrentConstantPool();
   4423   return false;
   4424 }
   4425 
   4426 /// parseDirectiveReq
   4427 ///  ::= name .req registername
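        ///  e.g. "ptr .req x4" lets later code write "mov ptr, #1" in place of
        ///  "mov x4, #1"; ".unreq ptr" removes the alias again.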
   4428 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
   4429   MCAsmParser &Parser = getParser();
   4430   Parser.Lex(); // Eat the '.req' token.
   4431   SMLoc SRegLoc = getLoc();
   4432   unsigned RegNum = tryParseRegister();
   4433   bool IsVector = false;
   4434 
   4435   if (RegNum == static_cast<unsigned>(-1)) {
   4436     StringRef Kind;
   4437     RegNum = tryMatchVectorRegister(Kind, false);
   4438     if (!Kind.empty()) {
   4439       Error(SRegLoc, "expected vector register without type specifier");
   4440       return false;
   4441     }
   4442     IsVector = true;
   4443   }
   4444 
   4445   if (RegNum == static_cast<unsigned>(-1)) {
   4446     Parser.eatToEndOfStatement();
   4447     Error(SRegLoc, "register name or alias expected");
   4448     return false;
   4449   }
   4450 
   4451   // Shouldn't be anything else.
   4452   if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
   4453     Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
   4454     Parser.eatToEndOfStatement();
   4455     return false;
   4456   }
   4457 
   4458   Parser.Lex(); // Consume the EndOfStatement
   4459 
   4460   auto pair = std::make_pair(IsVector, RegNum);
   4461   if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
   4462     Warning(L, "ignoring redefinition of register alias '" + Name + "'");
   4463 
   4464   return true;
   4465 }
   4466 
   4467 /// parseDirectiveUnreq
   4468 ///  ::= .unreq registername
   4469 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
   4470   MCAsmParser &Parser = getParser();
   4471   if (Parser.getTok().isNot(AsmToken::Identifier)) {
   4472     Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
   4473     Parser.eatToEndOfStatement();
   4474     return false;
   4475   }
   4476   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
   4477   Parser.Lex(); // Eat the identifier.
   4478   return false;
   4479 }
   4480 
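        /// Decompose a symbol-reference expression such as ":lo12:sym + 4" into
        /// its AArch64 modifier (ELFRefKind), Darwin modifier (DarwinRefKind,
        /// e.g. for "sym@PAGEOFF"), and constant addend.  Returns true only for
        /// a (possibly modified) symbol plus or minus a constant, and rejects
        /// expressions that mix ELF- and Darwin-style modifiers.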
   4481 bool
   4482 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
   4483                                     AArch64MCExpr::VariantKind &ELFRefKind,
   4484                                     MCSymbolRefExpr::VariantKind &DarwinRefKind,
   4485                                     int64_t &Addend) {
   4486   ELFRefKind = AArch64MCExpr::VK_INVALID;
   4487   DarwinRefKind = MCSymbolRefExpr::VK_None;
   4488   Addend = 0;
   4489 
   4490   if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
   4491     ELFRefKind = AE->getKind();
   4492     Expr = AE->getSubExpr();
   4493   }
   4494 
   4495   const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
   4496   if (SE) {
   4497     // It's a simple symbol reference with no addend.
   4498     DarwinRefKind = SE->getKind();
   4499     return true;
   4500   }
   4501 
   4502   const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
   4503   if (!BE)
   4504     return false;
   4505 
   4506   SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
   4507   if (!SE)
   4508     return false;
   4509   DarwinRefKind = SE->getKind();
   4510 
   4511   if (BE->getOpcode() != MCBinaryExpr::Add &&
   4512       BE->getOpcode() != MCBinaryExpr::Sub)
   4513     return false;
   4514 
   4515   // See if the addend is a constant; otherwise there's more going
   4516   // on here than we can deal with.
   4517   auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
   4518   if (!AddendExpr)
   4519     return false;
   4520 
   4521   Addend = AddendExpr->getValue();
   4522   if (BE->getOpcode() == MCBinaryExpr::Sub)
   4523     Addend = -Addend;
   4524 
   4525   // It's some symbol reference + a constant addend, but really
   4526   // shouldn't use both Darwin and ELF syntax.
   4527   return ELFRefKind == AArch64MCExpr::VK_INVALID ||
   4528          DarwinRefKind == MCSymbolRefExpr::VK_None;
   4529 }
   4530 
   4531 /// Force static initialization.
   4532 extern "C" void LLVMInitializeAArch64AsmParser() {
   4533   RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
   4534   RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
   4535   RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
   4536 }
   4537 
   4538 #define GET_REGISTER_MATCHER
   4539 #define GET_SUBTARGET_FEATURE_NAME
   4540 #define GET_MATCHER_IMPLEMENTATION
   4541 #include "AArch64GenAsmMatcher.inc"
   4542 
   4543 // Define this matcher function after the auto-generated include so we
   4544 // have the match class enum definitions.
   4545 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
   4546                                                       unsigned Kind) {
   4547   AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
   4548   // If the kind is a token for a literal immediate, check if our asm
   4549   // operand matches. This is for InstAliases which have a fixed-value
   4550   // immediate in the syntax.
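          // (Match kinds are named after the literal token: '#' is ASCII 35,
          // so MCK__35_8, for instance, stands for the token "#8".)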
   4551   int64_t ExpectedVal;
   4552   switch (Kind) {
   4553   default:
   4554     return Match_InvalidOperand;
   4555   case MCK__35_0:
   4556     ExpectedVal = 0;
   4557     break;
   4558   case MCK__35_1:
   4559     ExpectedVal = 1;
   4560     break;
   4561   case MCK__35_12:
   4562     ExpectedVal = 12;
   4563     break;
   4564   case MCK__35_16:
   4565     ExpectedVal = 16;
   4566     break;
   4567   case MCK__35_2:
   4568     ExpectedVal = 2;
   4569     break;
   4570   case MCK__35_24:
   4571     ExpectedVal = 24;
   4572     break;
   4573   case MCK__35_3:
   4574     ExpectedVal = 3;
   4575     break;
   4576   case MCK__35_32:
   4577     ExpectedVal = 32;
   4578     break;
   4579   case MCK__35_4:
   4580     ExpectedVal = 4;
   4581     break;
   4582   case MCK__35_48:
   4583     ExpectedVal = 48;
   4584     break;
   4585   case MCK__35_6:
   4586     ExpectedVal = 6;
   4587     break;
   4588   case MCK__35_64:
   4589     ExpectedVal = 64;
   4590     break;
   4591   case MCK__35_8:
   4592     ExpectedVal = 8;
   4593     break;
   4594   }
   4595   if (!Op.isImm())
   4596     return Match_InvalidOperand;
   4597   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
   4598   if (!CE)
   4599     return Match_InvalidOperand;
   4600   if (CE->getValue() == ExpectedVal)
   4601     return Match_Success;
   4602   return Match_InvalidOperand;
   4603 }
   4604 
   4605 
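        /// tryParseGPRSeqPair - parse an even/odd pair of consecutive,
        /// same-size GPRs (e.g. "x0, x1") and fold them into the corresponding
        /// sequential-pair super-register, as used for example by the v8.1a
        /// CASP compare-and-swap-pair instructions.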
   4606 AArch64AsmParser::OperandMatchResultTy
   4607 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
   4608 
   4609   SMLoc S = getLoc();
   4610 
   4611   if (getParser().getTok().isNot(AsmToken::Identifier)) {
   4612     Error(S, "expected register");
   4613     return MatchOperand_ParseFail;
   4614   }
   4615 
   4616   int FirstReg = tryParseRegister();
   4617   if (FirstReg == -1) {
   4618     return MatchOperand_ParseFail;
   4619   }
   4620   const MCRegisterClass &WRegClass =
   4621       AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
   4622   const MCRegisterClass &XRegClass =
   4623       AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
   4624 
   4625   bool isXReg = XRegClass.contains(FirstReg),
   4626        isWReg = WRegClass.contains(FirstReg);
   4627   if (!isXReg && !isWReg) {
   4628     Error(S, "expected first even register of a "
   4629              "consecutive same-size even/odd register pair");
   4630     return MatchOperand_ParseFail;
   4631   }
   4632 
   4633   const MCRegisterInfo *RI = getContext().getRegisterInfo();
   4634   unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
   4635 
   4636   if (FirstEncoding & 0x1) {
   4637     Error(S, "expected first even register of a "
   4638              "consecutive same-size even/odd register pair");
   4639     return MatchOperand_ParseFail;
   4640   }
   4641 
   4642   SMLoc M = getLoc();
   4643   if (getParser().getTok().isNot(AsmToken::Comma)) {
   4644     Error(M, "expected comma");
   4645     return MatchOperand_ParseFail;
   4646   }
   4647   // Eat the comma
   4648   getParser().Lex();
   4649 
   4650   SMLoc E = getLoc();
   4651   int SecondReg = tryParseRegister();
   4652   if (SecondReg == -1) {
   4653     return MatchOperand_ParseFail;
   4654   }
   4655 
   4656   if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
   4657       (isXReg && !XRegClass.contains(SecondReg)) ||
   4658       (isWReg && !WRegClass.contains(SecondReg))) {
   4659     Error(E, "expected second odd register of a "
   4660              "consecutive same-size even/odd register pair");
   4661     return MatchOperand_ParseFail;
   4662   }
   4663 
   4664   unsigned Pair = 0;
   4665   if (isXReg) {
   4666     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
   4667            &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
   4668   } else {
   4669     Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
   4670            &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
   4671   }
   4672 
   4673   Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
   4674       getContext()));
   4675 
   4676   return MatchOperand_Success;
   4677 }
   4678