      1 //===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 
     10 #include "llvm/MC/MCTargetAsmParser.h"
     11 #include "MCTargetDesc/ARMAddressingModes.h"
     12 #include "MCTargetDesc/ARMBaseInfo.h"
     13 #include "MCTargetDesc/ARMMCExpr.h"
     14 #include "llvm/ADT/BitVector.h"
     15 #include "llvm/ADT/OwningPtr.h"
     16 #include "llvm/ADT/STLExtras.h"
     17 #include "llvm/ADT/SmallVector.h"
     18 #include "llvm/ADT/StringSwitch.h"
     19 #include "llvm/ADT/Twine.h"
     20 #include "llvm/MC/MCAsmInfo.h"
     21 #include "llvm/MC/MCAssembler.h"
     22 #include "llvm/MC/MCContext.h"
     23 #include "llvm/MC/MCELFStreamer.h"
     24 #include "llvm/MC/MCExpr.h"
     25 #include "llvm/MC/MCInst.h"
     26 #include "llvm/MC/MCInstrDesc.h"
     27 #include "llvm/MC/MCParser/MCAsmLexer.h"
     28 #include "llvm/MC/MCParser/MCAsmParser.h"
     29 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
     30 #include "llvm/MC/MCRegisterInfo.h"
     31 #include "llvm/MC/MCStreamer.h"
     32 #include "llvm/MC/MCSubtargetInfo.h"
     33 #include "llvm/Support/ELF.h"
     34 #include "llvm/Support/MathExtras.h"
     35 #include "llvm/Support/SourceMgr.h"
     36 #include "llvm/Support/TargetRegistry.h"
     37 #include "llvm/Support/raw_ostream.h"
     38 
     39 using namespace llvm;
     40 
     41 namespace {
     42 
     43 class ARMOperand;
     44 
     45 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
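         // These correspond to the NEON register-list syntax, e.g. "{d0, d1}"
         // (NoLanes), "{d0[], d1[]}" (AllLanes) and "{d0[2]}" (IndexedLane).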
     46 
     47 class ARMAsmParser : public MCTargetAsmParser {
     48   MCSubtargetInfo &STI;
     49   MCAsmParser &Parser;
     50   const MCRegisterInfo *MRI;
     51 
      52   // Map of register aliases defined via the .req directive.
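           // For example, after "foo .req r5" the name "foo" parses as r5
           // until a matching ".unreq foo" removes the alias.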
     53   StringMap<unsigned> RegisterReqs;
     54 
     55   struct {
     56     ARMCC::CondCodes Cond;    // Condition for IT block.
     57     unsigned Mask:4;          // Condition mask for instructions.
     58                               // Starting at first 1 (from lsb).
     59                               //   '1'  condition as indicated in IT.
     60                               //   '0'  inverse of condition (else).
     61                               // Count of instructions in IT block is
     62                               // 4 - trailingzeroes(mask)
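                                       // E.g. a mask with one trailing zero
                                       // (xx10) describes a block of
                                       // 4 - 1 = 3 instructions; the other
                                       // bits pick 'then' (1) or 'else' (0)
                                       // for the instructions after the
                                       // first one.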
     63 
     64     bool FirstCond;           // Explicit flag for when we're parsing the
      65                               // first instruction in the IT block. It's
      66                               // implied in the mask, so it needs special
     67                               // handling.
     68 
     69     unsigned CurPosition;     // Current position in parsing of IT
     70                               // block. In range [0,3]. Initialized
     71                               // according to count of instructions in block.
     72                               // ~0U if no active IT block.
     73   } ITState;
     74   bool inITBlock() { return ITState.CurPosition != ~0U;}
     75   void forwardITPosition() {
     76     if (!inITBlock()) return;
     77     // Move to the next instruction in the IT block, if there is one. If not,
     78     // mark the block as done.
     79     unsigned TZ = CountTrailingZeros_32(ITState.Mask);
     80     if (++ITState.CurPosition == 5 - TZ)
     81       ITState.CurPosition = ~0U; // Done with the IT block after this.
     82   }
     83 
     84 
     85   MCAsmParser &getParser() const { return Parser; }
     86   MCAsmLexer &getLexer() const { return Parser.getLexer(); }
     87 
     88   bool Warning(SMLoc L, const Twine &Msg,
     89                ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
     90     return Parser.Warning(L, Msg, Ranges);
     91   }
     92   bool Error(SMLoc L, const Twine &Msg,
     93              ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
     94     return Parser.Error(L, Msg, Ranges);
     95   }
     96 
     97   int tryParseRegister();
     98   bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
     99   int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
    100   bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
    101   bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
    102   bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
    103   bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
    104   bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
    105                               unsigned &ShiftAmount);
    106   bool parseDirectiveWord(unsigned Size, SMLoc L);
    107   bool parseDirectiveThumb(SMLoc L);
    108   bool parseDirectiveARM(SMLoc L);
    109   bool parseDirectiveThumbFunc(SMLoc L);
    110   bool parseDirectiveCode(SMLoc L);
    111   bool parseDirectiveSyntax(SMLoc L);
    112   bool parseDirectiveReq(StringRef Name, SMLoc L);
    113   bool parseDirectiveUnreq(SMLoc L);
    114   bool parseDirectiveArch(SMLoc L);
    115   bool parseDirectiveEabiAttr(SMLoc L);
    116 
    117   StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
    118                           bool &CarrySetting, unsigned &ProcessorIMod,
    119                           StringRef &ITMask);
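           // splitMnemonic example: "addseq" yields the base mnemonic "add",
           // CarrySetting for the 's' suffix and the "eq" predication code;
           // "ittet" yields "it" with an ITMask of "tet".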
    120   void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
    121                              bool &CanAcceptPredicationCode);
    122 
    123   bool isThumb() const {
    124     // FIXME: Can tablegen auto-generate this?
    125     return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
    126   }
    127   bool isThumbOne() const {
    128     return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
    129   }
    130   bool isThumbTwo() const {
    131     return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
    132   }
    133   bool hasV6Ops() const {
    134     return STI.getFeatureBits() & ARM::HasV6Ops;
    135   }
    136   bool hasV7Ops() const {
    137     return STI.getFeatureBits() & ARM::HasV7Ops;
    138   }
    139   void SwitchMode() {
    140     unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    141     setAvailableFeatures(FB);
    142   }
    143   bool isMClass() const {
    144     return STI.getFeatureBits() & ARM::FeatureMClass;
    145   }
    146 
    147   /// @name Auto-generated Match Functions
    148   /// {
    149 
    150 #define GET_ASSEMBLER_HEADER
    151 #include "ARMGenAsmMatcher.inc"
    152 
    153   /// }
    154 
    155   OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
    156   OperandMatchResultTy parseCoprocNumOperand(
    157     SmallVectorImpl<MCParsedAsmOperand*>&);
    158   OperandMatchResultTy parseCoprocRegOperand(
    159     SmallVectorImpl<MCParsedAsmOperand*>&);
    160   OperandMatchResultTy parseCoprocOptionOperand(
    161     SmallVectorImpl<MCParsedAsmOperand*>&);
    162   OperandMatchResultTy parseMemBarrierOptOperand(
    163     SmallVectorImpl<MCParsedAsmOperand*>&);
    164   OperandMatchResultTy parseProcIFlagsOperand(
    165     SmallVectorImpl<MCParsedAsmOperand*>&);
    166   OperandMatchResultTy parseMSRMaskOperand(
    167     SmallVectorImpl<MCParsedAsmOperand*>&);
    168   OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
    169                                    StringRef Op, int Low, int High);
    170   OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    171     return parsePKHImm(O, "lsl", 0, 31);
    172   }
    173   OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    174     return parsePKHImm(O, "asr", 1, 32);
    175   }
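           // parsePKHLSLImm/parsePKHASRImm handle the PKH shifted operands,
           // e.g. "pkhbt r0, r1, r2, lsl #8" (lsl 0-31) and
           // "pkhtb r0, r1, r2, asr #16" (asr 1-32).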
    176   OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
    177   OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
    178   OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
    179   OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
    180   OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
    181   OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
    182   OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
    183   OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
    184   OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
    185                                        SMLoc &EndLoc);
    186 
    187   // Asm Match Converter Methods
    188   void cvtT2LdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
    189   void cvtT2StrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
    190   void cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
    191                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
    192   void cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
    193                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
    194   void cvtLdWriteBackRegAddrMode2(MCInst &Inst,
    195                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
    196   void cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
    197                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
    198   void cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
    199                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
    200   void cvtStWriteBackRegAddrMode2(MCInst &Inst,
    201                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
    202   void cvtStWriteBackRegAddrMode3(MCInst &Inst,
    203                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
    204   void cvtLdExtTWriteBackImm(MCInst &Inst,
    205                              const SmallVectorImpl<MCParsedAsmOperand*> &);
    206   void cvtLdExtTWriteBackReg(MCInst &Inst,
    207                              const SmallVectorImpl<MCParsedAsmOperand*> &);
    208   void cvtStExtTWriteBackImm(MCInst &Inst,
    209                              const SmallVectorImpl<MCParsedAsmOperand*> &);
    210   void cvtStExtTWriteBackReg(MCInst &Inst,
    211                              const SmallVectorImpl<MCParsedAsmOperand*> &);
    212   void cvtLdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
    213   void cvtStrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
    214   void cvtLdWriteBackRegAddrMode3(MCInst &Inst,
    215                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
    216   void cvtThumbMultiply(MCInst &Inst,
    217                         const SmallVectorImpl<MCParsedAsmOperand*> &);
    218   void cvtVLDwbFixed(MCInst &Inst,
    219                      const SmallVectorImpl<MCParsedAsmOperand*> &);
    220   void cvtVLDwbRegister(MCInst &Inst,
    221                         const SmallVectorImpl<MCParsedAsmOperand*> &);
    222   void cvtVSTwbFixed(MCInst &Inst,
    223                      const SmallVectorImpl<MCParsedAsmOperand*> &);
    224   void cvtVSTwbRegister(MCInst &Inst,
    225                         const SmallVectorImpl<MCParsedAsmOperand*> &);
    226   bool validateInstruction(MCInst &Inst,
    227                            const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
    228   bool processInstruction(MCInst &Inst,
    229                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
    230   bool shouldOmitCCOutOperand(StringRef Mnemonic,
    231                               SmallVectorImpl<MCParsedAsmOperand*> &Operands);
    232 
    233 public:
    234   enum ARMMatchResultTy {
    235     Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    236     Match_RequiresNotITBlock,
    237     Match_RequiresV6,
    238     Match_RequiresThumb2,
    239 #define GET_OPERAND_DIAGNOSTIC_TYPES
    240 #include "ARMGenAsmMatcher.inc"
    241 
    242   };
    243 
    244   ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    245     : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    246     MCAsmParserExtension::Initialize(_Parser);
    247 
    248     // Cache the MCRegisterInfo.
    249     MRI = &getContext().getRegisterInfo();
    250 
    251     // Initialize the set of available features.
    252     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
    253 
    254     // Not in an ITBlock to start with.
    255     ITState.CurPosition = ~0U;
    256 
    257     // Set ELF header flags.
    258     // FIXME: This should eventually end up somewhere else where more
    259     // intelligent flag decisions can be made. For now we are just maintaining
     260     // the status quo for ARM and setting EF_ARM_EABI_VER5 as the default.
    261     if (MCELFStreamer *MES = dyn_cast<MCELFStreamer>(&Parser.getStreamer()))
    262       MES->getAssembler().setELFHeaderEFlags(ELF::EF_ARM_EABI_VER5);
    263   }
    264 
    265   // Implementation of the MCTargetAsmParser interface:
    266   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
    267   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
    268                         SMLoc NameLoc,
    269                         SmallVectorImpl<MCParsedAsmOperand*> &Operands);
    270   bool ParseDirective(AsmToken DirectiveID);
    271 
    272   unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
    273   unsigned checkTargetMatchPredicate(MCInst &Inst);
    274 
    275   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
    276                                SmallVectorImpl<MCParsedAsmOperand*> &Operands,
    277                                MCStreamer &Out, unsigned &ErrorInfo,
    278                                bool MatchingInlineAsm);
    279 };
    280 } // end anonymous namespace
    281 
    282 namespace {
    283 
    284 /// ARMOperand - Instances of this class represent a parsed ARM machine
    285 /// operand.
    286 class ARMOperand : public MCParsedAsmOperand {
    287   enum KindTy {
    288     k_CondCode,
    289     k_CCOut,
    290     k_ITCondMask,
    291     k_CoprocNum,
    292     k_CoprocReg,
    293     k_CoprocOption,
    294     k_Immediate,
    295     k_MemBarrierOpt,
    296     k_Memory,
    297     k_PostIndexRegister,
    298     k_MSRMask,
    299     k_ProcIFlags,
    300     k_VectorIndex,
    301     k_Register,
    302     k_RegisterList,
    303     k_DPRRegisterList,
    304     k_SPRRegisterList,
    305     k_VectorList,
    306     k_VectorListAllLanes,
    307     k_VectorListIndexed,
    308     k_ShiftedRegister,
    309     k_ShiftedImmediate,
    310     k_ShifterImmediate,
    311     k_RotateImmediate,
    312     k_BitfieldDescriptor,
    313     k_Token
    314   } Kind;
    315 
    316   SMLoc StartLoc, EndLoc;
    317   SmallVector<unsigned, 8> Registers;
    318 
    319   struct CCOp {
    320     ARMCC::CondCodes Val;
    321   };
    322 
    323   struct CopOp {
    324     unsigned Val;
    325   };
    326 
    327   struct CoprocOptionOp {
    328     unsigned Val;
    329   };
    330 
    331   struct ITMaskOp {
    332     unsigned Mask:4;
    333   };
    334 
    335   struct MBOptOp {
    336     ARM_MB::MemBOpt Val;
    337   };
    338 
    339   struct IFlagsOp {
    340     ARM_PROC::IFlags Val;
    341   };
    342 
    343   struct MMaskOp {
    344     unsigned Val;
    345   };
    346 
    347   struct TokOp {
    348     const char *Data;
    349     unsigned Length;
    350   };
    351 
    352   struct RegOp {
    353     unsigned RegNum;
    354   };
    355 
    356   // A vector register list is a sequential list of 1 to 4 registers.
    357   struct VectorListOp {
    358     unsigned RegNum;
    359     unsigned Count;
    360     unsigned LaneIndex;
    361     bool isDoubleSpaced;
    362   };
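           // For example, in "vld1.32 {d0, d1}, [r0]" the list has Count == 2
           // and is single-spaced, "vld2.16 {d0, d2}, [r0]" is double-spaced,
           // and "vld1.8 {d0[3]}, [r0]" has Count == 1 with LaneIndex == 3.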
    363 
    364   struct VectorIndexOp {
    365     unsigned Val;
    366   };
    367 
    368   struct ImmOp {
    369     const MCExpr *Val;
    370   };
    371 
    372   /// Combined record for all forms of ARM address expressions.
    373   struct MemoryOp {
    374     unsigned BaseRegNum;
    375     // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
    376     // was specified.
    377     const MCConstantExpr *OffsetImm;  // Offset immediate value
    378     unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
    379     ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
    380     unsigned ShiftImm;        // shift for OffsetReg.
    381     unsigned Alignment;       // 0 = no alignment specified
    382     // n = alignment in bytes (2, 4, 8, 16, or 32)
    383     unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
    384   };
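           // Covers forms such as "[r0]", "[r0, #-8]", "[r0, r1, lsl #2]",
           // "[r0, -r1]" (isNegative), plus an optional alignment specifier
           // (e.g. ":128") for the NEON loads and stores.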
    385 
    386   struct PostIdxRegOp {
    387     unsigned RegNum;
    388     bool isAdd;
    389     ARM_AM::ShiftOpc ShiftTy;
    390     unsigned ShiftImm;
    391   };
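           // Post-indexed register offsets, e.g. "ldr r0, [r1], r2",
           // "ldr r0, [r1], -r2" or "ldr r0, [r1], r2, lsl #2".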
    392 
    393   struct ShifterImmOp {
    394     bool isASR;
    395     unsigned Imm;
    396   };
    397 
    398   struct RegShiftedRegOp {
    399     ARM_AM::ShiftOpc ShiftTy;
    400     unsigned SrcReg;
    401     unsigned ShiftReg;
    402     unsigned ShiftImm;
    403   };
    404 
    405   struct RegShiftedImmOp {
    406     ARM_AM::ShiftOpc ShiftTy;
    407     unsigned SrcReg;
    408     unsigned ShiftImm;
    409   };
    410 
    411   struct RotImmOp {
    412     unsigned Imm;
    413   };
    414 
    415   struct BitfieldOp {
    416     unsigned LSB;
    417     unsigned Width;
    418   };
    419 
    420   union {
    421     struct CCOp CC;
    422     struct CopOp Cop;
    423     struct CoprocOptionOp CoprocOption;
    424     struct MBOptOp MBOpt;
    425     struct ITMaskOp ITMask;
    426     struct IFlagsOp IFlags;
    427     struct MMaskOp MMask;
    428     struct TokOp Tok;
    429     struct RegOp Reg;
    430     struct VectorListOp VectorList;
    431     struct VectorIndexOp VectorIndex;
    432     struct ImmOp Imm;
    433     struct MemoryOp Memory;
    434     struct PostIdxRegOp PostIdxReg;
    435     struct ShifterImmOp ShifterImm;
    436     struct RegShiftedRegOp RegShiftedReg;
    437     struct RegShiftedImmOp RegShiftedImm;
    438     struct RotImmOp RotImm;
    439     struct BitfieldOp Bitfield;
    440   };
    441 
    442   ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
    443 public:
    444   ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    445     Kind = o.Kind;
    446     StartLoc = o.StartLoc;
    447     EndLoc = o.EndLoc;
    448     switch (Kind) {
    449     case k_CondCode:
    450       CC = o.CC;
    451       break;
    452     case k_ITCondMask:
    453       ITMask = o.ITMask;
    454       break;
    455     case k_Token:
    456       Tok = o.Tok;
    457       break;
    458     case k_CCOut:
    459     case k_Register:
    460       Reg = o.Reg;
    461       break;
    462     case k_RegisterList:
    463     case k_DPRRegisterList:
    464     case k_SPRRegisterList:
    465       Registers = o.Registers;
    466       break;
    467     case k_VectorList:
    468     case k_VectorListAllLanes:
    469     case k_VectorListIndexed:
    470       VectorList = o.VectorList;
    471       break;
    472     case k_CoprocNum:
    473     case k_CoprocReg:
    474       Cop = o.Cop;
    475       break;
    476     case k_CoprocOption:
    477       CoprocOption = o.CoprocOption;
    478       break;
    479     case k_Immediate:
    480       Imm = o.Imm;
    481       break;
    482     case k_MemBarrierOpt:
    483       MBOpt = o.MBOpt;
    484       break;
    485     case k_Memory:
    486       Memory = o.Memory;
    487       break;
    488     case k_PostIndexRegister:
    489       PostIdxReg = o.PostIdxReg;
    490       break;
    491     case k_MSRMask:
    492       MMask = o.MMask;
    493       break;
    494     case k_ProcIFlags:
    495       IFlags = o.IFlags;
    496       break;
    497     case k_ShifterImmediate:
    498       ShifterImm = o.ShifterImm;
    499       break;
    500     case k_ShiftedRegister:
    501       RegShiftedReg = o.RegShiftedReg;
    502       break;
    503     case k_ShiftedImmediate:
    504       RegShiftedImm = o.RegShiftedImm;
    505       break;
    506     case k_RotateImmediate:
    507       RotImm = o.RotImm;
    508       break;
    509     case k_BitfieldDescriptor:
    510       Bitfield = o.Bitfield;
    511       break;
    512     case k_VectorIndex:
    513       VectorIndex = o.VectorIndex;
    514       break;
    515     }
    516   }
    517 
    518   /// getStartLoc - Get the location of the first token of this operand.
    519   SMLoc getStartLoc() const { return StartLoc; }
    520   /// getEndLoc - Get the location of the last token of this operand.
    521   SMLoc getEndLoc() const { return EndLoc; }
    522   /// getLocRange - Get the range between the first and last token of this
    523   /// operand.
    524   SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
    525 
    526   ARMCC::CondCodes getCondCode() const {
    527     assert(Kind == k_CondCode && "Invalid access!");
    528     return CC.Val;
    529   }
    530 
    531   unsigned getCoproc() const {
    532     assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    533     return Cop.Val;
    534   }
    535 
    536   StringRef getToken() const {
    537     assert(Kind == k_Token && "Invalid access!");
    538     return StringRef(Tok.Data, Tok.Length);
    539   }
    540 
    541   unsigned getReg() const {
    542     assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    543     return Reg.RegNum;
    544   }
    545 
    546   const SmallVectorImpl<unsigned> &getRegList() const {
    547     assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
    548             Kind == k_SPRRegisterList) && "Invalid access!");
    549     return Registers;
    550   }
    551 
    552   const MCExpr *getImm() const {
    553     assert(isImm() && "Invalid access!");
    554     return Imm.Val;
    555   }
    556 
    557   unsigned getVectorIndex() const {
    558     assert(Kind == k_VectorIndex && "Invalid access!");
    559     return VectorIndex.Val;
    560   }
    561 
    562   ARM_MB::MemBOpt getMemBarrierOpt() const {
    563     assert(Kind == k_MemBarrierOpt && "Invalid access!");
    564     return MBOpt.Val;
    565   }
    566 
    567   ARM_PROC::IFlags getProcIFlags() const {
    568     assert(Kind == k_ProcIFlags && "Invalid access!");
    569     return IFlags.Val;
    570   }
    571 
    572   unsigned getMSRMask() const {
    573     assert(Kind == k_MSRMask && "Invalid access!");
    574     return MMask.Val;
    575   }
    576 
    577   bool isCoprocNum() const { return Kind == k_CoprocNum; }
    578   bool isCoprocReg() const { return Kind == k_CoprocReg; }
    579   bool isCoprocOption() const { return Kind == k_CoprocOption; }
    580   bool isCondCode() const { return Kind == k_CondCode; }
    581   bool isCCOut() const { return Kind == k_CCOut; }
    582   bool isITMask() const { return Kind == k_ITCondMask; }
    583   bool isITCondCode() const { return Kind == k_CondCode; }
    584   bool isImm() const { return Kind == k_Immediate; }
    585   bool isFPImm() const {
    586     if (!isImm()) return false;
    587     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    588     if (!CE) return false;
    589     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    590     return Val != -1;
    591   }
    592   bool isFBits16() const {
    593     if (!isImm()) return false;
    594     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    595     if (!CE) return false;
    596     int64_t Value = CE->getValue();
    597     return Value >= 0 && Value <= 16;
    598   }
    599   bool isFBits32() const {
    600     if (!isImm()) return false;
    601     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    602     if (!CE) return false;
    603     int64_t Value = CE->getValue();
    604     return Value >= 1 && Value <= 32;
    605   }
    606   bool isImm8s4() const {
    607     if (!isImm()) return false;
    608     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    609     if (!CE) return false;
    610     int64_t Value = CE->getValue();
    611     return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
    612   }
    613   bool isImm0_1020s4() const {
    614     if (!isImm()) return false;
    615     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    616     if (!CE) return false;
    617     int64_t Value = CE->getValue();
    618     return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
    619   }
    620   bool isImm0_508s4() const {
    621     if (!isImm()) return false;
    622     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    623     if (!CE) return false;
    624     int64_t Value = CE->getValue();
    625     return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
    626   }
    627   bool isImm0_508s4Neg() const {
    628     if (!isImm()) return false;
    629     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    630     if (!CE) return false;
    631     int64_t Value = -CE->getValue();
     632     // Explicitly exclude zero. We want that to use the normal 0_508 version.
    633     return ((Value & 3) == 0) && Value > 0 && Value <= 508;
    634   }
    635   bool isImm0_255() const {
    636     if (!isImm()) return false;
    637     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    638     if (!CE) return false;
    639     int64_t Value = CE->getValue();
    640     return Value >= 0 && Value < 256;
    641   }
    642   bool isImm0_4095() const {
    643     if (!isImm()) return false;
    644     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    645     if (!CE) return false;
    646     int64_t Value = CE->getValue();
    647     return Value >= 0 && Value < 4096;
    648   }
    649   bool isImm0_4095Neg() const {
    650     if (!isImm()) return false;
    651     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    652     if (!CE) return false;
    653     int64_t Value = -CE->getValue();
    654     return Value > 0 && Value < 4096;
    655   }
    656   bool isImm0_1() const {
    657     if (!isImm()) return false;
    658     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    659     if (!CE) return false;
    660     int64_t Value = CE->getValue();
    661     return Value >= 0 && Value < 2;
    662   }
    663   bool isImm0_3() const {
    664     if (!isImm()) return false;
    665     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    666     if (!CE) return false;
    667     int64_t Value = CE->getValue();
    668     return Value >= 0 && Value < 4;
    669   }
    670   bool isImm0_7() const {
    671     if (!isImm()) return false;
    672     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    673     if (!CE) return false;
    674     int64_t Value = CE->getValue();
    675     return Value >= 0 && Value < 8;
    676   }
    677   bool isImm0_15() const {
    678     if (!isImm()) return false;
    679     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    680     if (!CE) return false;
    681     int64_t Value = CE->getValue();
    682     return Value >= 0 && Value < 16;
    683   }
    684   bool isImm0_31() const {
    685     if (!isImm()) return false;
    686     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    687     if (!CE) return false;
    688     int64_t Value = CE->getValue();
    689     return Value >= 0 && Value < 32;
    690   }
    691   bool isImm0_63() const {
    692     if (!isImm()) return false;
    693     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    694     if (!CE) return false;
    695     int64_t Value = CE->getValue();
    696     return Value >= 0 && Value < 64;
    697   }
    698   bool isImm8() const {
    699     if (!isImm()) return false;
    700     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    701     if (!CE) return false;
    702     int64_t Value = CE->getValue();
    703     return Value == 8;
    704   }
    705   bool isImm16() const {
    706     if (!isImm()) return false;
    707     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    708     if (!CE) return false;
    709     int64_t Value = CE->getValue();
    710     return Value == 16;
    711   }
    712   bool isImm32() const {
    713     if (!isImm()) return false;
    714     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    715     if (!CE) return false;
    716     int64_t Value = CE->getValue();
    717     return Value == 32;
    718   }
    719   bool isShrImm8() const {
    720     if (!isImm()) return false;
    721     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    722     if (!CE) return false;
    723     int64_t Value = CE->getValue();
    724     return Value > 0 && Value <= 8;
    725   }
    726   bool isShrImm16() const {
    727     if (!isImm()) return false;
    728     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    729     if (!CE) return false;
    730     int64_t Value = CE->getValue();
    731     return Value > 0 && Value <= 16;
    732   }
    733   bool isShrImm32() const {
    734     if (!isImm()) return false;
    735     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    736     if (!CE) return false;
    737     int64_t Value = CE->getValue();
    738     return Value > 0 && Value <= 32;
    739   }
    740   bool isShrImm64() const {
    741     if (!isImm()) return false;
    742     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    743     if (!CE) return false;
    744     int64_t Value = CE->getValue();
    745     return Value > 0 && Value <= 64;
    746   }
    747   bool isImm1_7() const {
    748     if (!isImm()) return false;
    749     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    750     if (!CE) return false;
    751     int64_t Value = CE->getValue();
    752     return Value > 0 && Value < 8;
    753   }
    754   bool isImm1_15() const {
    755     if (!isImm()) return false;
    756     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    757     if (!CE) return false;
    758     int64_t Value = CE->getValue();
    759     return Value > 0 && Value < 16;
    760   }
    761   bool isImm1_31() const {
    762     if (!isImm()) return false;
    763     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    764     if (!CE) return false;
    765     int64_t Value = CE->getValue();
    766     return Value > 0 && Value < 32;
    767   }
    768   bool isImm1_16() const {
    769     if (!isImm()) return false;
    770     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    771     if (!CE) return false;
    772     int64_t Value = CE->getValue();
    773     return Value > 0 && Value < 17;
    774   }
    775   bool isImm1_32() const {
    776     if (!isImm()) return false;
    777     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    778     if (!CE) return false;
    779     int64_t Value = CE->getValue();
    780     return Value > 0 && Value < 33;
    781   }
    782   bool isImm0_32() const {
    783     if (!isImm()) return false;
    784     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    785     if (!CE) return false;
    786     int64_t Value = CE->getValue();
    787     return Value >= 0 && Value < 33;
    788   }
    789   bool isImm0_65535() const {
    790     if (!isImm()) return false;
    791     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    792     if (!CE) return false;
    793     int64_t Value = CE->getValue();
    794     return Value >= 0 && Value < 65536;
    795   }
    796   bool isImm0_65535Expr() const {
    797     if (!isImm()) return false;
    798     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    799     // If it's not a constant expression, it'll generate a fixup and be
    800     // handled later.
    801     if (!CE) return true;
    802     int64_t Value = CE->getValue();
    803     return Value >= 0 && Value < 65536;
    804   }
    805   bool isImm24bit() const {
    806     if (!isImm()) return false;
    807     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    808     if (!CE) return false;
    809     int64_t Value = CE->getValue();
    810     return Value >= 0 && Value <= 0xffffff;
    811   }
    812   bool isImmThumbSR() const {
    813     if (!isImm()) return false;
    814     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    815     if (!CE) return false;
    816     int64_t Value = CE->getValue();
    817     return Value > 0 && Value < 33;
    818   }
    819   bool isPKHLSLImm() const {
    820     if (!isImm()) return false;
    821     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    822     if (!CE) return false;
    823     int64_t Value = CE->getValue();
    824     return Value >= 0 && Value < 32;
    825   }
    826   bool isPKHASRImm() const {
    827     if (!isImm()) return false;
    828     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    829     if (!CE) return false;
    830     int64_t Value = CE->getValue();
    831     return Value > 0 && Value <= 32;
    832   }
    833   bool isAdrLabel() const {
    834     // If we have an immediate that's not a constant, treat it as a label
    835     // reference needing a fixup. If it is a constant, but it can't fit
    836     // into shift immediate encoding, we reject it.
    837     if (isImm() && !isa<MCConstantExpr>(getImm())) return true;
    838     else return (isARMSOImm() || isARMSOImmNeg());
    839   }
    840   bool isARMSOImm() const {
    841     if (!isImm()) return false;
    842     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    843     if (!CE) return false;
    844     int64_t Value = CE->getValue();
    845     return ARM_AM::getSOImmVal(Value) != -1;
    846   }
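           // An ARM so_imm is an 8-bit value rotated right by an even amount,
           // so e.g. 0xff, 0xff00 and 0xff000000 are encodable while 0x101
           // is not.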
    847   bool isARMSOImmNot() const {
    848     if (!isImm()) return false;
    849     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    850     if (!CE) return false;
    851     int64_t Value = CE->getValue();
    852     return ARM_AM::getSOImmVal(~Value) != -1;
    853   }
    854   bool isARMSOImmNeg() const {
    855     if (!isImm()) return false;
    856     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    857     if (!CE) return false;
    858     int64_t Value = CE->getValue();
    859     // Only use this when not representable as a plain so_imm.
    860     return ARM_AM::getSOImmVal(Value) == -1 &&
    861       ARM_AM::getSOImmVal(-Value) != -1;
    862   }
    863   bool isT2SOImm() const {
    864     if (!isImm()) return false;
    865     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    866     if (!CE) return false;
    867     int64_t Value = CE->getValue();
    868     return ARM_AM::getT2SOImmVal(Value) != -1;
    869   }
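           // A t2_so_imm is, roughly, an 8-bit value rotated within the word
           // or one of the replicated patterns 0x00XY00XY, 0xXY00XY00 and
           // 0xXYXYXYXY; e.g. 0x00ff00ff is encodable while 0x12345678 is not.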
    870   bool isT2SOImmNot() const {
    871     if (!isImm()) return false;
    872     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    873     if (!CE) return false;
    874     int64_t Value = CE->getValue();
    875     return ARM_AM::getT2SOImmVal(~Value) != -1;
    876   }
    877   bool isT2SOImmNeg() const {
    878     if (!isImm()) return false;
    879     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    880     if (!CE) return false;
    881     int64_t Value = CE->getValue();
    882     // Only use this when not representable as a plain so_imm.
    883     return ARM_AM::getT2SOImmVal(Value) == -1 &&
    884       ARM_AM::getT2SOImmVal(-Value) != -1;
    885   }
    886   bool isSetEndImm() const {
    887     if (!isImm()) return false;
    888     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    889     if (!CE) return false;
    890     int64_t Value = CE->getValue();
    891     return Value == 1 || Value == 0;
    892   }
    893   bool isReg() const { return Kind == k_Register; }
    894   bool isRegList() const { return Kind == k_RegisterList; }
    895   bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
    896   bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
    897   bool isToken() const { return Kind == k_Token; }
    898   bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
    899   bool isMem() const { return Kind == k_Memory; }
    900   bool isShifterImm() const { return Kind == k_ShifterImmediate; }
    901   bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
    902   bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
    903   bool isRotImm() const { return Kind == k_RotateImmediate; }
    904   bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
    905   bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
    906   bool isPostIdxReg() const {
     907     return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
    908   }
    909   bool isMemNoOffset(bool alignOK = false) const {
    910     if (!isMem())
    911       return false;
    912     // No offset of any kind.
    913     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
    914      (alignOK || Memory.Alignment == 0);
    915   }
    916   bool isMemPCRelImm12() const {
    917     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
    918       return false;
    919     // Base register must be PC.
    920     if (Memory.BaseRegNum != ARM::PC)
    921       return false;
    922     // Immediate offset in range [-4095, 4095].
    923     if (!Memory.OffsetImm) return true;
    924     int64_t Val = Memory.OffsetImm->getValue();
    925     return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
    926   }
    927   bool isAlignedMemory() const {
    928     return isMemNoOffset(true);
    929   }
    930   bool isAddrMode2() const {
    931     if (!isMem() || Memory.Alignment != 0) return false;
    932     // Check for register offset.
    933     if (Memory.OffsetRegNum) return true;
    934     // Immediate offset in range [-4095, 4095].
    935     if (!Memory.OffsetImm) return true;
    936     int64_t Val = Memory.OffsetImm->getValue();
    937     return Val > -4096 && Val < 4096;
    938   }
    939   bool isAM2OffsetImm() const {
    940     if (!isImm()) return false;
    941     // Immediate offset in range [-4095, 4095].
    942     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    943     if (!CE) return false;
    944     int64_t Val = CE->getValue();
    945     return Val > -4096 && Val < 4096;
    946   }
    947   bool isAddrMode3() const {
    948     // If we have an immediate that's not a constant, treat it as a label
    949     // reference needing a fixup. If it is a constant, it's something else
    950     // and we reject it.
    951     if (isImm() && !isa<MCConstantExpr>(getImm()))
    952       return true;
    953     if (!isMem() || Memory.Alignment != 0) return false;
    954     // No shifts are legal for AM3.
    955     if (Memory.ShiftType != ARM_AM::no_shift) return false;
    956     // Check for register offset.
    957     if (Memory.OffsetRegNum) return true;
    958     // Immediate offset in range [-255, 255].
    959     if (!Memory.OffsetImm) return true;
    960     int64_t Val = Memory.OffsetImm->getValue();
    961     // The #-0 offset is encoded as INT32_MIN, and we have to check
    962     // for this too.
    963     return (Val > -256 && Val < 256) || Val == INT32_MIN;
    964   }
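           // Addressing mode 3 (LDRH/LDRSB/LDRD and friends) takes a register
           // offset or an 8-bit immediate, never a shifted offset, e.g.
           // "ldrh r0, [r1, #-8]" or "ldrsb r0, [r1, r2]".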
    965   bool isAM3Offset() const {
    966     if (Kind != k_Immediate && Kind != k_PostIndexRegister)
    967       return false;
    968     if (Kind == k_PostIndexRegister)
    969       return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    970     // Immediate offset in range [-255, 255].
    971     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    972     if (!CE) return false;
    973     int64_t Val = CE->getValue();
    974     // Special case, #-0 is INT32_MIN.
    975     return (Val > -256 && Val < 256) || Val == INT32_MIN;
    976   }
    977   bool isAddrMode5() const {
    978     // If we have an immediate that's not a constant, treat it as a label
    979     // reference needing a fixup. If it is a constant, it's something else
    980     // and we reject it.
    981     if (isImm() && !isa<MCConstantExpr>(getImm()))
    982       return true;
    983     if (!isMem() || Memory.Alignment != 0) return false;
    984     // Check for register offset.
    985     if (Memory.OffsetRegNum) return false;
    986     // Immediate offset in range [-1020, 1020] and a multiple of 4.
    987     if (!Memory.OffsetImm) return true;
    988     int64_t Val = Memory.OffsetImm->getValue();
    989     return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
    990       Val == INT32_MIN;
    991   }
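           // Addressing mode 5 (VLDR/VSTR) takes an immediate-only offset,
           // a multiple of 4 in [-1020, 1020], e.g. "vldr d0, [r1, #-256]".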
    992   bool isMemTBB() const {
    993     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
    994         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
    995       return false;
    996     return true;
    997   }
    998   bool isMemTBH() const {
    999     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
   1000         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
    1001         Memory.Alignment != 0)
   1002       return false;
   1003     return true;
   1004   }
   1005   bool isMemRegOffset() const {
   1006     if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
   1007       return false;
   1008     return true;
   1009   }
   1010   bool isT2MemRegOffset() const {
   1011     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
   1012         Memory.Alignment != 0)
   1013       return false;
   1014     // Only lsl #{0, 1, 2, 3} allowed.
   1015     if (Memory.ShiftType == ARM_AM::no_shift)
   1016       return true;
   1017     if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
   1018       return false;
   1019     return true;
   1020   }
   1021   bool isMemThumbRR() const {
   1022     // Thumb reg+reg addressing is simple. Just two registers, a base and
   1023     // an offset. No shifts, negations or any other complicating factors.
   1024     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
   1025         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
   1026       return false;
   1027     return isARMLowRegister(Memory.BaseRegNum) &&
   1028       (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
   1029   }
   1030   bool isMemThumbRIs4() const {
   1031     if (!isMem() || Memory.OffsetRegNum != 0 ||
   1032         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
   1033       return false;
   1034     // Immediate offset, multiple of 4 in range [0, 124].
   1035     if (!Memory.OffsetImm) return true;
   1036     int64_t Val = Memory.OffsetImm->getValue();
   1037     return Val >= 0 && Val <= 124 && (Val % 4) == 0;
   1038   }
   1039   bool isMemThumbRIs2() const {
   1040     if (!isMem() || Memory.OffsetRegNum != 0 ||
   1041         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
   1042       return false;
    1043     // Immediate offset, multiple of 2 in range [0, 62].
   1044     if (!Memory.OffsetImm) return true;
   1045     int64_t Val = Memory.OffsetImm->getValue();
   1046     return Val >= 0 && Val <= 62 && (Val % 2) == 0;
   1047   }
   1048   bool isMemThumbRIs1() const {
   1049     if (!isMem() || Memory.OffsetRegNum != 0 ||
   1050         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
   1051       return false;
   1052     // Immediate offset in range [0, 31].
   1053     if (!Memory.OffsetImm) return true;
   1054     int64_t Val = Memory.OffsetImm->getValue();
   1055     return Val >= 0 && Val <= 31;
   1056   }
   1057   bool isMemThumbSPI() const {
   1058     if (!isMem() || Memory.OffsetRegNum != 0 ||
   1059         Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
   1060       return false;
   1061     // Immediate offset, multiple of 4 in range [0, 1020].
   1062     if (!Memory.OffsetImm) return true;
   1063     int64_t Val = Memory.OffsetImm->getValue();
   1064     return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
   1065   }
   1066   bool isMemImm8s4Offset() const {
   1067     // If we have an immediate that's not a constant, treat it as a label
   1068     // reference needing a fixup. If it is a constant, it's something else
   1069     // and we reject it.
   1070     if (isImm() && !isa<MCConstantExpr>(getImm()))
   1071       return true;
   1072     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
   1073       return false;
   1074     // Immediate offset a multiple of 4 in range [-1020, 1020].
   1075     if (!Memory.OffsetImm) return true;
   1076     int64_t Val = Memory.OffsetImm->getValue();
   1077     // Special case, #-0 is INT32_MIN.
   1078     return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
   1079   }
   1080   bool isMemImm0_1020s4Offset() const {
   1081     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
   1082       return false;
   1083     // Immediate offset a multiple of 4 in range [0, 1020].
   1084     if (!Memory.OffsetImm) return true;
   1085     int64_t Val = Memory.OffsetImm->getValue();
   1086     return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
   1087   }
   1088   bool isMemImm8Offset() const {
   1089     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
   1090       return false;
   1091     // Base reg of PC isn't allowed for these encodings.
   1092     if (Memory.BaseRegNum == ARM::PC) return false;
   1093     // Immediate offset in range [-255, 255].
   1094     if (!Memory.OffsetImm) return true;
   1095     int64_t Val = Memory.OffsetImm->getValue();
   1096     return (Val == INT32_MIN) || (Val > -256 && Val < 256);
   1097   }
   1098   bool isMemPosImm8Offset() const {
   1099     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
   1100       return false;
   1101     // Immediate offset in range [0, 255].
   1102     if (!Memory.OffsetImm) return true;
   1103     int64_t Val = Memory.OffsetImm->getValue();
   1104     return Val >= 0 && Val < 256;
   1105   }
   1106   bool isMemNegImm8Offset() const {
   1107     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
   1108       return false;
   1109     // Base reg of PC isn't allowed for these encodings.
   1110     if (Memory.BaseRegNum == ARM::PC) return false;
   1111     // Immediate offset in range [-255, -1].
   1112     if (!Memory.OffsetImm) return false;
   1113     int64_t Val = Memory.OffsetImm->getValue();
   1114     return (Val == INT32_MIN) || (Val > -256 && Val < 0);
   1115   }
   1116   bool isMemUImm12Offset() const {
   1117     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
   1118       return false;
   1119     // Immediate offset in range [0, 4095].
   1120     if (!Memory.OffsetImm) return true;
   1121     int64_t Val = Memory.OffsetImm->getValue();
   1122     return (Val >= 0 && Val < 4096);
   1123   }
   1124   bool isMemImm12Offset() const {
   1125     // If we have an immediate that's not a constant, treat it as a label
   1126     // reference needing a fixup. If it is a constant, it's something else
   1127     // and we reject it.
   1128     if (isImm() && !isa<MCConstantExpr>(getImm()))
   1129       return true;
   1130 
   1131     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
   1132       return false;
   1133     // Immediate offset in range [-4095, 4095].
   1134     if (!Memory.OffsetImm) return true;
   1135     int64_t Val = Memory.OffsetImm->getValue();
   1136     return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
   1137   }
   1138   bool isPostIdxImm8() const {
   1139     if (!isImm()) return false;
   1140     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1141     if (!CE) return false;
   1142     int64_t Val = CE->getValue();
   1143     return (Val > -256 && Val < 256) || (Val == INT32_MIN);
   1144   }
   1145   bool isPostIdxImm8s4() const {
   1146     if (!isImm()) return false;
   1147     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1148     if (!CE) return false;
   1149     int64_t Val = CE->getValue();
   1150     return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
   1151       (Val == INT32_MIN);
   1152   }
   1153 
   1154   bool isMSRMask() const { return Kind == k_MSRMask; }
   1155   bool isProcIFlags() const { return Kind == k_ProcIFlags; }
   1156 
   1157   // NEON operands.
   1158   bool isSingleSpacedVectorList() const {
   1159     return Kind == k_VectorList && !VectorList.isDoubleSpaced;
   1160   }
   1161   bool isDoubleSpacedVectorList() const {
   1162     return Kind == k_VectorList && VectorList.isDoubleSpaced;
   1163   }
   1164   bool isVecListOneD() const {
   1165     if (!isSingleSpacedVectorList()) return false;
   1166     return VectorList.Count == 1;
   1167   }
   1168 
   1169   bool isVecListDPair() const {
   1170     if (!isSingleSpacedVectorList()) return false;
   1171     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
   1172               .contains(VectorList.RegNum));
   1173   }
   1174 
   1175   bool isVecListThreeD() const {
   1176     if (!isSingleSpacedVectorList()) return false;
   1177     return VectorList.Count == 3;
   1178   }
   1179 
   1180   bool isVecListFourD() const {
   1181     if (!isSingleSpacedVectorList()) return false;
   1182     return VectorList.Count == 4;
   1183   }
   1184 
   1185   bool isVecListDPairSpaced() const {
   1186     if (isSingleSpacedVectorList()) return false;
   1187     return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
   1188               .contains(VectorList.RegNum));
   1189   }
   1190 
   1191   bool isVecListThreeQ() const {
   1192     if (!isDoubleSpacedVectorList()) return false;
   1193     return VectorList.Count == 3;
   1194   }
   1195 
   1196   bool isVecListFourQ() const {
   1197     if (!isDoubleSpacedVectorList()) return false;
   1198     return VectorList.Count == 4;
   1199   }
   1200 
   1201   bool isSingleSpacedVectorAllLanes() const {
   1202     return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
   1203   }
   1204   bool isDoubleSpacedVectorAllLanes() const {
   1205     return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
   1206   }
   1207   bool isVecListOneDAllLanes() const {
   1208     if (!isSingleSpacedVectorAllLanes()) return false;
   1209     return VectorList.Count == 1;
   1210   }
   1211 
   1212   bool isVecListDPairAllLanes() const {
   1213     if (!isSingleSpacedVectorAllLanes()) return false;
   1214     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
   1215               .contains(VectorList.RegNum));
   1216   }
   1217 
   1218   bool isVecListDPairSpacedAllLanes() const {
   1219     if (!isDoubleSpacedVectorAllLanes()) return false;
   1220     return VectorList.Count == 2;
   1221   }
   1222 
   1223   bool isVecListThreeDAllLanes() const {
   1224     if (!isSingleSpacedVectorAllLanes()) return false;
   1225     return VectorList.Count == 3;
   1226   }
   1227 
   1228   bool isVecListThreeQAllLanes() const {
   1229     if (!isDoubleSpacedVectorAllLanes()) return false;
   1230     return VectorList.Count == 3;
   1231   }
   1232 
   1233   bool isVecListFourDAllLanes() const {
   1234     if (!isSingleSpacedVectorAllLanes()) return false;
   1235     return VectorList.Count == 4;
   1236   }
   1237 
   1238   bool isVecListFourQAllLanes() const {
   1239     if (!isDoubleSpacedVectorAllLanes()) return false;
   1240     return VectorList.Count == 4;
   1241   }
   1242 
   1243   bool isSingleSpacedVectorIndexed() const {
   1244     return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
   1245   }
   1246   bool isDoubleSpacedVectorIndexed() const {
   1247     return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
   1248   }
   1249   bool isVecListOneDByteIndexed() const {
   1250     if (!isSingleSpacedVectorIndexed()) return false;
   1251     return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
   1252   }
   1253 
   1254   bool isVecListOneDHWordIndexed() const {
   1255     if (!isSingleSpacedVectorIndexed()) return false;
   1256     return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
   1257   }
   1258 
   1259   bool isVecListOneDWordIndexed() const {
   1260     if (!isSingleSpacedVectorIndexed()) return false;
   1261     return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
   1262   }
   1263 
   1264   bool isVecListTwoDByteIndexed() const {
   1265     if (!isSingleSpacedVectorIndexed()) return false;
   1266     return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
   1267   }
   1268 
   1269   bool isVecListTwoDHWordIndexed() const {
   1270     if (!isSingleSpacedVectorIndexed()) return false;
   1271     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
   1272   }
   1273 
   1274   bool isVecListTwoQWordIndexed() const {
   1275     if (!isDoubleSpacedVectorIndexed()) return false;
   1276     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
   1277   }
   1278 
   1279   bool isVecListTwoQHWordIndexed() const {
   1280     if (!isDoubleSpacedVectorIndexed()) return false;
   1281     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
   1282   }
   1283 
   1284   bool isVecListTwoDWordIndexed() const {
   1285     if (!isSingleSpacedVectorIndexed()) return false;
   1286     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
   1287   }
   1288 
   1289   bool isVecListThreeDByteIndexed() const {
   1290     if (!isSingleSpacedVectorIndexed()) return false;
   1291     return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
   1292   }
   1293 
   1294   bool isVecListThreeDHWordIndexed() const {
   1295     if (!isSingleSpacedVectorIndexed()) return false;
   1296     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
   1297   }
   1298 
   1299   bool isVecListThreeQWordIndexed() const {
   1300     if (!isDoubleSpacedVectorIndexed()) return false;
   1301     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
   1302   }
   1303 
   1304   bool isVecListThreeQHWordIndexed() const {
   1305     if (!isDoubleSpacedVectorIndexed()) return false;
   1306     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
   1307   }
   1308 
   1309   bool isVecListThreeDWordIndexed() const {
   1310     if (!isSingleSpacedVectorIndexed()) return false;
   1311     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
   1312   }
   1313 
   1314   bool isVecListFourDByteIndexed() const {
   1315     if (!isSingleSpacedVectorIndexed()) return false;
   1316     return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
   1317   }
   1318 
   1319   bool isVecListFourDHWordIndexed() const {
   1320     if (!isSingleSpacedVectorIndexed()) return false;
   1321     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
   1322   }
   1323 
   1324   bool isVecListFourQWordIndexed() const {
   1325     if (!isDoubleSpacedVectorIndexed()) return false;
   1326     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
   1327   }
   1328 
   1329   bool isVecListFourQHWordIndexed() const {
   1330     if (!isDoubleSpacedVectorIndexed()) return false;
   1331     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
   1332   }
   1333 
   1334   bool isVecListFourDWordIndexed() const {
   1335     if (!isSingleSpacedVectorIndexed()) return false;
   1336     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
   1337   }
   1338 
   1339   bool isVectorIndex8() const {
   1340     if (Kind != k_VectorIndex) return false;
   1341     return VectorIndex.Val < 8;
   1342   }
   1343   bool isVectorIndex16() const {
   1344     if (Kind != k_VectorIndex) return false;
   1345     return VectorIndex.Val < 4;
   1346   }
   1347   bool isVectorIndex32() const {
   1348     if (Kind != k_VectorIndex) return false;
   1349     return VectorIndex.Val < 2;
   1350   }
   1351 
   1352   bool isNEONi8splat() const {
   1353     if (!isImm()) return false;
   1354     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1355     // Must be a constant.
   1356     if (!CE) return false;
   1357     int64_t Value = CE->getValue();
    1358     // i8 value splatted across 8 bytes. The immediate is just the 8-bit
    1359     // value.
   1360     return Value >= 0 && Value < 256;
   1361   }
   1362 
   1363   bool isNEONi16splat() const {
   1364     if (!isImm()) return false;
   1365     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1366     // Must be a constant.
   1367     if (!CE) return false;
   1368     int64_t Value = CE->getValue();
   1369     // i16 value in the range [0,255] or [0x0100, 0xff00]
   1370     return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
   1371   }
   1372 
   1373   bool isNEONi32splat() const {
   1374     if (!isImm()) return false;
   1375     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1376     // Must be a constant.
   1377     if (!CE) return false;
   1378     int64_t Value = CE->getValue();
   1379     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
   1380     return (Value >= 0 && Value < 256) ||
   1381       (Value >= 0x0100 && Value <= 0xff00) ||
   1382       (Value >= 0x010000 && Value <= 0xff0000) ||
   1383       (Value >= 0x01000000 && Value <= 0xff000000);
   1384   }
   1385 
   1386   bool isNEONi32vmov() const {
   1387     if (!isImm()) return false;
   1388     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1389     // Must be a constant.
   1390     if (!CE) return false;
   1391     int64_t Value = CE->getValue();
    1392     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X;
   1393     // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
   1394     return (Value >= 0 && Value < 256) ||
   1395       (Value >= 0x0100 && Value <= 0xff00) ||
   1396       (Value >= 0x010000 && Value <= 0xff0000) ||
   1397       (Value >= 0x01000000 && Value <= 0xff000000) ||
   1398       (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
   1399       (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
   1400   }
   1401   bool isNEONi32vmovNeg() const {
   1402     if (!isImm()) return false;
   1403     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1404     // Must be a constant.
   1405     if (!CE) return false;
   1406     int64_t Value = ~CE->getValue();
    1407     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X;
   1408     // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
   1409     return (Value >= 0 && Value < 256) ||
   1410       (Value >= 0x0100 && Value <= 0xff00) ||
   1411       (Value >= 0x010000 && Value <= 0xff0000) ||
   1412       (Value >= 0x01000000 && Value <= 0xff000000) ||
   1413       (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
   1414       (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
   1415   }
   1416 
   1417   bool isNEONi64splat() const {
   1418     if (!isImm()) return false;
   1419     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1420     // Must be a constant.
   1421     if (!CE) return false;
   1422     uint64_t Value = CE->getValue();
   1423     // i64 value with each byte being either 0 or 0xff.
   1424     for (unsigned i = 0; i < 8; ++i)
   1425       if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
   1426     return true;
   1427   }
   1428 
   1429   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    1430     // Add as immediates when possible. A null MCExpr is treated as 0.
   1431     if (Expr == 0)
   1432       Inst.addOperand(MCOperand::CreateImm(0));
   1433     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
   1434       Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
   1435     else
   1436       Inst.addOperand(MCOperand::CreateExpr(Expr));
   1437   }
   1438 
   1439   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
   1440     assert(N == 2 && "Invalid number of operands!");
   1441     Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
   1442     unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
   1443     Inst.addOperand(MCOperand::CreateReg(RegNum));
   1444   }
   1445 
   1446   void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
   1447     assert(N == 1 && "Invalid number of operands!");
   1448     Inst.addOperand(MCOperand::CreateImm(getCoproc()));
   1449   }
   1450 
   1451   void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
   1452     assert(N == 1 && "Invalid number of operands!");
   1453     Inst.addOperand(MCOperand::CreateImm(getCoproc()));
   1454   }
   1455 
   1456   void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
   1457     assert(N == 1 && "Invalid number of operands!");
   1458     Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
   1459   }
   1460 
   1461   void addITMaskOperands(MCInst &Inst, unsigned N) const {
   1462     assert(N == 1 && "Invalid number of operands!");
   1463     Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
   1464   }
   1465 
   1466   void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
   1467     assert(N == 1 && "Invalid number of operands!");
   1468     Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
   1469   }
   1470 
   1471   void addCCOutOperands(MCInst &Inst, unsigned N) const {
   1472     assert(N == 1 && "Invalid number of operands!");
   1473     Inst.addOperand(MCOperand::CreateReg(getReg()));
   1474   }
   1475 
   1476   void addRegOperands(MCInst &Inst, unsigned N) const {
   1477     assert(N == 1 && "Invalid number of operands!");
   1478     Inst.addOperand(MCOperand::CreateReg(getReg()));
   1479   }
   1480 
   1481   void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
   1482     assert(N == 3 && "Invalid number of operands!");
   1483     assert(isRegShiftedReg() &&
   1484            "addRegShiftedRegOperands() on non RegShiftedReg!");
   1485     Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
   1486     Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
   1487     Inst.addOperand(MCOperand::CreateImm(
   1488       ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
   1489   }
   1490 
   1491   void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
   1492     assert(N == 2 && "Invalid number of operands!");
   1493     assert(isRegShiftedImm() &&
   1494            "addRegShiftedImmOperands() on non RegShiftedImm!");
   1495     Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
   1496     // Shift of #32 is encoded as 0 where permitted
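             // e.g., "r1, lsr #32" reaches here with ShiftImm == 32 and is emitted as 0.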
   1497     unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
   1498     Inst.addOperand(MCOperand::CreateImm(
   1499       ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
   1500   }
   1501 
   1502   void addShifterImmOperands(MCInst &Inst, unsigned N) const {
   1503     assert(N == 1 && "Invalid number of operands!");
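             // The shift kind lives in bit 5 of the immediate: e.g., "asr #1" packs to
             // (1 << 5) | 1 == 33, while "lsl #3" packs to 3.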
   1504     Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
   1505                                          ShifterImm.Imm));
   1506   }
   1507 
   1508   void addRegListOperands(MCInst &Inst, unsigned N) const {
   1509     assert(N == 1 && "Invalid number of operands!");
   1510     const SmallVectorImpl<unsigned> &RegList = getRegList();
   1511     for (SmallVectorImpl<unsigned>::const_iterator
   1512            I = RegList.begin(), E = RegList.end(); I != E; ++I)
   1513       Inst.addOperand(MCOperand::CreateReg(*I));
   1514   }
   1515 
   1516   void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
   1517     addRegListOperands(Inst, N);
   1518   }
   1519 
   1520   void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
   1521     addRegListOperands(Inst, N);
   1522   }
   1523 
   1524   void addRotImmOperands(MCInst &Inst, unsigned N) const {
   1525     assert(N == 1 && "Invalid number of operands!");
   1526     // Encoded as val>>3. The printer handles display as 8, 16, 24.
   1527     Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
   1528   }
   1529 
   1530   void addBitfieldOperands(MCInst &Inst, unsigned N) const {
   1531     assert(N == 1 && "Invalid number of operands!");
   1532     // Munge the lsb/width into a bitfield mask.
   1533     unsigned lsb = Bitfield.LSB;
   1534     unsigned width = Bitfield.Width;
   1535     // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
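             // e.g., lsb == 8 and width == 8 yield Mask == 0xffff00ff (bits 8-15 clear).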
   1536     uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
   1537                       (32 - (lsb + width)));
   1538     Inst.addOperand(MCOperand::CreateImm(Mask));
   1539   }
   1540 
   1541   void addImmOperands(MCInst &Inst, unsigned N) const {
   1542     assert(N == 1 && "Invalid number of operands!");
   1543     addExpr(Inst, getImm());
   1544   }
   1545 
   1546   void addFBits16Operands(MCInst &Inst, unsigned N) const {
   1547     assert(N == 1 && "Invalid number of operands!");
   1548     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1549     Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
   1550   }
   1551 
   1552   void addFBits32Operands(MCInst &Inst, unsigned N) const {
   1553     assert(N == 1 && "Invalid number of operands!");
   1554     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1555     Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
   1556   }
   1557 
   1558   void addFPImmOperands(MCInst &Inst, unsigned N) const {
   1559     assert(N == 1 && "Invalid number of operands!");
   1560     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1561     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
   1562     Inst.addOperand(MCOperand::CreateImm(Val));
   1563   }
   1564 
   1565   void addImm8s4Operands(MCInst &Inst, unsigned N) const {
   1566     assert(N == 1 && "Invalid number of operands!");
   1567     // FIXME: We really want to scale the value here, but the LDRD/STRD
    1568     // instructions don't encode operands that way yet.
   1569     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1570     Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
   1571   }
   1572 
   1573   void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
   1574     assert(N == 1 && "Invalid number of operands!");
   1575     // The immediate is scaled by four in the encoding and is stored
   1576     // in the MCInst as such. Lop off the low two bits here.
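             // e.g., #1020 is stored as 255.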
   1577     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1578     Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
   1579   }
   1580 
   1581   void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
   1582     assert(N == 1 && "Invalid number of operands!");
   1583     // The immediate is scaled by four in the encoding and is stored
   1584     // in the MCInst as such. Lop off the low two bits here.
   1585     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1586     Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
   1587   }
   1588 
   1589   void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
   1590     assert(N == 1 && "Invalid number of operands!");
   1591     // The immediate is scaled by four in the encoding and is stored
   1592     // in the MCInst as such. Lop off the low two bits here.
   1593     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1594     Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
   1595   }
   1596 
   1597   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
   1598     assert(N == 1 && "Invalid number of operands!");
   1599     // The constant encodes as the immediate-1, and we store in the instruction
   1600     // the bits as encoded, so subtract off one here.
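             // e.g., #1 is stored as 0 and #16 as 15.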
   1601     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1602     Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
   1603   }
   1604 
   1605   void addImm1_32Operands(MCInst &Inst, unsigned N) const {
   1606     assert(N == 1 && "Invalid number of operands!");
   1607     // The constant encodes as the immediate-1, and we store in the instruction
   1608     // the bits as encoded, so subtract off one here.
   1609     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1610     Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
   1611   }
   1612 
   1613   void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
   1614     assert(N == 1 && "Invalid number of operands!");
   1615     // The constant encodes as the immediate, except for 32, which encodes as
   1616     // zero.
   1617     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1618     unsigned Imm = CE->getValue();
   1619     Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
   1620   }
   1621 
   1622   void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
   1623     assert(N == 1 && "Invalid number of operands!");
   1624     // An ASR value of 32 encodes as 0, so that's how we want to add it to
   1625     // the instruction as well.
   1626     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1627     int Val = CE->getValue();
   1628     Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
   1629   }
   1630 
   1631   void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
   1632     assert(N == 1 && "Invalid number of operands!");
   1633     // The operand is actually a t2_so_imm, but we have its bitwise
   1634     // negation in the assembly source, so twiddle it here.
   1635     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1636     Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
   1637   }
   1638 
   1639   void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
   1640     assert(N == 1 && "Invalid number of operands!");
   1641     // The operand is actually a t2_so_imm, but we have its
   1642     // negation in the assembly source, so twiddle it here.
   1643     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1644     Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
   1645   }
   1646 
   1647   void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
   1648     assert(N == 1 && "Invalid number of operands!");
   1649     // The operand is actually an imm0_4095, but we have its
   1650     // negation in the assembly source, so twiddle it here.
   1651     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1652     Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
   1653   }
   1654 
   1655   void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
   1656     assert(N == 1 && "Invalid number of operands!");
   1657     // The operand is actually a so_imm, but we have its bitwise
   1658     // negation in the assembly source, so twiddle it here.
   1659     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1660     Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
   1661   }
   1662 
   1663   void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
   1664     assert(N == 1 && "Invalid number of operands!");
   1665     // The operand is actually a so_imm, but we have its
   1666     // negation in the assembly source, so twiddle it here.
   1667     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1668     Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
   1669   }
   1670 
   1671   void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
   1672     assert(N == 1 && "Invalid number of operands!");
   1673     Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
   1674   }
   1675 
   1676   void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
   1677     assert(N == 1 && "Invalid number of operands!");
   1678     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1679   }
   1680 
   1681   void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
   1682     assert(N == 1 && "Invalid number of operands!");
   1683     int32_t Imm = Memory.OffsetImm->getValue();
   1684     // FIXME: Handle #-0
   1685     if (Imm == INT32_MIN) Imm = 0;
   1686     Inst.addOperand(MCOperand::CreateImm(Imm));
   1687   }
   1688 
   1689   void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
   1690     assert(N == 1 && "Invalid number of operands!");
   1691     assert(isImm() && "Not an immediate!");
   1692 
   1693     // If we have an immediate that's not a constant, treat it as a label
   1694     // reference needing a fixup.
   1695     if (!isa<MCConstantExpr>(getImm())) {
   1696       Inst.addOperand(MCOperand::CreateExpr(getImm()));
   1697       return;
   1698     }
   1699 
   1700     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1701     int Val = CE->getValue();
   1702     Inst.addOperand(MCOperand::CreateImm(Val));
   1703   }
   1704 
   1705   void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
   1706     assert(N == 2 && "Invalid number of operands!");
   1707     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1708     Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
   1709   }
   1710 
   1711   void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
   1712     assert(N == 3 && "Invalid number of operands!");
   1713     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
   1714     if (!Memory.OffsetRegNum) {
   1715       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
   1716       // Special case for #-0
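               // (#-0 is represented as INT32_MIN by the operand parser.)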
   1717       if (Val == INT32_MIN) Val = 0;
   1718       if (Val < 0) Val = -Val;
   1719       Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
   1720     } else {
   1721       // For register offset, we encode the shift type and negation flag
   1722       // here.
   1723       Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
   1724                               Memory.ShiftImm, Memory.ShiftType);
   1725     }
   1726     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1727     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
   1728     Inst.addOperand(MCOperand::CreateImm(Val));
   1729   }
   1730 
   1731   void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
   1732     assert(N == 2 && "Invalid number of operands!");
   1733     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1734     assert(CE && "non-constant AM2OffsetImm operand!");
   1735     int32_t Val = CE->getValue();
   1736     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
   1737     // Special case for #-0
   1738     if (Val == INT32_MIN) Val = 0;
   1739     if (Val < 0) Val = -Val;
   1740     Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
   1741     Inst.addOperand(MCOperand::CreateReg(0));
   1742     Inst.addOperand(MCOperand::CreateImm(Val));
   1743   }
   1744 
   1745   void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
   1746     assert(N == 3 && "Invalid number of operands!");
   1747     // If we have an immediate that's not a constant, treat it as a label
   1748     // reference needing a fixup. If it is a constant, it's something else
   1749     // and we reject it.
   1750     if (isImm()) {
   1751       Inst.addOperand(MCOperand::CreateExpr(getImm()));
   1752       Inst.addOperand(MCOperand::CreateReg(0));
   1753       Inst.addOperand(MCOperand::CreateImm(0));
   1754       return;
   1755     }
   1756 
   1757     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
   1758     if (!Memory.OffsetRegNum) {
   1759       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
   1760       // Special case for #-0
   1761       if (Val == INT32_MIN) Val = 0;
   1762       if (Val < 0) Val = -Val;
   1763       Val = ARM_AM::getAM3Opc(AddSub, Val);
   1764     } else {
   1765       // For register offset, we encode the shift type and negation flag
   1766       // here.
   1767       Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
   1768     }
   1769     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1770     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
   1771     Inst.addOperand(MCOperand::CreateImm(Val));
   1772   }
   1773 
   1774   void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
   1775     assert(N == 2 && "Invalid number of operands!");
   1776     if (Kind == k_PostIndexRegister) {
   1777       int32_t Val =
   1778         ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
   1779       Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
   1780       Inst.addOperand(MCOperand::CreateImm(Val));
   1781       return;
   1782     }
   1783 
   1784     // Constant offset.
   1785     const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
   1786     int32_t Val = CE->getValue();
   1787     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
   1788     // Special case for #-0
   1789     if (Val == INT32_MIN) Val = 0;
   1790     if (Val < 0) Val = -Val;
   1791     Val = ARM_AM::getAM3Opc(AddSub, Val);
   1792     Inst.addOperand(MCOperand::CreateReg(0));
   1793     Inst.addOperand(MCOperand::CreateImm(Val));
   1794   }
   1795 
   1796   void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
   1797     assert(N == 2 && "Invalid number of operands!");
   1798     // If we have an immediate that's not a constant, treat it as a label
   1799     // reference needing a fixup. If it is a constant, it's something else
   1800     // and we reject it.
   1801     if (isImm()) {
   1802       Inst.addOperand(MCOperand::CreateExpr(getImm()));
   1803       Inst.addOperand(MCOperand::CreateImm(0));
   1804       return;
   1805     }
   1806 
   1807     // The lower two bits are always zero and as such are not encoded.
   1808     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
   1809     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
   1810     // Special case for #-0
   1811     if (Val == INT32_MIN) Val = 0;
   1812     if (Val < 0) Val = -Val;
   1813     Val = ARM_AM::getAM5Opc(AddSub, Val);
   1814     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1815     Inst.addOperand(MCOperand::CreateImm(Val));
   1816   }
   1817 
   1818   void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
   1819     assert(N == 2 && "Invalid number of operands!");
   1820     // If we have an immediate that's not a constant, treat it as a label
   1821     // reference needing a fixup. If it is a constant, it's something else
   1822     // and we reject it.
   1823     if (isImm()) {
   1824       Inst.addOperand(MCOperand::CreateExpr(getImm()));
   1825       Inst.addOperand(MCOperand::CreateImm(0));
   1826       return;
   1827     }
   1828 
   1829     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
   1830     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1831     Inst.addOperand(MCOperand::CreateImm(Val));
   1832   }
   1833 
   1834   void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
   1835     assert(N == 2 && "Invalid number of operands!");
   1836     // The lower two bits are always zero and as such are not encoded.
   1837     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
   1838     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1839     Inst.addOperand(MCOperand::CreateImm(Val));
   1840   }
   1841 
   1842   void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
   1843     assert(N == 2 && "Invalid number of operands!");
   1844     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
   1845     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1846     Inst.addOperand(MCOperand::CreateImm(Val));
   1847   }
   1848 
   1849   void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
   1850     addMemImm8OffsetOperands(Inst, N);
   1851   }
   1852 
   1853   void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
   1854     addMemImm8OffsetOperands(Inst, N);
   1855   }
   1856 
   1857   void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
   1858     assert(N == 2 && "Invalid number of operands!");
   1859     // If this is an immediate, it's a label reference.
   1860     if (isImm()) {
   1861       addExpr(Inst, getImm());
   1862       Inst.addOperand(MCOperand::CreateImm(0));
   1863       return;
   1864     }
   1865 
   1866     // Otherwise, it's a normal memory reg+offset.
   1867     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
   1868     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1869     Inst.addOperand(MCOperand::CreateImm(Val));
   1870   }
   1871 
   1872   void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
   1873     assert(N == 2 && "Invalid number of operands!");
   1874     // If this is an immediate, it's a label reference.
   1875     if (isImm()) {
   1876       addExpr(Inst, getImm());
   1877       Inst.addOperand(MCOperand::CreateImm(0));
   1878       return;
   1879     }
   1880 
   1881     // Otherwise, it's a normal memory reg+offset.
   1882     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
   1883     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1884     Inst.addOperand(MCOperand::CreateImm(Val));
   1885   }
   1886 
   1887   void addMemTBBOperands(MCInst &Inst, unsigned N) const {
   1888     assert(N == 2 && "Invalid number of operands!");
   1889     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1890     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
   1891   }
   1892 
   1893   void addMemTBHOperands(MCInst &Inst, unsigned N) const {
   1894     assert(N == 2 && "Invalid number of operands!");
   1895     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1896     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
   1897   }
   1898 
   1899   void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
   1900     assert(N == 3 && "Invalid number of operands!");
   1901     unsigned Val =
   1902       ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
   1903                         Memory.ShiftImm, Memory.ShiftType);
   1904     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1905     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
   1906     Inst.addOperand(MCOperand::CreateImm(Val));
   1907   }
   1908 
   1909   void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
   1910     assert(N == 3 && "Invalid number of operands!");
   1911     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1912     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
   1913     Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
   1914   }
   1915 
   1916   void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
   1917     assert(N == 2 && "Invalid number of operands!");
   1918     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1919     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
   1920   }
   1921 
   1922   void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
   1923     assert(N == 2 && "Invalid number of operands!");
   1924     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
   1925     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1926     Inst.addOperand(MCOperand::CreateImm(Val));
   1927   }
   1928 
   1929   void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
   1930     assert(N == 2 && "Invalid number of operands!");
   1931     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
   1932     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1933     Inst.addOperand(MCOperand::CreateImm(Val));
   1934   }
   1935 
   1936   void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
   1937     assert(N == 2 && "Invalid number of operands!");
   1938     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
   1939     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1940     Inst.addOperand(MCOperand::CreateImm(Val));
   1941   }
   1942 
   1943   void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
   1944     assert(N == 2 && "Invalid number of operands!");
   1945     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
   1946     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
   1947     Inst.addOperand(MCOperand::CreateImm(Val));
   1948   }
   1949 
   1950   void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
   1951     assert(N == 1 && "Invalid number of operands!");
   1952     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1953     assert(CE && "non-constant post-idx-imm8 operand!");
   1954     int Imm = CE->getValue();
   1955     bool isAdd = Imm >= 0;
   1956     if (Imm == INT32_MIN) Imm = 0;
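             // e.g., #4 packs to (4 | 0x100) and #-4 packs to just 4.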
   1957     Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
   1958     Inst.addOperand(MCOperand::CreateImm(Imm));
   1959   }
   1960 
   1961   void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
   1962     assert(N == 1 && "Invalid number of operands!");
   1963     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   1964     assert(CE && "non-constant post-idx-imm8s4 operand!");
   1965     int Imm = CE->getValue();
   1966     bool isAdd = Imm >= 0;
   1967     if (Imm == INT32_MIN) Imm = 0;
   1968     // Immediate is scaled by 4.
   1969     Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
   1970     Inst.addOperand(MCOperand::CreateImm(Imm));
   1971   }
   1972 
   1973   void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
   1974     assert(N == 2 && "Invalid number of operands!");
   1975     Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
   1976     Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
   1977   }
   1978 
   1979   void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
   1980     assert(N == 2 && "Invalid number of operands!");
   1981     Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
   1982     // The sign, shift type, and shift amount are encoded in a single operand
   1983     // using the AM2 encoding helpers.
   1984     ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
   1985     unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
   1986                                      PostIdxReg.ShiftTy);
   1987     Inst.addOperand(MCOperand::CreateImm(Imm));
   1988   }
   1989 
   1990   void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
   1991     assert(N == 1 && "Invalid number of operands!");
   1992     Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
   1993   }
   1994 
   1995   void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
   1996     assert(N == 1 && "Invalid number of operands!");
   1997     Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
   1998   }
   1999 
   2000   void addVecListOperands(MCInst &Inst, unsigned N) const {
   2001     assert(N == 1 && "Invalid number of operands!");
   2002     Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
   2003   }
   2004 
   2005   void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
   2006     assert(N == 2 && "Invalid number of operands!");
   2007     Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
   2008     Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
   2009   }
   2010 
   2011   void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
   2012     assert(N == 1 && "Invalid number of operands!");
   2013     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
   2014   }
   2015 
   2016   void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
   2017     assert(N == 1 && "Invalid number of operands!");
   2018     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
   2019   }
   2020 
   2021   void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
   2022     assert(N == 1 && "Invalid number of operands!");
   2023     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
   2024   }
   2025 
   2026   void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
   2027     assert(N == 1 && "Invalid number of operands!");
   2028     // The immediate encodes the type of constant as well as the value.
   2029     // Mask in that this is an i8 splat.
   2030     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   2031     Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
   2032   }
   2033 
   2034   void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
   2035     assert(N == 1 && "Invalid number of operands!");
   2036     // The immediate encodes the type of constant as well as the value.
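             // e.g., a splat of 0x1200 encodes as 0xa12 and a splat of 0x12 as 0x812.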
   2037     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   2038     unsigned Value = CE->getValue();
   2039     if (Value >= 256)
   2040       Value = (Value >> 8) | 0xa00;
   2041     else
   2042       Value |= 0x800;
   2043     Inst.addOperand(MCOperand::CreateImm(Value));
   2044   }
   2045 
   2046   void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
   2047     assert(N == 1 && "Invalid number of operands!");
   2048     // The immediate encodes the type of constant as well as the value.
   2049     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   2050     unsigned Value = CE->getValue();
   2051     if (Value >= 256 && Value <= 0xff00)
   2052       Value = (Value >> 8) | 0x200;
   2053     else if (Value > 0xffff && Value <= 0xff0000)
   2054       Value = (Value >> 16) | 0x400;
   2055     else if (Value > 0xffffff)
   2056       Value = (Value >> 24) | 0x600;
   2057     Inst.addOperand(MCOperand::CreateImm(Value));
   2058   }
   2059 
   2060   void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
   2061     assert(N == 1 && "Invalid number of operands!");
   2062     // The immediate encodes the type of constant as well as the value.
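             // e.g., 0x1200 encodes as 0x212 and 0x12ff (trailing ones) as 0xc12.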
   2063     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   2064     unsigned Value = CE->getValue();
   2065     if (Value >= 256 && Value <= 0xffff)
   2066       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
   2067     else if (Value > 0xffff && Value <= 0xffffff)
   2068       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
   2069     else if (Value > 0xffffff)
   2070       Value = (Value >> 24) | 0x600;
   2071     Inst.addOperand(MCOperand::CreateImm(Value));
   2072   }
   2073 
   2074   void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
   2075     assert(N == 1 && "Invalid number of operands!");
   2076     // The immediate encodes the type of constant as well as the value.
   2077     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   2078     unsigned Value = ~CE->getValue();
   2079     if (Value >= 256 && Value <= 0xffff)
   2080       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
   2081     else if (Value > 0xffff && Value <= 0xffffff)
   2082       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
   2083     else if (Value > 0xffffff)
   2084       Value = (Value >> 24) | 0x600;
   2085     Inst.addOperand(MCOperand::CreateImm(Value));
   2086   }
   2087 
   2088   void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
   2089     assert(N == 1 && "Invalid number of operands!");
   2090     // The immediate encodes the type of constant as well as the value.
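             // e.g., 0x00ff00ff00ff00ff yields Imm == 0x55 before the 0x1e00 tag is ORed in.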
   2091     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
   2092     uint64_t Value = CE->getValue();
   2093     unsigned Imm = 0;
   2094     for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
   2095       Imm |= (Value & 1) << i;
   2096     }
   2097     Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
   2098   }
   2099 
   2100   virtual void print(raw_ostream &OS) const;
   2101 
   2102   static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
   2103     ARMOperand *Op = new ARMOperand(k_ITCondMask);
   2104     Op->ITMask.Mask = Mask;
   2105     Op->StartLoc = S;
   2106     Op->EndLoc = S;
   2107     return Op;
   2108   }
   2109 
   2110   static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
   2111     ARMOperand *Op = new ARMOperand(k_CondCode);
   2112     Op->CC.Val = CC;
   2113     Op->StartLoc = S;
   2114     Op->EndLoc = S;
   2115     return Op;
   2116   }
   2117 
   2118   static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
   2119     ARMOperand *Op = new ARMOperand(k_CoprocNum);
   2120     Op->Cop.Val = CopVal;
   2121     Op->StartLoc = S;
   2122     Op->EndLoc = S;
   2123     return Op;
   2124   }
   2125 
   2126   static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
   2127     ARMOperand *Op = new ARMOperand(k_CoprocReg);
   2128     Op->Cop.Val = CopVal;
   2129     Op->StartLoc = S;
   2130     Op->EndLoc = S;
   2131     return Op;
   2132   }
   2133 
   2134   static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
   2135     ARMOperand *Op = new ARMOperand(k_CoprocOption);
   2136     Op->Cop.Val = Val;
   2137     Op->StartLoc = S;
   2138     Op->EndLoc = E;
   2139     return Op;
   2140   }
   2141 
   2142   static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
   2143     ARMOperand *Op = new ARMOperand(k_CCOut);
   2144     Op->Reg.RegNum = RegNum;
   2145     Op->StartLoc = S;
   2146     Op->EndLoc = S;
   2147     return Op;
   2148   }
   2149 
   2150   static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
   2151     ARMOperand *Op = new ARMOperand(k_Token);
   2152     Op->Tok.Data = Str.data();
   2153     Op->Tok.Length = Str.size();
   2154     Op->StartLoc = S;
   2155     Op->EndLoc = S;
   2156     return Op;
   2157   }
   2158 
   2159   static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
   2160     ARMOperand *Op = new ARMOperand(k_Register);
   2161     Op->Reg.RegNum = RegNum;
   2162     Op->StartLoc = S;
   2163     Op->EndLoc = E;
   2164     return Op;
   2165   }
   2166 
   2167   static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
   2168                                            unsigned SrcReg,
   2169                                            unsigned ShiftReg,
   2170                                            unsigned ShiftImm,
   2171                                            SMLoc S, SMLoc E) {
   2172     ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
   2173     Op->RegShiftedReg.ShiftTy = ShTy;
   2174     Op->RegShiftedReg.SrcReg = SrcReg;
   2175     Op->RegShiftedReg.ShiftReg = ShiftReg;
   2176     Op->RegShiftedReg.ShiftImm = ShiftImm;
   2177     Op->StartLoc = S;
   2178     Op->EndLoc = E;
   2179     return Op;
   2180   }
   2181 
   2182   static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
   2183                                             unsigned SrcReg,
   2184                                             unsigned ShiftImm,
   2185                                             SMLoc S, SMLoc E) {
   2186     ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
   2187     Op->RegShiftedImm.ShiftTy = ShTy;
   2188     Op->RegShiftedImm.SrcReg = SrcReg;
   2189     Op->RegShiftedImm.ShiftImm = ShiftImm;
   2190     Op->StartLoc = S;
   2191     Op->EndLoc = E;
   2192     return Op;
   2193   }
   2194 
   2195   static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
   2196                                    SMLoc S, SMLoc E) {
   2197     ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
   2198     Op->ShifterImm.isASR = isASR;
   2199     Op->ShifterImm.Imm = Imm;
   2200     Op->StartLoc = S;
   2201     Op->EndLoc = E;
   2202     return Op;
   2203   }
   2204 
   2205   static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
   2206     ARMOperand *Op = new ARMOperand(k_RotateImmediate);
   2207     Op->RotImm.Imm = Imm;
   2208     Op->StartLoc = S;
   2209     Op->EndLoc = E;
   2210     return Op;
   2211   }
   2212 
   2213   static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
   2214                                     SMLoc S, SMLoc E) {
   2215     ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
   2216     Op->Bitfield.LSB = LSB;
   2217     Op->Bitfield.Width = Width;
   2218     Op->StartLoc = S;
   2219     Op->EndLoc = E;
   2220     return Op;
   2221   }
   2222 
   2223   static ARMOperand *
   2224   CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
   2225                 SMLoc StartLoc, SMLoc EndLoc) {
   2226     KindTy Kind = k_RegisterList;
   2227 
   2228     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
   2229       Kind = k_DPRRegisterList;
   2230     else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
   2231              contains(Regs.front().first))
   2232       Kind = k_SPRRegisterList;
   2233 
   2234     ARMOperand *Op = new ARMOperand(Kind);
   2235     for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
   2236            I = Regs.begin(), E = Regs.end(); I != E; ++I)
   2237       Op->Registers.push_back(I->first);
   2238     array_pod_sort(Op->Registers.begin(), Op->Registers.end());
   2239     Op->StartLoc = StartLoc;
   2240     Op->EndLoc = EndLoc;
   2241     return Op;
   2242   }
   2243 
   2244   static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
   2245                                       bool isDoubleSpaced, SMLoc S, SMLoc E) {
   2246     ARMOperand *Op = new ARMOperand(k_VectorList);
   2247     Op->VectorList.RegNum = RegNum;
   2248     Op->VectorList.Count = Count;
   2249     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
   2250     Op->StartLoc = S;
   2251     Op->EndLoc = E;
   2252     return Op;
   2253   }
   2254 
   2255   static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
   2256                                               bool isDoubleSpaced,
   2257                                               SMLoc S, SMLoc E) {
   2258     ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
   2259     Op->VectorList.RegNum = RegNum;
   2260     Op->VectorList.Count = Count;
   2261     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
   2262     Op->StartLoc = S;
   2263     Op->EndLoc = E;
   2264     return Op;
   2265   }
   2266 
   2267   static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
   2268                                              unsigned Index,
   2269                                              bool isDoubleSpaced,
   2270                                              SMLoc S, SMLoc E) {
   2271     ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
   2272     Op->VectorList.RegNum = RegNum;
   2273     Op->VectorList.Count = Count;
   2274     Op->VectorList.LaneIndex = Index;
   2275     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
   2276     Op->StartLoc = S;
   2277     Op->EndLoc = E;
   2278     return Op;
   2279   }
   2280 
   2281   static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
   2282                                        MCContext &Ctx) {
   2283     ARMOperand *Op = new ARMOperand(k_VectorIndex);
   2284     Op->VectorIndex.Val = Idx;
   2285     Op->StartLoc = S;
   2286     Op->EndLoc = E;
   2287     return Op;
   2288   }
   2289 
   2290   static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
   2291     ARMOperand *Op = new ARMOperand(k_Immediate);
   2292     Op->Imm.Val = Val;
   2293     Op->StartLoc = S;
   2294     Op->EndLoc = E;
   2295     return Op;
   2296   }
   2297 
   2298   static ARMOperand *CreateMem(unsigned BaseRegNum,
   2299                                const MCConstantExpr *OffsetImm,
   2300                                unsigned OffsetRegNum,
   2301                                ARM_AM::ShiftOpc ShiftType,
   2302                                unsigned ShiftImm,
   2303                                unsigned Alignment,
   2304                                bool isNegative,
   2305                                SMLoc S, SMLoc E) {
   2306     ARMOperand *Op = new ARMOperand(k_Memory);
   2307     Op->Memory.BaseRegNum = BaseRegNum;
   2308     Op->Memory.OffsetImm = OffsetImm;
   2309     Op->Memory.OffsetRegNum = OffsetRegNum;
   2310     Op->Memory.ShiftType = ShiftType;
   2311     Op->Memory.ShiftImm = ShiftImm;
   2312     Op->Memory.Alignment = Alignment;
   2313     Op->Memory.isNegative = isNegative;
   2314     Op->StartLoc = S;
   2315     Op->EndLoc = E;
   2316     return Op;
   2317   }
   2318 
   2319   static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
   2320                                       ARM_AM::ShiftOpc ShiftTy,
   2321                                       unsigned ShiftImm,
   2322                                       SMLoc S, SMLoc E) {
   2323     ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
   2324     Op->PostIdxReg.RegNum = RegNum;
   2325     Op->PostIdxReg.isAdd = isAdd;
   2326     Op->PostIdxReg.ShiftTy = ShiftTy;
   2327     Op->PostIdxReg.ShiftImm = ShiftImm;
   2328     Op->StartLoc = S;
   2329     Op->EndLoc = E;
   2330     return Op;
   2331   }
   2332 
   2333   static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
   2334     ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
   2335     Op->MBOpt.Val = Opt;
   2336     Op->StartLoc = S;
   2337     Op->EndLoc = S;
   2338     return Op;
   2339   }
   2340 
   2341   static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
   2342     ARMOperand *Op = new ARMOperand(k_ProcIFlags);
   2343     Op->IFlags.Val = IFlags;
   2344     Op->StartLoc = S;
   2345     Op->EndLoc = S;
   2346     return Op;
   2347   }
   2348 
   2349   static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
   2350     ARMOperand *Op = new ARMOperand(k_MSRMask);
   2351     Op->MMask.Val = MMask;
   2352     Op->StartLoc = S;
   2353     Op->EndLoc = S;
   2354     return Op;
   2355   }
   2356 };
   2357 
   2358 } // end anonymous namespace.
   2359 
   2360 void ARMOperand::print(raw_ostream &OS) const {
   2361   switch (Kind) {
   2362   case k_CondCode:
   2363     OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
   2364     break;
   2365   case k_CCOut:
   2366     OS << "<ccout " << getReg() << ">";
   2367     break;
   2368   case k_ITCondMask: {
   2369     static const char *const MaskStr[] = {
   2370       "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
   2371       "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
   2372     };
   2373     assert((ITMask.Mask & 0xf) == ITMask.Mask);
   2374     OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
   2375     break;
   2376   }
   2377   case k_CoprocNum:
   2378     OS << "<coprocessor number: " << getCoproc() << ">";
   2379     break;
   2380   case k_CoprocReg:
   2381     OS << "<coprocessor register: " << getCoproc() << ">";
   2382     break;
   2383   case k_CoprocOption:
   2384     OS << "<coprocessor option: " << CoprocOption.Val << ">";
   2385     break;
   2386   case k_MSRMask:
   2387     OS << "<mask: " << getMSRMask() << ">";
   2388     break;
   2389   case k_Immediate:
   2390     getImm()->print(OS);
   2391     break;
   2392   case k_MemBarrierOpt:
   2393     OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
   2394     break;
   2395   case k_Memory:
    2396     OS << "<memory"
   2397        << " base:" << Memory.BaseRegNum;
   2398     OS << ">";
   2399     break;
   2400   case k_PostIndexRegister:
   2401     OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
   2402        << PostIdxReg.RegNum;
   2403     if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
   2404       OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
   2405          << PostIdxReg.ShiftImm;
   2406     OS << ">";
   2407     break;
   2408   case k_ProcIFlags: {
   2409     OS << "<ARM_PROC::";
   2410     unsigned IFlags = getProcIFlags();
   2411     for (int i=2; i >= 0; --i)
   2412       if (IFlags & (1 << i))
   2413         OS << ARM_PROC::IFlagsToString(1 << i);
   2414     OS << ">";
   2415     break;
   2416   }
   2417   case k_Register:
   2418     OS << "<register " << getReg() << ">";
   2419     break;
   2420   case k_ShifterImmediate:
   2421     OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
   2422        << " #" << ShifterImm.Imm << ">";
   2423     break;
   2424   case k_ShiftedRegister:
   2425     OS << "<so_reg_reg "
   2426        << RegShiftedReg.SrcReg << " "
   2427        << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
   2428        << " " << RegShiftedReg.ShiftReg << ">";
   2429     break;
   2430   case k_ShiftedImmediate:
   2431     OS << "<so_reg_imm "
   2432        << RegShiftedImm.SrcReg << " "
   2433        << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
   2434        << " #" << RegShiftedImm.ShiftImm << ">";
   2435     break;
   2436   case k_RotateImmediate:
    2437     OS << "<ror #" << (RotImm.Imm * 8) << ">";
   2438     break;
   2439   case k_BitfieldDescriptor:
   2440     OS << "<bitfield " << "lsb: " << Bitfield.LSB
   2441        << ", width: " << Bitfield.Width << ">";
   2442     break;
   2443   case k_RegisterList:
   2444   case k_DPRRegisterList:
   2445   case k_SPRRegisterList: {
   2446     OS << "<register_list ";
   2447 
   2448     const SmallVectorImpl<unsigned> &RegList = getRegList();
   2449     for (SmallVectorImpl<unsigned>::const_iterator
   2450            I = RegList.begin(), E = RegList.end(); I != E; ) {
   2451       OS << *I;
   2452       if (++I < E) OS << ", ";
   2453     }
   2454 
   2455     OS << ">";
   2456     break;
   2457   }
   2458   case k_VectorList:
   2459     OS << "<vector_list " << VectorList.Count << " * "
   2460        << VectorList.RegNum << ">";
   2461     break;
   2462   case k_VectorListAllLanes:
   2463     OS << "<vector_list(all lanes) " << VectorList.Count << " * "
   2464        << VectorList.RegNum << ">";
   2465     break;
   2466   case k_VectorListIndexed:
   2467     OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
   2468        << VectorList.Count << " * " << VectorList.RegNum << ">";
   2469     break;
   2470   case k_Token:
   2471     OS << "'" << getToken() << "'";
   2472     break;
   2473   case k_VectorIndex:
   2474     OS << "<vectorindex " << getVectorIndex() << ">";
   2475     break;
   2476   }
   2477 }
   2478 
   2479 /// @name Auto-generated Match Functions
   2480 /// {
   2481 
   2482 static unsigned MatchRegisterName(StringRef Name);
   2483 
   2484 /// }
   2485 
   2486 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
   2487                                  SMLoc &StartLoc, SMLoc &EndLoc) {
   2488   StartLoc = Parser.getTok().getLoc();
   2489   EndLoc = Parser.getTok().getEndLoc();
   2490   RegNo = tryParseRegister();
   2491 
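           // Returning true tells the generic parser that no register was parsed.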
   2492   return (RegNo == (unsigned)-1);
   2493 }
   2494 
   2495 /// Try to parse a register name.  The token must be an Identifier when called,
   2496 /// and if it is a register name the token is eaten and the register number is
   2497 /// returned.  Otherwise return -1.
   2498 ///
   2499 int ARMAsmParser::tryParseRegister() {
   2500   const AsmToken &Tok = Parser.getTok();
   2501   if (Tok.isNot(AsmToken::Identifier)) return -1;
   2502 
   2503   std::string lowerCase = Tok.getString().lower();
   2504   unsigned RegNum = MatchRegisterName(lowerCase);
   2505   if (!RegNum) {
   2506     RegNum = StringSwitch<unsigned>(lowerCase)
   2507       .Case("r13", ARM::SP)
   2508       .Case("r14", ARM::LR)
   2509       .Case("r15", ARM::PC)
   2510       .Case("ip", ARM::R12)
   2511       // Additional register name aliases for 'gas' compatibility.
   2512       .Case("a1", ARM::R0)
   2513       .Case("a2", ARM::R1)
   2514       .Case("a3", ARM::R2)
   2515       .Case("a4", ARM::R3)
   2516       .Case("v1", ARM::R4)
   2517       .Case("v2", ARM::R5)
   2518       .Case("v3", ARM::R6)
   2519       .Case("v4", ARM::R7)
   2520       .Case("v5", ARM::R8)
   2521       .Case("v6", ARM::R9)
   2522       .Case("v7", ARM::R10)
   2523       .Case("v8", ARM::R11)
   2524       .Case("sb", ARM::R9)
   2525       .Case("sl", ARM::R10)
   2526       .Case("fp", ARM::R11)
   2527       .Default(0);
   2528   }
   2529   if (!RegNum) {
   2530     // Check for aliases registered via .req. Canonicalize to lower case.
   2531     // That's more consistent since register names are case insensitive, and
   2532     // it's how the original entry was passed in from MC/MCParser/AsmParser.
   2533     StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
   2534     // If no match, return failure.
   2535     if (Entry == RegisterReqs.end())
   2536       return -1;
   2537     Parser.Lex(); // Eat identifier token.
   2538     return Entry->getValue();
   2539   }
   2540 
   2541   Parser.Lex(); // Eat identifier token.
   2542 
   2543   return RegNum;
   2544 }
   2545 
    2546 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
   2547 // If a recoverable error occurs, return 1. If an irrecoverable error
   2548 // occurs, return -1. An irrecoverable error is one where tokens have been
   2549 // consumed in the process of trying to parse the shifter (i.e., when it is
   2550 // indeed a shifter operand, but malformed).
   2551 int ARMAsmParser::tryParseShiftRegister(
   2552                                SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   2553   SMLoc S = Parser.getTok().getLoc();
   2554   const AsmToken &Tok = Parser.getTok();
   2555   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
   2556 
   2557   std::string lowerCase = Tok.getString().lower();
   2558   ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
   2559       .Case("asl", ARM_AM::lsl)
   2560       .Case("lsl", ARM_AM::lsl)
   2561       .Case("lsr", ARM_AM::lsr)
   2562       .Case("asr", ARM_AM::asr)
   2563       .Case("ror", ARM_AM::ror)
   2564       .Case("rrx", ARM_AM::rrx)
   2565       .Default(ARM_AM::no_shift);
   2566 
   2567   if (ShiftTy == ARM_AM::no_shift)
   2568     return 1;
   2569 
   2570   Parser.Lex(); // Eat the operator.
   2571 
   2572   // The source register for the shift has already been added to the
   2573   // operand list, so we need to pop it off and combine it into the shifted
   2574   // register operand instead.
   2575   OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
   2576   if (!PrevOp->isReg())
   2577     return Error(PrevOp->getStartLoc(), "shift must be of a register");
   2578   int SrcReg = PrevOp->getReg();
   2579 
   2580   SMLoc EndLoc;
   2581   int64_t Imm = 0;
   2582   int ShiftReg = 0;
   2583   if (ShiftTy == ARM_AM::rrx) {
    2584     // RRX doesn't have an explicit shift amount. The encoder expects
   2585     // the shift register to be the same as the source register. Seems odd,
   2586     // but OK.
   2587     ShiftReg = SrcReg;
   2588   } else {
   2589     // Figure out if this is shifted by a constant or a register (for non-RRX).
   2590     if (Parser.getTok().is(AsmToken::Hash) ||
   2591         Parser.getTok().is(AsmToken::Dollar)) {
   2592       Parser.Lex(); // Eat hash.
   2593       SMLoc ImmLoc = Parser.getTok().getLoc();
   2594       const MCExpr *ShiftExpr = 0;
   2595       if (getParser().parseExpression(ShiftExpr, EndLoc)) {
   2596         Error(ImmLoc, "invalid immediate shift value");
   2597         return -1;
   2598       }
   2599       // The expression must be evaluatable as an immediate.
   2600       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
   2601       if (!CE) {
   2602         Error(ImmLoc, "invalid immediate shift value");
   2603         return -1;
   2604       }
   2605       // Range check the immediate.
   2606       // lsl, ror: 0 <= imm <= 31
   2607       // lsr, asr: 0 <= imm <= 32
   2608       Imm = CE->getValue();
   2609       if (Imm < 0 ||
   2610           ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
   2611           ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
   2612         Error(ImmLoc, "immediate shift value out of range");
   2613         return -1;
   2614       }
   2615       // shift by zero is a nop. Always send it through as lsl.
   2616       // ('as' compatibility)
   2617       if (Imm == 0)
   2618         ShiftTy = ARM_AM::lsl;
   2619     } else if (Parser.getTok().is(AsmToken::Identifier)) {
   2620       SMLoc L = Parser.getTok().getLoc();
   2621       EndLoc = Parser.getTok().getEndLoc();
   2622       ShiftReg = tryParseRegister();
   2623       if (ShiftReg == -1) {
   2624         Error(L, "expected immediate or register in shift operand");
   2625         return -1;
   2626       }
   2627     } else {
   2628       Error(Parser.getTok().getLoc(),
   2629             "expected immediate or register in shift operand");
   2630       return -1;
   2631     }
   2632   }
   2633 
   2634   if (ShiftReg && ShiftTy != ARM_AM::rrx)
   2635     Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
   2636                                                          ShiftReg, Imm,
   2637                                                          S, EndLoc));
   2638   else
   2639     Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
   2640                                                           S, EndLoc));
   2641 
   2642   return 0;
   2643 }
   2644 
   2645 
   2646 /// Try to parse a register name.  The token must be an Identifier when called.
   2647 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
   2648 /// if there is a "writeback". Returns 'true' if it's not a register.
   2649 ///
   2650 /// TODO: this is likely to change to allow different register types and/or to
   2651 /// parse for a specific register type.
   2652 bool ARMAsmParser::
   2653 tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   2654   const AsmToken &RegTok = Parser.getTok();
   2655   int RegNo = tryParseRegister();
   2656   if (RegNo == -1)
   2657     return true;
   2658 
   2659   Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
   2660                                            RegTok.getEndLoc()));
   2661 
   2662   const AsmToken &ExclaimTok = Parser.getTok();
   2663   if (ExclaimTok.is(AsmToken::Exclaim)) {
   2664     Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
   2665                                                ExclaimTok.getLoc()));
   2666     Parser.Lex(); // Eat exclaim token
   2667     return false;
   2668   }
   2669 
   2670   // Also check for an index operand. This is only legal for vector registers,
   2671   // but that'll get caught OK in operand matching, so we don't need to
   2672   // explicitly filter everything else out here.
   2673   if (Parser.getTok().is(AsmToken::LBrac)) {
   2674     SMLoc SIdx = Parser.getTok().getLoc();
   2675     Parser.Lex(); // Eat left bracket token.
   2676 
   2677     const MCExpr *ImmVal;
   2678     if (getParser().parseExpression(ImmVal))
   2679       return true;
   2680     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
   2681     if (!MCE)
   2682       return TokError("immediate value expected for vector index");
   2683 
   2684     if (Parser.getTok().isNot(AsmToken::RBrac))
   2685       return Error(Parser.getTok().getLoc(), "']' expected");
   2686 
   2687     SMLoc E = Parser.getTok().getEndLoc();
   2688     Parser.Lex(); // Eat right bracket token.
   2689 
   2690     Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
   2691                                                      SIdx, E,
   2692                                                      getContext()));
   2693   }
   2694 
   2695   return false;
   2696 }
   2697 
   2698 /// MatchCoprocessorOperandName - Try to parse a coprocessor-related
   2699 /// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
   2700 /// "c5", ...
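        /// For illustration, derived from the switch below:
        ///   MatchCoprocessorOperandName("p15", 'p') == 15
        ///   MatchCoprocessorOperandName("c7", 'c')  == 7
        ///   MatchCoprocessorOperandName("p16", 'p') == -1  (no such name)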
   2701 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
   2702   // Use the same layout as the tablegen'erated register name matcher. Ugly,
   2703   // but efficient.
   2704   switch (Name.size()) {
   2705   default: return -1;
   2706   case 2:
   2707     if (Name[0] != CoprocOp)
   2708       return -1;
   2709     switch (Name[1]) {
   2710     default:  return -1;
   2711     case '0': return 0;
   2712     case '1': return 1;
   2713     case '2': return 2;
   2714     case '3': return 3;
   2715     case '4': return 4;
   2716     case '5': return 5;
   2717     case '6': return 6;
   2718     case '7': return 7;
   2719     case '8': return 8;
   2720     case '9': return 9;
   2721     }
   2722   case 3:
   2723     if (Name[0] != CoprocOp || Name[1] != '1')
   2724       return -1;
   2725     switch (Name[2]) {
   2726     default:  return -1;
   2727     case '0': return 10;
   2728     case '1': return 11;
   2729     case '2': return 12;
   2730     case '3': return 13;
   2731     case '4': return 14;
   2732     case '5': return 15;
   2733     }
   2734   }
   2735 }
   2736 
   2737 /// parseITCondCode - Try to parse a condition code for an IT instruction.
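        /// For illustration, this matches the "ne" in "it ne" or the "gt" in
        /// "ittt gt" (examples only; any ARM condition code is accepted).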
   2738 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   2739 parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   2740   SMLoc S = Parser.getTok().getLoc();
   2741   const AsmToken &Tok = Parser.getTok();
   2742   if (!Tok.is(AsmToken::Identifier))
   2743     return MatchOperand_NoMatch;
   2744   unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
   2745     .Case("eq", ARMCC::EQ)
   2746     .Case("ne", ARMCC::NE)
   2747     .Case("hs", ARMCC::HS)
   2748     .Case("cs", ARMCC::HS)
   2749     .Case("lo", ARMCC::LO)
   2750     .Case("cc", ARMCC::LO)
   2751     .Case("mi", ARMCC::MI)
   2752     .Case("pl", ARMCC::PL)
   2753     .Case("vs", ARMCC::VS)
   2754     .Case("vc", ARMCC::VC)
   2755     .Case("hi", ARMCC::HI)
   2756     .Case("ls", ARMCC::LS)
   2757     .Case("ge", ARMCC::GE)
   2758     .Case("lt", ARMCC::LT)
   2759     .Case("gt", ARMCC::GT)
   2760     .Case("le", ARMCC::LE)
   2761     .Case("al", ARMCC::AL)
   2762     .Default(~0U);
   2763   if (CC == ~0U)
   2764     return MatchOperand_NoMatch;
   2765   Parser.Lex(); // Eat the token.
   2766 
   2767   Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
   2768 
   2769   return MatchOperand_Success;
   2770 }
   2771 
   2772 /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
   2773 /// token must be an Identifier when called, and if it is a coprocessor
   2774 /// number, the token is eaten and the operand is added to the operand list.
   2775 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   2776 parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   2777   SMLoc S = Parser.getTok().getLoc();
   2778   const AsmToken &Tok = Parser.getTok();
   2779   if (Tok.isNot(AsmToken::Identifier))
   2780     return MatchOperand_NoMatch;
   2781 
   2782   int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
   2783   if (Num == -1)
   2784     return MatchOperand_NoMatch;
   2785 
   2786   Parser.Lex(); // Eat identifier token.
   2787   Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
   2788   return MatchOperand_Success;
   2789 }
   2790 
   2791 /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
   2792 /// token must be an Identifier when called, and if it is a coprocessor
   2793 /// register, the token is eaten and the operand is added to the operand list.
   2794 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   2795 parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   2796   SMLoc S = Parser.getTok().getLoc();
   2797   const AsmToken &Tok = Parser.getTok();
   2798   if (Tok.isNot(AsmToken::Identifier))
   2799     return MatchOperand_NoMatch;
   2800 
   2801   int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
   2802   if (Reg == -1)
   2803     return MatchOperand_NoMatch;
   2804 
   2805   Parser.Lex(); // Eat identifier token.
   2806   Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
   2807   return MatchOperand_Success;
   2808 }
   2809 
   2810 /// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
   2811 /// coproc_option : '{' imm0_255 '}'
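        /// For illustration, this matches the "{4}" in a post-indexed, unindexed
        /// coprocessor load such as "ldc p5, c3, [r2], {4}" (example only).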
   2812 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   2813 parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   2814   SMLoc S = Parser.getTok().getLoc();
   2815 
   2816   // If this isn't a '{', this isn't a coprocessor immediate operand.
   2817   if (Parser.getTok().isNot(AsmToken::LCurly))
   2818     return MatchOperand_NoMatch;
   2819   Parser.Lex(); // Eat the '{'
   2820 
   2821   const MCExpr *Expr;
   2822   SMLoc Loc = Parser.getTok().getLoc();
   2823   if (getParser().parseExpression(Expr)) {
   2824     Error(Loc, "illegal expression");
   2825     return MatchOperand_ParseFail;
   2826   }
   2827   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
   2828   if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
   2829     Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
   2830     return MatchOperand_ParseFail;
   2831   }
   2832   int Val = CE->getValue();
   2833 
   2834   // Check for and consume the closing '}'
   2835   if (Parser.getTok().isNot(AsmToken::RCurly))
   2836     return MatchOperand_ParseFail;
   2837   SMLoc E = Parser.getTok().getEndLoc();
   2838   Parser.Lex(); // Eat the '}'
   2839 
   2840   Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
   2841   return MatchOperand_Success;
   2842 }
   2843 
   2844 // For register list parsing, we need to map from raw GPR register numbering
   2845 // to the enumeration values. The enumeration values aren't sorted by
   2846 // register number due to our using "sp", "lr" and "pc" as canonical names.
   2847 static unsigned getNextRegister(unsigned Reg) {
   2848   // If this is a GPR, we need to do it manually, otherwise we can rely
   2849   // on the sort ordering of the enumeration since the other reg-classes
   2850   // are sane.
   2851   if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
   2852     return Reg + 1;
   2853   switch(Reg) {
   2854   default: llvm_unreachable("Invalid GPR number!");
   2855   case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
   2856   case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
   2857   case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
   2858   case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
   2859   case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
   2860   case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
   2861   case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
   2862   case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
   2863   }
   2864 }
   2865 
   2866 // Return the low-subreg of a given Q register.
   2867 static unsigned getDRegFromQReg(unsigned QReg) {
   2868   switch (QReg) {
   2869   default: llvm_unreachable("expected a Q register!");
   2870   case ARM::Q0:  return ARM::D0;
   2871   case ARM::Q1:  return ARM::D2;
   2872   case ARM::Q2:  return ARM::D4;
   2873   case ARM::Q3:  return ARM::D6;
   2874   case ARM::Q4:  return ARM::D8;
   2875   case ARM::Q5:  return ARM::D10;
   2876   case ARM::Q6:  return ARM::D12;
   2877   case ARM::Q7:  return ARM::D14;
   2878   case ARM::Q8:  return ARM::D16;
   2879   case ARM::Q9:  return ARM::D18;
   2880   case ARM::Q10: return ARM::D20;
   2881   case ARM::Q11: return ARM::D22;
   2882   case ARM::Q12: return ARM::D24;
   2883   case ARM::Q13: return ARM::D26;
   2884   case ARM::Q14: return ARM::D28;
   2885   case ARM::Q15: return ARM::D30;
   2886   }
   2887 }
   2888 
   2889 /// Parse a register list.
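        /// For illustration, register lists handled here look like:
        ///   {r0, r1, r4-r6}   @ GPR list with a range
        ///   {d0-d3}           @ VFP double-precision list
        ///   {q0, q1}          @ Q regs are expanded to their D sub-registers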
   2890 bool ARMAsmParser::
   2891 parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   2892   assert(Parser.getTok().is(AsmToken::LCurly) &&
   2893          "Token is not a Left Curly Brace");
   2894   SMLoc S = Parser.getTok().getLoc();
   2895   Parser.Lex(); // Eat '{' token.
   2896   SMLoc RegLoc = Parser.getTok().getLoc();
   2897 
   2898   // Check the first register in the list to see what register class
   2899   // this is a list of.
   2900   int Reg = tryParseRegister();
   2901   if (Reg == -1)
   2902     return Error(RegLoc, "register expected");
   2903 
   2904   // The reglist instructions have at most 16 registers, so reserve
   2905   // space for that many.
   2906   SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
   2907 
   2908   // Allow Q regs and just interpret them as the two D sub-registers.
   2909   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
   2910     Reg = getDRegFromQReg(Reg);
   2911     Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
   2912     ++Reg;
   2913   }
   2914   const MCRegisterClass *RC;
   2915   if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
   2916     RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
   2917   else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
   2918     RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
   2919   else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
   2920     RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
   2921   else
   2922     return Error(RegLoc, "invalid register in register list");
   2923 
   2924   // Store the register.
   2925   Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
   2926 
   2927   // This starts immediately after the first register token in the list,
   2928   // so we can see either a comma or a minus (range separator) as a legal
   2929   // next token.
   2930   while (Parser.getTok().is(AsmToken::Comma) ||
   2931          Parser.getTok().is(AsmToken::Minus)) {
   2932     if (Parser.getTok().is(AsmToken::Minus)) {
   2933       Parser.Lex(); // Eat the minus.
   2934       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
   2935       int EndReg = tryParseRegister();
   2936       if (EndReg == -1)
   2937         return Error(AfterMinusLoc, "register expected");
   2938       // Allow Q regs and just interpret them as the two D sub-registers.
   2939       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
   2940         EndReg = getDRegFromQReg(EndReg) + 1;
   2941       // If the register is the same as the start reg, there's nothing
   2942       // more to do.
   2943       if (Reg == EndReg)
   2944         continue;
   2945       // The register must be in the same register class as the first.
   2946       if (!RC->contains(EndReg))
   2947         return Error(AfterMinusLoc, "invalid register in register list");
   2948       // Ranges must go from low to high.
   2949       if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
   2950         return Error(AfterMinusLoc, "bad range in register list");
   2951 
   2952       // Add all the registers in the range to the register list.
   2953       while (Reg != EndReg) {
   2954         Reg = getNextRegister(Reg);
   2955         Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
   2956       }
   2957       continue;
   2958     }
   2959     Parser.Lex(); // Eat the comma.
   2960     RegLoc = Parser.getTok().getLoc();
   2961     int OldReg = Reg;
   2962     const AsmToken RegTok = Parser.getTok();
   2963     Reg = tryParseRegister();
   2964     if (Reg == -1)
   2965       return Error(RegLoc, "register expected");
   2966     // Allow Q regs and just interpret them as the two D sub-registers.
   2967     bool isQReg = false;
   2968     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
   2969       Reg = getDRegFromQReg(Reg);
   2970       isQReg = true;
   2971     }
   2972     // The register must be in the same register class as the first.
   2973     if (!RC->contains(Reg))
   2974       return Error(RegLoc, "invalid register in register list");
   2975     // List must be monotonically increasing.
   2976     if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
   2977       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
   2978         Warning(RegLoc, "register list not in ascending order");
   2979       else
   2980         return Error(RegLoc, "register list not in ascending order");
   2981     }
   2982     if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
   2983       Warning(RegLoc, "duplicated register (" + RegTok.getString() +
   2984               ") in register list");
   2985       continue;
   2986     }
   2987     // VFP register lists must also be contiguous.
   2988     // It's OK to use the enumeration values directly here, as the
   2989     // VFP register classes have the enum sorted properly.
   2990     if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
   2991         Reg != OldReg + 1)
   2992       return Error(RegLoc, "non-contiguous register range");
   2993     Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
   2994     if (isQReg)
   2995       Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
   2996   }
   2997 
   2998   if (Parser.getTok().isNot(AsmToken::RCurly))
   2999     return Error(Parser.getTok().getLoc(), "'}' expected");
   3000   SMLoc E = Parser.getTok().getEndLoc();
   3001   Parser.Lex(); // Eat '}' token.
   3002 
   3003   // Push the register list operand.
   3004   Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
   3005 
   3006   // The ARM system instruction variants for LDM/STM have a '^' token here.
   3007   if (Parser.getTok().is(AsmToken::Caret)) {
   3008     Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
   3009     Parser.Lex(); // Eat '^' token.
   3010   }
   3011 
   3012   return false;
   3013 }
   3014 
   3015 // Helper function to parse the lane index for vector lists.
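        // For illustration, given "vld1.8 {d0[3]}, [r0]" this parses the "[3]";
        // "d0[]" is the all-lanes form, and a bare "d0" has no lane specifier.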
   3016 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   3017 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
   3018   Index = 0; // Always return a defined index value.
   3019   if (Parser.getTok().is(AsmToken::LBrac)) {
   3020     Parser.Lex(); // Eat the '['.
   3021     if (Parser.getTok().is(AsmToken::RBrac)) {
   3022       // "Dn[]" is the 'all lanes' syntax.
   3023       LaneKind = AllLanes;
   3024       EndLoc = Parser.getTok().getEndLoc();
   3025       Parser.Lex(); // Eat the ']'.
   3026       return MatchOperand_Success;
   3027     }
   3028 
   3029     // There's an optional '#' token here. Normally there wouldn't be, but
   3030     // inline assembly puts one in, and it's friendly to accept that.
   3031     if (Parser.getTok().is(AsmToken::Hash))
   3032       Parser.Lex(); // Eat the '#'
   3033 
   3034     const MCExpr *LaneIndex;
   3035     SMLoc Loc = Parser.getTok().getLoc();
   3036     if (getParser().parseExpression(LaneIndex)) {
   3037       Error(Loc, "illegal expression");
   3038       return MatchOperand_ParseFail;
   3039     }
   3040     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
   3041     if (!CE) {
   3042       Error(Loc, "lane index must be empty or an integer");
   3043       return MatchOperand_ParseFail;
   3044     }
   3045     if (Parser.getTok().isNot(AsmToken::RBrac)) {
   3046       Error(Parser.getTok().getLoc(), "']' expected");
   3047       return MatchOperand_ParseFail;
   3048     }
   3049     EndLoc = Parser.getTok().getEndLoc();
   3050     Parser.Lex(); // Eat the ']'.
   3051     int64_t Val = CE->getValue();
   3052 
   3053     // FIXME: Make this range check context sensitive for .8, .16, .32.
   3054     if (Val < 0 || Val > 7) {
   3055       Error(Parser.getTok().getLoc(), "lane index out of range");
   3056       return MatchOperand_ParseFail;
   3057     }
   3058     Index = Val;
   3059     LaneKind = IndexedLane;
   3060     return MatchOperand_Success;
   3061   }
   3062   LaneKind = NoLanes;
   3063   return MatchOperand_Success;
   3064 }
   3065 
   3066 // Parse a vector register list.
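        // For illustration, vector lists handled here include:
        //   {d0, d1, d2, d3}  @ single-spaced list
        //   {d0-d3}           @ range form of the same list
        //   {d0[2], d2[2]}    @ double-spaced, indexed-lane list
        //   q0                @ gas extension: bare register as a list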
   3067 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   3068 parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3069   VectorLaneTy LaneKind;
   3070   unsigned LaneIndex;
   3071   SMLoc S = Parser.getTok().getLoc();
   3072   // As an extension (to match gas), support a plain D register or Q register
   3073 // (without enclosing curly braces) as a single or double entry list,
   3074   // respectively.
   3075   if (Parser.getTok().is(AsmToken::Identifier)) {
   3076     SMLoc E = Parser.getTok().getEndLoc();
   3077     int Reg = tryParseRegister();
   3078     if (Reg == -1)
   3079       return MatchOperand_NoMatch;
   3080     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
   3081       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
   3082       if (Res != MatchOperand_Success)
   3083         return Res;
   3084       switch (LaneKind) {
   3085       case NoLanes:
   3086         Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
   3087         break;
   3088       case AllLanes:
   3089         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
   3090                                                                 S, E));
   3091         break;
   3092       case IndexedLane:
   3093         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
   3094                                                                LaneIndex,
   3095                                                                false, S, E));
   3096         break;
   3097       }
   3098       return MatchOperand_Success;
   3099     }
   3100     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
   3101       Reg = getDRegFromQReg(Reg);
   3102       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
   3103       if (Res != MatchOperand_Success)
   3104         return Res;
   3105       switch (LaneKind) {
   3106       case NoLanes:
   3107         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
   3108                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
   3109         Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
   3110         break;
   3111       case AllLanes:
   3112         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
   3113                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
   3114         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
   3115                                                                 S, E));
   3116         break;
   3117       case IndexedLane:
   3118         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
   3119                                                                LaneIndex,
   3120                                                                false, S, E));
   3121         break;
   3122       }
   3123       return MatchOperand_Success;
   3124     }
   3125     Error(S, "vector register expected");
   3126     return MatchOperand_ParseFail;
   3127   }
   3128 
   3129   if (Parser.getTok().isNot(AsmToken::LCurly))
   3130     return MatchOperand_NoMatch;
   3131 
   3132   Parser.Lex(); // Eat '{' token.
   3133   SMLoc RegLoc = Parser.getTok().getLoc();
   3134 
   3135   int Reg = tryParseRegister();
   3136   if (Reg == -1) {
   3137     Error(RegLoc, "register expected");
   3138     return MatchOperand_ParseFail;
   3139   }
   3140   unsigned Count = 1;
   3141   int Spacing = 0;
   3142   unsigned FirstReg = Reg;
   3143   // The list is of D registers, but we also allow Q regs and just interpret
   3144   // them as the two D sub-registers.
   3145   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
   3146     FirstReg = Reg = getDRegFromQReg(Reg);
   3147     Spacing = 1; // double-spacing requires explicit D registers, otherwise
   3148                  // it's ambiguous with four-register single spaced.
   3149     ++Reg;
   3150     ++Count;
   3151   }
   3152 
   3153   SMLoc E;
   3154   if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
   3155     return MatchOperand_ParseFail;
   3156 
   3157   while (Parser.getTok().is(AsmToken::Comma) ||
   3158          Parser.getTok().is(AsmToken::Minus)) {
   3159     if (Parser.getTok().is(AsmToken::Minus)) {
   3160       if (!Spacing)
   3161         Spacing = 1; // Register range implies a single spaced list.
   3162       else if (Spacing == 2) {
   3163         Error(Parser.getTok().getLoc(),
   3164               "sequential registers in double spaced list");
   3165         return MatchOperand_ParseFail;
   3166       }
   3167       Parser.Lex(); // Eat the minus.
   3168       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
   3169       int EndReg = tryParseRegister();
   3170       if (EndReg == -1) {
   3171         Error(AfterMinusLoc, "register expected");
   3172         return MatchOperand_ParseFail;
   3173       }
   3174       // Allow Q regs and just interpret them as the two D sub-registers.
   3175       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
   3176         EndReg = getDRegFromQReg(EndReg) + 1;
   3177       // If the register is the same as the start reg, there's nothing
   3178       // more to do.
   3179       if (Reg == EndReg)
   3180         continue;
   3181       // The register must be in the same register class as the first.
   3182       if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
   3183         Error(AfterMinusLoc, "invalid register in register list");
   3184         return MatchOperand_ParseFail;
   3185       }
   3186       // Ranges must go from low to high.
   3187       if (Reg > EndReg) {
   3188         Error(AfterMinusLoc, "bad range in register list");
   3189         return MatchOperand_ParseFail;
   3190       }
   3191       // Parse the lane specifier if present.
   3192       VectorLaneTy NextLaneKind;
   3193       unsigned NextLaneIndex;
   3194       if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
   3195           MatchOperand_Success)
   3196         return MatchOperand_ParseFail;
   3197       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
   3198         Error(AfterMinusLoc, "mismatched lane index in register list");
   3199         return MatchOperand_ParseFail;
   3200       }
   3201 
   3202       // Add all the registers in the range to the register list.
   3203       Count += EndReg - Reg;
   3204       Reg = EndReg;
   3205       continue;
   3206     }
   3207     Parser.Lex(); // Eat the comma.
   3208     RegLoc = Parser.getTok().getLoc();
   3209     int OldReg = Reg;
   3210     Reg = tryParseRegister();
   3211     if (Reg == -1) {
   3212       Error(RegLoc, "register expected");
   3213       return MatchOperand_ParseFail;
   3214     }
   3215     // Vector register lists must be contiguous.
   3216     // It's OK to use the enumeration values directly here, as the
   3217     // VFP register classes have the enum sorted properly.
   3218     //
   3219     // The list is of D registers, but we also allow Q regs and just interpret
   3220     // them as the two D sub-registers.
   3221     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
   3222       if (!Spacing)
   3223         Spacing = 1; // Register range implies a single spaced list.
   3224       else if (Spacing == 2) {
   3225         Error(RegLoc,
   3226               "invalid register in double-spaced list (must be 'D' register)");
   3227         return MatchOperand_ParseFail;
   3228       }
   3229       Reg = getDRegFromQReg(Reg);
   3230       if (Reg != OldReg + 1) {
   3231         Error(RegLoc, "non-contiguous register range");
   3232         return MatchOperand_ParseFail;
   3233       }
   3234       ++Reg;
   3235       Count += 2;
   3236       // Parse the lane specifier if present.
   3237       VectorLaneTy NextLaneKind;
   3238       unsigned NextLaneIndex;
   3239       SMLoc LaneLoc = Parser.getTok().getLoc();
   3240       if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
   3241           MatchOperand_Success)
   3242         return MatchOperand_ParseFail;
   3243       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
   3244         Error(LaneLoc, "mismatched lane index in register list");
   3245         return MatchOperand_ParseFail;
   3246       }
   3247       continue;
   3248     }
   3249     // Normal D register.
   3250     // Figure out the register spacing (single or double) of the list if
   3251     // we don't know it already.
   3252     if (!Spacing)
   3253       Spacing = 1 + (Reg == OldReg + 2);
   3254 
   3255     // Just check that it's contiguous and keep going.
   3256     if (Reg != OldReg + Spacing) {
   3257       Error(RegLoc, "non-contiguous register range");
   3258       return MatchOperand_ParseFail;
   3259     }
   3260     ++Count;
   3261     // Parse the lane specifier if present.
   3262     VectorLaneTy NextLaneKind;
   3263     unsigned NextLaneIndex;
   3264     SMLoc EndLoc = Parser.getTok().getLoc();
   3265     if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
   3266       return MatchOperand_ParseFail;
   3267     if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
   3268       Error(EndLoc, "mismatched lane index in register list");
   3269       return MatchOperand_ParseFail;
   3270     }
   3271   }
   3272 
   3273   if (Parser.getTok().isNot(AsmToken::RCurly)) {
   3274     Error(Parser.getTok().getLoc(), "'}' expected");
   3275     return MatchOperand_ParseFail;
   3276   }
   3277   E = Parser.getTok().getEndLoc();
   3278   Parser.Lex(); // Eat '}' token.
   3279 
   3280   switch (LaneKind) {
   3281   case NoLanes:
   3282     // Two-register operands have been converted to the
   3283     // composite register classes.
   3284     if (Count == 2) {
   3285       const MCRegisterClass *RC = (Spacing == 1) ?
   3286         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
   3287         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
   3288       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
   3289     }
   3290 
   3291     Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
   3292                                                     (Spacing == 2), S, E));
   3293     break;
   3294   case AllLanes:
   3295     // Two-register operands have been converted to the
   3296     // composite register classes.
   3297     if (Count == 2) {
   3298       const MCRegisterClass *RC = (Spacing == 1) ?
   3299         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
   3300         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
   3301       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
   3302     }
   3303     Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
   3304                                                             (Spacing == 2),
   3305                                                             S, E));
   3306     break;
   3307   case IndexedLane:
   3308     Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
   3309                                                            LaneIndex,
   3310                                                            (Spacing == 2),
   3311                                                            S, E));
   3312     break;
   3313   }
   3314   return MatchOperand_Success;
   3315 }
   3316 
   3317 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
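        /// For illustration: "dmb ish", "dsb sy", "dmb oshst", or a raw
        /// immediate in [0,15] such as "dmb #10" (examples only).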
   3318 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   3319 parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3320   SMLoc S = Parser.getTok().getLoc();
   3321   const AsmToken &Tok = Parser.getTok();
   3322   unsigned Opt;
   3323 
   3324   if (Tok.is(AsmToken::Identifier)) {
   3325     StringRef OptStr = Tok.getString();
   3326 
   3327     Opt = StringSwitch<unsigned>(OptStr.lower())
   3328       .Case("sy",    ARM_MB::SY)
   3329       .Case("st",    ARM_MB::ST)
   3330       .Case("sh",    ARM_MB::ISH)
   3331       .Case("ish",   ARM_MB::ISH)
   3332       .Case("shst",  ARM_MB::ISHST)
   3333       .Case("ishst", ARM_MB::ISHST)
   3334       .Case("nsh",   ARM_MB::NSH)
   3335       .Case("un",    ARM_MB::NSH)
   3336       .Case("nshst", ARM_MB::NSHST)
   3337       .Case("unst",  ARM_MB::NSHST)
   3338       .Case("osh",   ARM_MB::OSH)
   3339       .Case("oshst", ARM_MB::OSHST)
   3340       .Default(~0U);
   3341 
   3342     if (Opt == ~0U)
   3343       return MatchOperand_NoMatch;
   3344 
   3345     Parser.Lex(); // Eat identifier token.
   3346   } else if (Tok.is(AsmToken::Hash) ||
   3347              Tok.is(AsmToken::Dollar) ||
   3348              Tok.is(AsmToken::Integer)) {
   3349     if (Parser.getTok().isNot(AsmToken::Integer))
   3350       Parser.Lex(); // Eat the '#'.
   3351     SMLoc Loc = Parser.getTok().getLoc();
   3352 
   3353     const MCExpr *MemBarrierID;
   3354     if (getParser().parseExpression(MemBarrierID)) {
   3355       Error(Loc, "illegal expression");
   3356       return MatchOperand_ParseFail;
   3357     }
   3358 
   3359     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
   3360     if (!CE) {
   3361       Error(Loc, "constant expression expected");
   3362       return MatchOperand_ParseFail;
   3363     }
   3364 
   3365     int Val = CE->getValue();
   3366     if (Val & ~0xf) {
   3367       Error(Loc, "immediate value out of range");
   3368       return MatchOperand_ParseFail;
   3369     }
   3370 
   3371     Opt = ARM_MB::RESERVED_0 + Val;
   3372   } else
   3373     return MatchOperand_ParseFail;
   3374 
   3375   Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
   3376   return MatchOperand_Success;
   3377 }
   3378 
   3379 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
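        /// For illustration, this parses the "if" in "cpsie if" or the "aif" in
        /// "cpsid aif"; the string "none" means no AIF bits are set.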
   3380 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   3381 parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3382   SMLoc S = Parser.getTok().getLoc();
   3383   const AsmToken &Tok = Parser.getTok();
   3384   if (!Tok.is(AsmToken::Identifier))
   3385     return MatchOperand_NoMatch;
   3386   StringRef IFlagsStr = Tok.getString();
   3387 
   3388   // An iflags string of "none" is interpreted to mean that none of the AIF
   3389   // bits are set.  Not a terribly useful instruction, but a valid encoding.
   3390   unsigned IFlags = 0;
   3391   if (IFlagsStr != "none") {
   3392     for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
   3393       unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
   3394         .Case("a", ARM_PROC::A)
   3395         .Case("i", ARM_PROC::I)
   3396         .Case("f", ARM_PROC::F)
   3397         .Default(~0U);
   3398 
   3399       // If some specific iflag is already set, it means that some letter is
   3400       // present more than once, this is not acceptable.
   3401       if (Flag == ~0U || (IFlags & Flag))
   3402         return MatchOperand_NoMatch;
   3403 
   3404       IFlags |= Flag;
   3405     }
   3406   }
   3407 
   3408   Parser.Lex(); // Eat identifier token.
   3409   Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
   3410   return MatchOperand_Success;
   3411 }
   3412 
   3413 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
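        /// For illustration: "apsr_nzcvq" or "cpsr_fc" on A/R-class cores, and
        /// special registers such as "primask" or "basepri" on M-class cores.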
   3414 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   3415 parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3416   SMLoc S = Parser.getTok().getLoc();
   3417   const AsmToken &Tok = Parser.getTok();
   3418   if (!Tok.is(AsmToken::Identifier))
   3419     return MatchOperand_NoMatch;
   3420   StringRef Mask = Tok.getString();
   3421 
   3422   if (isMClass()) {
   3423     // See ARMv6-M 10.1.1
   3424     std::string Name = Mask.lower();
   3425     unsigned FlagsVal = StringSwitch<unsigned>(Name)
   3426       // Note: in the documentation:
   3427       //  ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
   3428       //  for MSR APSR_nzcvq.
   3429       // but we do make it an alias here. This is done to get the "mask encoding"
   3430       // bits correct on MSR APSR writes.
   3431       //
   3432       // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
   3433       // should really only be allowed when writing a special register.  Note
   3434       // they get dropped in the MRS instruction reading a special register as
   3435       // the SYSm field is only 8 bits.
   3436       //
   3437       // FIXME: the _g and _nzcvqg versions are only allowed if the processor
   3438       // includes the DSP extension but that is not checked.
   3439       .Case("apsr", 0x800)
   3440       .Case("apsr_nzcvq", 0x800)
   3441       .Case("apsr_g", 0x400)
   3442       .Case("apsr_nzcvqg", 0xc00)
   3443       .Case("iapsr", 0x801)
   3444       .Case("iapsr_nzcvq", 0x801)
   3445       .Case("iapsr_g", 0x401)
   3446       .Case("iapsr_nzcvqg", 0xc01)
   3447       .Case("eapsr", 0x802)
   3448       .Case("eapsr_nzcvq", 0x802)
   3449       .Case("eapsr_g", 0x402)
   3450       .Case("eapsr_nzcvqg", 0xc02)
   3451       .Case("xpsr", 0x803)
   3452       .Case("xpsr_nzcvq", 0x803)
   3453       .Case("xpsr_g", 0x403)
   3454       .Case("xpsr_nzcvqg", 0xc03)
   3455       .Case("ipsr", 0x805)
   3456       .Case("epsr", 0x806)
   3457       .Case("iepsr", 0x807)
   3458       .Case("msp", 0x808)
   3459       .Case("psp", 0x809)
   3460       .Case("primask", 0x810)
   3461       .Case("basepri", 0x811)
   3462       .Case("basepri_max", 0x812)
   3463       .Case("faultmask", 0x813)
   3464       .Case("control", 0x814)
   3465       .Default(~0U);
   3466 
   3467     if (FlagsVal == ~0U)
   3468       return MatchOperand_NoMatch;
   3469 
   3470     if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
   3471       // basepri, basepri_max and faultmask only valid for V7m.
   3472       return MatchOperand_NoMatch;
   3473 
   3474     Parser.Lex(); // Eat identifier token.
   3475     Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
   3476     return MatchOperand_Success;
   3477   }
   3478 
   3479   // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
   3480   size_t Start = 0, Next = Mask.find('_');
   3481   StringRef Flags = "";
   3482   std::string SpecReg = Mask.slice(Start, Next).lower();
   3483   if (Next != StringRef::npos)
   3484     Flags = Mask.slice(Next+1, Mask.size());
   3485 
   3486   // FlagsVal contains the complete mask:
   3487   // 3-0: Mask
   3488   // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
   3489   unsigned FlagsVal = 0;
   3490 
   3491   if (SpecReg == "apsr") {
   3492     FlagsVal = StringSwitch<unsigned>(Flags)
   3493     .Case("nzcvq",  0x8) // same as CPSR_f
   3494     .Case("g",      0x4) // same as CPSR_s
   3495     .Case("nzcvqg", 0xc) // same as CPSR_fs
   3496     .Default(~0U);
   3497 
   3498     if (FlagsVal == ~0U) {
   3499       if (!Flags.empty())
   3500         return MatchOperand_NoMatch;
   3501       else
   3502         FlagsVal = 8; // No flag
   3503     }
   3504   } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
   3505     // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
   3506     if (Flags == "all" || Flags == "")
   3507       Flags = "fc";
   3508     for (int i = 0, e = Flags.size(); i != e; ++i) {
   3509       unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
   3510       .Case("c", 1)
   3511       .Case("x", 2)
   3512       .Case("s", 4)
   3513       .Case("f", 8)
   3514       .Default(~0U);
   3515 
   3516       // If some specific flag is already set, it means that some letter is
   3517       // present more than once, this is not acceptable.
   3518       if (FlagsVal == ~0U || (FlagsVal & Flag))
   3519         return MatchOperand_NoMatch;
   3520       FlagsVal |= Flag;
   3521     }
   3522   } else // No match for special register.
   3523     return MatchOperand_NoMatch;
   3524 
   3525   // Special register without flags is NOT equivalent to "fc" flags.
   3526   // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
   3527   // two lines would enable gas compatibility at the expense of breaking
   3528   // round-tripping.
   3529   //
   3530   // if (!FlagsVal)
   3531   //  FlagsVal = 0x9;
   3532 
   3533   // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
   3534   if (SpecReg == "spsr")
   3535     FlagsVal |= 16;
   3536 
   3537   Parser.Lex(); // Eat identifier token.
   3538   Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
   3539   return MatchOperand_Success;
   3540 }
   3541 
   3542 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   3543 parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
   3544             int Low, int High) {
   3545   const AsmToken &Tok = Parser.getTok();
   3546   if (Tok.isNot(AsmToken::Identifier)) {
   3547     Error(Parser.getTok().getLoc(), Op + " operand expected.");
   3548     return MatchOperand_ParseFail;
   3549   }
   3550   StringRef ShiftName = Tok.getString();
   3551   std::string LowerOp = Op.lower();
   3552   std::string UpperOp = Op.upper();
   3553   if (ShiftName != LowerOp && ShiftName != UpperOp) {
   3554     Error(Parser.getTok().getLoc(), Op + " operand expected.");
   3555     return MatchOperand_ParseFail;
   3556   }
   3557   Parser.Lex(); // Eat shift type token.
   3558 
   3559   // There must be a '#' and a shift amount.
   3560   if (Parser.getTok().isNot(AsmToken::Hash) &&
   3561       Parser.getTok().isNot(AsmToken::Dollar)) {
   3562     Error(Parser.getTok().getLoc(), "'#' expected");
   3563     return MatchOperand_ParseFail;
   3564   }
   3565   Parser.Lex(); // Eat hash token.
   3566 
   3567   const MCExpr *ShiftAmount;
   3568   SMLoc Loc = Parser.getTok().getLoc();
   3569   SMLoc EndLoc;
   3570   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
   3571     Error(Loc, "illegal expression");
   3572     return MatchOperand_ParseFail;
   3573   }
   3574   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
   3575   if (!CE) {
   3576     Error(Loc, "constant expression expected");
   3577     return MatchOperand_ParseFail;
   3578   }
   3579   int Val = CE->getValue();
   3580   if (Val < Low || Val > High) {
   3581     Error(Loc, "immediate value out of range");
   3582     return MatchOperand_ParseFail;
   3583   }
   3584 
   3585   Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
   3586 
   3587   return MatchOperand_Success;
   3588 }
   3589 
   3590 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   3591 parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3592   const AsmToken &Tok = Parser.getTok();
   3593   SMLoc S = Tok.getLoc();
   3594   if (Tok.isNot(AsmToken::Identifier)) {
   3595     Error(S, "'be' or 'le' operand expected");
   3596     return MatchOperand_ParseFail;
   3597   }
   3598   int Val = StringSwitch<int>(Tok.getString())
   3599     .Case("be", 1)
   3600     .Case("le", 0)
   3601     .Default(-1);
   3602   Parser.Lex(); // Eat the token.
   3603 
   3604   if (Val == -1) {
   3605     Error(S, "'be' or 'le' operand expected");
   3606     return MatchOperand_ParseFail;
   3607   }
   3608   Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
   3609                                                                   getContext()),
   3610                                            S, Tok.getEndLoc()));
   3611   return MatchOperand_Success;
   3612 }
   3613 
   3614 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
   3615 /// instructions. Legal values are:
   3616 ///     lsl #n  'n' in [0,31]
   3617 ///     asr #n  'n' in [1,32]
   3618 ///             n == 32 encoded as n == 0.
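        /// For illustration: the "lsl #4" in "ssat r0, #8, r1, lsl #4" or the
        /// "asr #32" in "usat r0, #8, r1, asr #32" (examples only).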
   3619 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   3620 parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3621   const AsmToken &Tok = Parser.getTok();
   3622   SMLoc S = Tok.getLoc();
   3623   if (Tok.isNot(AsmToken::Identifier)) {
   3624     Error(S, "shift operator 'asr' or 'lsl' expected");
   3625     return MatchOperand_ParseFail;
   3626   }
   3627   StringRef ShiftName = Tok.getString();
   3628   bool isASR;
   3629   if (ShiftName == "lsl" || ShiftName == "LSL")
   3630     isASR = false;
   3631   else if (ShiftName == "asr" || ShiftName == "ASR")
   3632     isASR = true;
   3633   else {
   3634     Error(S, "shift operator 'asr' or 'lsl' expected");
   3635     return MatchOperand_ParseFail;
   3636   }
   3637   Parser.Lex(); // Eat the operator.
   3638 
   3639   // A '#' and a shift amount.
   3640   if (Parser.getTok().isNot(AsmToken::Hash) &&
   3641       Parser.getTok().isNot(AsmToken::Dollar)) {
   3642     Error(Parser.getTok().getLoc(), "'#' expected");
   3643     return MatchOperand_ParseFail;
   3644   }
   3645   Parser.Lex(); // Eat hash token.
   3646   SMLoc ExLoc = Parser.getTok().getLoc();
   3647 
   3648   const MCExpr *ShiftAmount;
   3649   SMLoc EndLoc;
   3650   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
   3651     Error(ExLoc, "malformed shift expression");
   3652     return MatchOperand_ParseFail;
   3653   }
   3654   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
   3655   if (!CE) {
   3656     Error(ExLoc, "shift amount must be an immediate");
   3657     return MatchOperand_ParseFail;
   3658   }
   3659 
   3660   int64_t Val = CE->getValue();
   3661   if (isASR) {
   3662     // Shift amount must be in [1,32]
   3663     if (Val < 1 || Val > 32) {
   3664       Error(ExLoc, "'asr' shift amount must be in range [1,32]");
   3665       return MatchOperand_ParseFail;
   3666     }
   3667     // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
   3668     if (isThumb() && Val == 32) {
   3669       Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
   3670       return MatchOperand_ParseFail;
   3671     }
   3672     if (Val == 32) Val = 0;
   3673   } else {
   3674     // Shift amount must be in [0,31]
   3675     if (Val < 0 || Val > 31) {
   3676       Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
   3677       return MatchOperand_ParseFail;
   3678     }
   3679   }
   3680 
   3681   Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
   3682 
   3683   return MatchOperand_Success;
   3684 }
   3685 
   3686 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
   3687 /// of instructions. Legal values are:
   3688 ///     ror #n  'n' in {0, 8, 16, 24}
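        /// For illustration: the "ror #16" in "sxtb r0, r1, ror #16"
        /// (example only).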
   3689 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   3690 parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3691   const AsmToken &Tok = Parser.getTok();
   3692   SMLoc S = Tok.getLoc();
   3693   if (Tok.isNot(AsmToken::Identifier))
   3694     return MatchOperand_NoMatch;
   3695   StringRef ShiftName = Tok.getString();
   3696   if (ShiftName != "ror" && ShiftName != "ROR")
   3697     return MatchOperand_NoMatch;
   3698   Parser.Lex(); // Eat the operator.
   3699 
   3700   // A '#' and a rotate amount.
   3701   if (Parser.getTok().isNot(AsmToken::Hash) &&
   3702       Parser.getTok().isNot(AsmToken::Dollar)) {
   3703     Error(Parser.getTok().getLoc(), "'#' expected");
   3704     return MatchOperand_ParseFail;
   3705   }
   3706   Parser.Lex(); // Eat hash token.
   3707   SMLoc ExLoc = Parser.getTok().getLoc();
   3708 
   3709   const MCExpr *ShiftAmount;
   3710   SMLoc EndLoc;
   3711   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
   3712     Error(ExLoc, "malformed rotate expression");
   3713     return MatchOperand_ParseFail;
   3714   }
   3715   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
   3716   if (!CE) {
   3717     Error(ExLoc, "rotate amount must be an immediate");
   3718     return MatchOperand_ParseFail;
   3719   }
   3720 
   3721   int64_t Val = CE->getValue();
   3722   // Shift amount must be in {0, 8, 16, 24} (0 is an undocumented extension;
   3723   // normally, zero is represented in asm by omitting the rotate operand
   3724   // entirely).
   3725   if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
   3726     Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
   3727     return MatchOperand_ParseFail;
   3728   }
   3729 
   3730   Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
   3731 
   3732   return MatchOperand_Success;
   3733 }
   3734 
   3735 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   3736 parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3737   SMLoc S = Parser.getTok().getLoc();
   3738   // The bitfield descriptor is really two operands, the LSB and the width.
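          // For illustration, this parses the "#8, #4" in "bfi r0, r1, #8, #4"
          // (example only).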
   3739   if (Parser.getTok().isNot(AsmToken::Hash) &&
   3740       Parser.getTok().isNot(AsmToken::Dollar)) {
   3741     Error(Parser.getTok().getLoc(), "'#' expected");
   3742     return MatchOperand_ParseFail;
   3743   }
   3744   Parser.Lex(); // Eat hash token.
   3745 
   3746   const MCExpr *LSBExpr;
   3747   SMLoc E = Parser.getTok().getLoc();
   3748   if (getParser().parseExpression(LSBExpr)) {
   3749     Error(E, "malformed immediate expression");
   3750     return MatchOperand_ParseFail;
   3751   }
   3752   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
   3753   if (!CE) {
   3754     Error(E, "'lsb' operand must be an immediate");
   3755     return MatchOperand_ParseFail;
   3756   }
   3757 
   3758   int64_t LSB = CE->getValue();
   3759   // The LSB must be in the range [0,31]
   3760   if (LSB < 0 || LSB > 31) {
   3761     Error(E, "'lsb' operand must be in the range [0,31]");
   3762     return MatchOperand_ParseFail;
   3763   }
   3764   E = Parser.getTok().getLoc();
   3765 
   3766   // Expect another immediate operand.
   3767   if (Parser.getTok().isNot(AsmToken::Comma)) {
   3768     Error(Parser.getTok().getLoc(), "too few operands");
   3769     return MatchOperand_ParseFail;
   3770   }
   3771   Parser.Lex(); // Eat comma token.
   3772   if (Parser.getTok().isNot(AsmToken::Hash) &&
   3773       Parser.getTok().isNot(AsmToken::Dollar)) {
   3774     Error(Parser.getTok().getLoc(), "'#' expected");
   3775     return MatchOperand_ParseFail;
   3776   }
   3777   Parser.Lex(); // Eat hash token.
   3778 
   3779   const MCExpr *WidthExpr;
   3780   SMLoc EndLoc;
   3781   if (getParser().parseExpression(WidthExpr, EndLoc)) {
   3782     Error(E, "malformed immediate expression");
   3783     return MatchOperand_ParseFail;
   3784   }
   3785   CE = dyn_cast<MCConstantExpr>(WidthExpr);
   3786   if (!CE) {
   3787     Error(E, "'width' operand must be an immediate");
   3788     return MatchOperand_ParseFail;
   3789   }
   3790 
   3791   int64_t Width = CE->getValue();
   3792   // The width must be in the range [1,32-lsb]
   3793   if (Width < 1 || Width > 32 - LSB) {
   3794     Error(E, "'width' operand must be in the range [1,32-lsb]");
   3795     return MatchOperand_ParseFail;
   3796   }
   3797 
   3798   Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
   3799 
   3800   return MatchOperand_Success;
   3801 }
   3802 
   3803 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   3804 parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3805   // Check for a post-index addressing register operand. Specifically:
   3806   // postidx_reg := '+' register {, shift}
   3807   //              | '-' register {, shift}
   3808   //              | register {, shift}
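          // For illustration, this parses the "r2, lsl #2" in
          // "ldr r0, [r1], r2, lsl #2" (example only).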
   3809 
   3810   // This method must return MatchOperand_NoMatch without consuming any tokens
   3811   // in the case where there is no match, as other alternatives take other
   3812   // parse methods.
   3813   AsmToken Tok = Parser.getTok();
   3814   SMLoc S = Tok.getLoc();
   3815   bool haveEaten = false;
   3816   bool isAdd = true;
   3817   if (Tok.is(AsmToken::Plus)) {
   3818     Parser.Lex(); // Eat the '+' token.
   3819     haveEaten = true;
   3820   } else if (Tok.is(AsmToken::Minus)) {
   3821     Parser.Lex(); // Eat the '-' token.
   3822     isAdd = false;
   3823     haveEaten = true;
   3824   }
   3825 
   3826   SMLoc E = Parser.getTok().getEndLoc();
   3827   int Reg = tryParseRegister();
   3828   if (Reg == -1) {
   3829     if (!haveEaten)
   3830       return MatchOperand_NoMatch;
   3831     Error(Parser.getTok().getLoc(), "register expected");
   3832     return MatchOperand_ParseFail;
   3833   }
   3834 
   3835   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
   3836   unsigned ShiftImm = 0;
   3837   if (Parser.getTok().is(AsmToken::Comma)) {
   3838     Parser.Lex(); // Eat the ','.
   3839     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
   3840       return MatchOperand_ParseFail;
   3841 
   3842     // FIXME: Only approximates end...may include intervening whitespace.
   3843     E = Parser.getTok().getLoc();
   3844   }
   3845 
   3846   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
   3847                                                   ShiftImm, S, E));
   3848 
   3849   return MatchOperand_Success;
   3850 }
   3851 
   3852 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   3853 parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3854   // Check for a post-index addressing register operand. Specifically:
   3855   // am3offset := '+' register
   3856   //              | '-' register
   3857   //              | register
   3858   //              | # imm
   3859   //              | # + imm
   3860   //              | # - imm
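          // For illustration: the "#4" in "ldrh r0, [r1], #4" or the "-r2" in
          // "ldrh r0, [r1], -r2" (examples only).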
   3861 
   3862   // This method must return MatchOperand_NoMatch without consuming any tokens
   3863   // in the case where there is no match, as other alternatives take other
   3864   // parse methods.
   3865   AsmToken Tok = Parser.getTok();
   3866   SMLoc S = Tok.getLoc();
   3867 
   3868   // Do immediates first, as we always parse those if we have a '#'.
   3869   if (Parser.getTok().is(AsmToken::Hash) ||
   3870       Parser.getTok().is(AsmToken::Dollar)) {
   3871     Parser.Lex(); // Eat the '#'.
   3872     // Explicitly look for a '-', as we need to encode negative zero
   3873     // differently.
   3874     bool isNegative = Parser.getTok().is(AsmToken::Minus);
   3875     const MCExpr *Offset;
   3876     SMLoc E;
   3877     if (getParser().parseExpression(Offset, E))
   3878       return MatchOperand_ParseFail;
   3879     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
   3880     if (!CE) {
   3881       Error(S, "constant expression expected");
   3882       return MatchOperand_ParseFail;
   3883     }
   3884     // Negative zero is encoded as the flag value INT32_MIN.
   3885     int32_t Val = CE->getValue();
   3886     if (isNegative && Val == 0)
   3887       Val = INT32_MIN;
   3888 
   3889     Operands.push_back(
   3890       ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
   3891 
   3892     return MatchOperand_Success;
   3893   }
   3894 
   3895 
   3896   bool haveEaten = false;
   3897   bool isAdd = true;
   3898   if (Tok.is(AsmToken::Plus)) {
   3899     Parser.Lex(); // Eat the '+' token.
   3900     haveEaten = true;
   3901   } else if (Tok.is(AsmToken::Minus)) {
   3902     Parser.Lex(); // Eat the '-' token.
   3903     isAdd = false;
   3904     haveEaten = true;
   3905   }
   3906 
   3907   Tok = Parser.getTok();
   3908   int Reg = tryParseRegister();
   3909   if (Reg == -1) {
   3910     if (!haveEaten)
   3911       return MatchOperand_NoMatch;
   3912     Error(Tok.getLoc(), "register expected");
   3913     return MatchOperand_ParseFail;
   3914   }
   3915 
   3916   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
   3917                                                   0, S, Tok.getEndLoc()));
   3918 
   3919   return MatchOperand_Success;
   3920 }
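        // Note on the zero "writeback" placeholder emitted by the cvt* helpers
        // that follow: the base-register update result is tied to a register
        // that lives inside a single compound memory operand, which is exactly
        // the tied-operand case the generated matcher cannot handle, so each
        // helper lays out the MCInst operands by hand and inserts a dummy in
        // the writeback slot to keep the operand ordering consistent.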
   3921 
   3922 /// cvtT2LdrdPre - Convert parsed operands to MCInst.
   3923 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   3924 /// when they refer to multiple MIOperands inside a single one.
   3925 void ARMAsmParser::
   3926 cvtT2LdrdPre(MCInst &Inst,
   3927              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3928   // Rt, Rt2
   3929   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   3930   ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
   3931   // Create a writeback register dummy placeholder.
   3932   Inst.addOperand(MCOperand::CreateReg(0));
   3933   // addr
   3934   ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
   3935   // pred
   3936   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   3937 }
   3938 
   3939 /// cvtT2StrdPre - Convert parsed operands to MCInst.
   3940 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   3941 /// when they refer to multiple MIOperands inside a single one.
   3942 void ARMAsmParser::
   3943 cvtT2StrdPre(MCInst &Inst,
   3944              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3945   // Create a writeback register dummy placeholder.
   3946   Inst.addOperand(MCOperand::CreateReg(0));
   3947   // Rt, Rt2
   3948   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   3949   ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
   3950   // addr
   3951   ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
   3952   // pred
   3953   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   3954 }
   3955 
   3956 /// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
   3957 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   3958 /// when they refer to multiple MIOperands inside a single one.
   3959 void ARMAsmParser::
   3960 cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
   3961                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3962   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   3963 
   3964   // Create a writeback register dummy placeholder.
   3965   Inst.addOperand(MCOperand::CreateImm(0));
   3966 
   3967   ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
   3968   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   3969 }
   3970 
   3971 /// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
   3972 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   3973 /// when they refer to multiple MIOperands inside a single one.
   3974 void ARMAsmParser::
   3975 cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
   3976                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3977   // Create a writeback register dummy placeholder.
   3978   Inst.addOperand(MCOperand::CreateImm(0));
   3979   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   3980   ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
   3981   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   3982 }
   3983 
   3984 /// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
   3985 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   3986 /// when they refer to multiple MIOperands inside a single one.
   3987 void ARMAsmParser::
   3988 cvtLdWriteBackRegAddrMode2(MCInst &Inst,
   3989                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   3990   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   3991 
   3992   // Create a writeback register dummy placeholder.
   3993   Inst.addOperand(MCOperand::CreateImm(0));
   3994 
   3995   ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
   3996   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   3997 }
   3998 
   3999 /// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
   4000 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   4001 /// when they refer to multiple MIOperands inside a single one.
   4002 void ARMAsmParser::
   4003 cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
   4004                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4005   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   4006 
   4007   // Create a writeback register dummy placeholder.
   4008   Inst.addOperand(MCOperand::CreateImm(0));
   4009 
   4010   ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
   4011   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4012 }
   4013 
   4014 
   4015 /// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
   4016 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   4017 /// when they refer to multiple MIOperands inside a single one.
   4018 void ARMAsmParser::
   4019 cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
   4020                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4021   // Create a writeback register dummy placeholder.
   4022   Inst.addOperand(MCOperand::CreateImm(0));
   4023   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   4024   ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
   4025   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4026 }
   4027 
   4028 /// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
   4029 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   4030 /// when they refer to multiple MIOperands inside a single one.
   4031 void ARMAsmParser::
   4032 cvtStWriteBackRegAddrMode2(MCInst &Inst,
   4033                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4034   // Create a writeback register dummy placeholder.
   4035   Inst.addOperand(MCOperand::CreateImm(0));
   4036   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   4037   ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
   4038   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4039 }
   4040 
   4041 /// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
   4042 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   4043 /// when they refer to multiple MIOperands inside a single one.
   4044 void ARMAsmParser::
   4045 cvtStWriteBackRegAddrMode3(MCInst &Inst,
   4046                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4047   // Create a writeback register dummy placeholder.
   4048   Inst.addOperand(MCOperand::CreateImm(0));
   4049   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   4050   ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
   4051   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4052 }
   4053 
   4054 /// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
   4055 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   4056 /// when they refer to multiple MIOperands inside a single one.
   4057 void ARMAsmParser::
   4058 cvtLdExtTWriteBackImm(MCInst &Inst,
   4059                       const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4060   // Rt
   4061   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   4062   // Create a writeback register dummy placeholder.
   4063   Inst.addOperand(MCOperand::CreateImm(0));
   4064   // addr
   4065   ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
   4066   // offset
   4067   ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
   4068   // pred
   4069   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4070 }
   4071 
   4072 /// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
   4073 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   4074 /// when they refer to multiple MIOperands inside a single one.
   4075 void ARMAsmParser::
   4076 cvtLdExtTWriteBackReg(MCInst &Inst,
   4077                       const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4078   // Rt
   4079   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   4080   // Create a writeback register dummy placeholder.
   4081   Inst.addOperand(MCOperand::CreateImm(0));
   4082   // addr
   4083   ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
   4084   // offset
   4085   ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
   4086   // pred
   4087   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4088 }
   4089 
   4090 /// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
   4091 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   4092 /// when they refer to multiple MIOperands inside a single one.
   4093 void ARMAsmParser::
   4094 cvtStExtTWriteBackImm(MCInst &Inst,
   4095                       const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4096   // Create a writeback register dummy placeholder.
   4097   Inst.addOperand(MCOperand::CreateImm(0));
   4098   // Rt
   4099   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   4100   // addr
   4101   ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
   4102   // offset
   4103   ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
   4104   // pred
   4105   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4106 }
   4107 
   4108 /// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
   4109 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   4110 /// when they refer to multiple MIOperands inside a single one.
   4111 void ARMAsmParser::
   4112 cvtStExtTWriteBackReg(MCInst &Inst,
   4113                       const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4114   // Create a writeback register dummy placeholder.
   4115   Inst.addOperand(MCOperand::CreateImm(0));
   4116   // Rt
   4117   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   4118   // addr
   4119   ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
   4120   // offset
   4121   ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
   4122   // pred
   4123   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4124 }
   4125 
   4126 /// cvtLdrdPre - Convert parsed operands to MCInst.
   4127 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   4128 /// when they refer to multiple MIOperands inside a single one.
   4129 void ARMAsmParser::
   4130 cvtLdrdPre(MCInst &Inst,
   4131            const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4132   // Rt, Rt2
   4133   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   4134   ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
   4135   // Create a writeback register dummy placeholder.
   4136   Inst.addOperand(MCOperand::CreateImm(0));
   4137   // addr
   4138   ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
   4139   // pred
   4140   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4141 }
   4142 
   4143 /// cvtStrdPre - Convert parsed operands to MCInst.
   4144 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   4145 /// when they refer to multiple MIOperands inside a single one.
   4146 void ARMAsmParser::
   4147 cvtStrdPre(MCInst &Inst,
   4148            const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4149   // Create a writeback register dummy placeholder.
   4150   Inst.addOperand(MCOperand::CreateImm(0));
   4151   // Rt, Rt2
   4152   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   4153   ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
   4154   // addr
   4155   ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
   4156   // pred
   4157   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4158 }
   4159 
   4160 /// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
   4161 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   4162 /// when they refer to multiple MIOperands inside a single one.
   4163 void ARMAsmParser::
   4164 cvtLdWriteBackRegAddrMode3(MCInst &Inst,
   4165                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4166   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
   4167   // Create a writeback register dummy placeholder.
   4168   Inst.addOperand(MCOperand::CreateImm(0));
   4169   ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
   4170   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4171 }
   4172 
   4173 /// cvtThumbMultiply - Convert parsed operands to MCInst.
   4174 /// Needed here because the Asm Gen Matcher can't properly handle tied operands
   4175 /// when they refer to multiple MIOperands inside a single one.
   4176 void ARMAsmParser::
   4177 cvtThumbMultiply(MCInst &Inst,
   4178            const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4179   ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
   4180   ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
   4181   // If we have a three-operand form, make sure to set Rn to be the operand
   4182   // that isn't the same as Rd.
   4183   unsigned RegOp = 4;
   4184   if (Operands.size() == 6 &&
   4185       ((ARMOperand*)Operands[4])->getReg() ==
   4186         ((ARMOperand*)Operands[3])->getReg())
   4187     RegOp = 5;
   4188   ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
   4189   Inst.addOperand(Inst.getOperand(0));
   4190   ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
   4191 }
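        // For example, both 'muls r0, r1, r0' and 'muls r0, r0, r1' are laid
        // out here for the two-register Thumb multiply form: Rd comes from
        // Operands[3], Rn is whichever source register differs from Rd, and
        // the tied Rm is re-added as a copy of Rd.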
   4192 
   4193 void ARMAsmParser::
   4194 cvtVLDwbFixed(MCInst &Inst,
   4195               const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4196   // Vd
   4197   ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
   4198   // Create a writeback register dummy placeholder.
   4199   Inst.addOperand(MCOperand::CreateImm(0));
   4200   // Vn
   4201   ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
   4202   // pred
   4203   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4204 }
   4205 
   4206 void ARMAsmParser::
   4207 cvtVLDwbRegister(MCInst &Inst,
   4208                  const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4209   // Vd
   4210   ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
   4211   // Create a writeback register dummy placeholder.
   4212   Inst.addOperand(MCOperand::CreateImm(0));
   4213   // Vn
   4214   ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
   4215   // Vm
   4216   ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
   4217   // pred
   4218   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4219 }
   4220 
   4221 void ARMAsmParser::
   4222 cvtVSTwbFixed(MCInst &Inst,
   4223               const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4224   // Create a writeback register dummy placeholder.
   4225   Inst.addOperand(MCOperand::CreateImm(0));
   4226   // Vn
   4227   ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
   4228   // Vt
   4229   ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
   4230   // pred
   4231   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4232 }
   4233 
   4234 void ARMAsmParser::
   4235 cvtVSTwbRegister(MCInst &Inst,
   4236                  const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4237   // Create a writeback register dummy placeholder.
   4238   Inst.addOperand(MCOperand::CreateImm(0));
   4239   // Vn
   4240   ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
   4241   // Vm
   4242   ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
   4243   // Vt
   4244   ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
   4245   // pred
   4246   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
   4247 }
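        // The four cvtVLD/cvtVST helpers above cover the NEON load/store
        // writeback forms, e.g.:
        //   vld1.8 {d0, d1}, [r0]!      @ fixed (post-increment) writeback
        //   vld1.8 {d0, d1}, [r0], r2   @ register writeback
        // and the matching vst variants.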
   4248 
   4249 /// Parse an ARM memory expression. Return false on success; otherwise return
   4250 /// true and emit an error.  The first token must be a '[' when called.
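        /// Illustrative (not exhaustive) forms accepted here:
        ///   [r0]          [r0, #4]            [r0, #-4]
        ///   [r0, r1]      [r0, -r1, lsl #2]   [r0, :128]
        /// each optionally followed by a '!' pre-indexing marker.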
   4251 bool ARMAsmParser::
   4252 parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4253   SMLoc S, E;
   4254   assert(Parser.getTok().is(AsmToken::LBrac) &&
   4255          "Token is not a Left Bracket");
   4256   S = Parser.getTok().getLoc();
   4257   Parser.Lex(); // Eat left bracket token.
   4258 
   4259   const AsmToken &BaseRegTok = Parser.getTok();
   4260   int BaseRegNum = tryParseRegister();
   4261   if (BaseRegNum == -1)
   4262     return Error(BaseRegTok.getLoc(), "register expected");
   4263 
   4264   // The next token must either be a comma, a colon or a closing bracket.
   4265   const AsmToken &Tok = Parser.getTok();
   4266   if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
   4267       !Tok.is(AsmToken::RBrac))
   4268     return Error(Tok.getLoc(), "malformed memory operand");
   4269 
   4270   if (Tok.is(AsmToken::RBrac)) {
   4271     E = Tok.getEndLoc();
   4272     Parser.Lex(); // Eat right bracket token.
   4273 
   4274     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
   4275                                              0, 0, false, S, E));
   4276 
   4277     // If there's a pre-indexing writeback marker, '!', just add it as a token
   4278     // operand. It's rather odd, but syntactically valid.
   4279     if (Parser.getTok().is(AsmToken::Exclaim)) {
   4280       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
   4281       Parser.Lex(); // Eat the '!'.
   4282     }
   4283 
   4284     return false;
   4285   }
   4286 
   4287   assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
   4288          "Lost colon or comma in memory operand?!");
   4289   if (Tok.is(AsmToken::Comma)) {
   4290     Parser.Lex(); // Eat the comma.
   4291   }
   4292 
   4293   // If we have a ':', it's an alignment specifier.
   4294   if (Parser.getTok().is(AsmToken::Colon)) {
   4295     Parser.Lex(); // Eat the ':'.
   4296     E = Parser.getTok().getLoc();
   4297 
   4298     const MCExpr *Expr;
   4299     if (getParser().parseExpression(Expr))
   4300      return true;
   4301 
   4302     // The expression has to be a constant. Memory references with relocations
   4303     // don't come through here, as they use the <label> forms of the relevant
   4304     // instructions.
   4305     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
   4306     if (!CE)
   4307       return Error (E, "constant expression expected");
   4308 
   4309     unsigned Align = 0;
   4310     switch (CE->getValue()) {
   4311     default:
   4312       return Error(E,
   4313                    "alignment specifier must be 16, 32, 64, 128, or 256 bits");
   4314     case 16:  Align = 2; break;
   4315     case 32:  Align = 4; break;
   4316     case 64:  Align = 8; break;
   4317     case 128: Align = 16; break;
   4318     case 256: Align = 32; break;
   4319     }
   4320 
   4321     // Now we should have the closing ']'
   4322     if (Parser.getTok().isNot(AsmToken::RBrac))
   4323       return Error(Parser.getTok().getLoc(), "']' expected");
   4324     E = Parser.getTok().getEndLoc();
   4325     Parser.Lex(); // Eat right bracket token.
   4326 
   4327     // Don't worry about range checking the value here. That's handled by
   4328     // the is*() predicates.
   4329     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
   4330                                              ARM_AM::no_shift, 0, Align,
   4331                                              false, S, E));
   4332 
   4333     // If there's a pre-indexing writeback marker, '!', just add it as a token
   4334     // operand.
   4335     if (Parser.getTok().is(AsmToken::Exclaim)) {
   4336       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
   4337       Parser.Lex(); // Eat the '!'.
   4338     }
   4339 
   4340     return false;
   4341   }
   4342 
   4343   // If we have a '#', it's an immediate offset, else assume it's a register
   4344   // offset. Be friendly and also accept a plain integer (without a leading
   4345   // hash) for gas compatibility.
   4346   if (Parser.getTok().is(AsmToken::Hash) ||
   4347       Parser.getTok().is(AsmToken::Dollar) ||
   4348       Parser.getTok().is(AsmToken::Integer)) {
   4349     if (Parser.getTok().isNot(AsmToken::Integer))
   4350       Parser.Lex(); // Eat the '#'.
   4351     E = Parser.getTok().getLoc();
   4352 
   4353     bool isNegative = getParser().getTok().is(AsmToken::Minus);
   4354     const MCExpr *Offset;
   4355     if (getParser().parseExpression(Offset))
   4356      return true;
   4357 
   4358     // The expression has to be a constant. Memory references with relocations
   4359     // don't come through here, as they use the <label> forms of the relevant
   4360     // instructions.
   4361     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
   4362     if (!CE)
   4363       return Error (E, "constant expression expected");
   4364 
   4365     // If the constant was #-0, represent it as INT32_MIN.
   4366     int32_t Val = CE->getValue();
   4367     if (isNegative && Val == 0)
   4368       CE = MCConstantExpr::Create(INT32_MIN, getContext());
   4369 
   4370     // Now we should have the closing ']'
   4371     if (Parser.getTok().isNot(AsmToken::RBrac))
   4372       return Error(Parser.getTok().getLoc(), "']' expected");
   4373     E = Parser.getTok().getEndLoc();
   4374     Parser.Lex(); // Eat right bracket token.
   4375 
   4376     // Don't worry about range checking the value here. That's handled by
   4377     // the is*() predicates.
   4378     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
   4379                                              ARM_AM::no_shift, 0, 0,
   4380                                              false, S, E));
   4381 
   4382     // If there's a pre-indexing writeback marker, '!', just add it as a token
   4383     // operand.
   4384     if (Parser.getTok().is(AsmToken::Exclaim)) {
   4385       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
   4386       Parser.Lex(); // Eat the '!'.
   4387     }
   4388 
   4389     return false;
   4390   }
   4391 
   4392   // The register offset is optionally preceded by a '+' or '-'
   4393   bool isNegative = false;
   4394   if (Parser.getTok().is(AsmToken::Minus)) {
   4395     isNegative = true;
   4396     Parser.Lex(); // Eat the '-'.
   4397   } else if (Parser.getTok().is(AsmToken::Plus)) {
   4398     // Nothing to do.
   4399     Parser.Lex(); // Eat the '+'.
   4400   }
   4401 
   4402   E = Parser.getTok().getLoc();
   4403   int OffsetRegNum = tryParseRegister();
   4404   if (OffsetRegNum == -1)
   4405     return Error(E, "register expected");
   4406 
   4407   // If there's a shift operator, handle it.
   4408   ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
   4409   unsigned ShiftImm = 0;
   4410   if (Parser.getTok().is(AsmToken::Comma)) {
   4411     Parser.Lex(); // Eat the ','.
   4412     if (parseMemRegOffsetShift(ShiftType, ShiftImm))
   4413       return true;
   4414   }
   4415 
   4416   // Now we should have the closing ']'
   4417   if (Parser.getTok().isNot(AsmToken::RBrac))
   4418     return Error(Parser.getTok().getLoc(), "']' expected");
   4419   E = Parser.getTok().getEndLoc();
   4420   Parser.Lex(); // Eat right bracket token.
   4421 
   4422   Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
   4423                                            ShiftType, ShiftImm, 0, isNegative,
   4424                                            S, E));
   4425 
   4426   // If there's a pre-indexing writeback marker, '!', just add it as a token
   4427   // operand.
   4428   if (Parser.getTok().is(AsmToken::Exclaim)) {
   4429     Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
   4430     Parser.Lex(); // Eat the '!'.
   4431   }
   4432 
   4433   return false;
   4434 }
   4435 
   4436 /// parseMemRegOffsetShift - Parse one of these two:
   4437 ///   ( lsl | lsr | asr | ror ) , # shift_amount
   4438 ///   rrx
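        ///   e.g. 'lsl #2', 'asr #32' (represented with an amount of 0), or 'rrx'.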
   4439 /// Returns false if it successfully parses a shift, true otherwise.
   4440 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
   4441                                           unsigned &Amount) {
   4442   SMLoc Loc = Parser.getTok().getLoc();
   4443   const AsmToken &Tok = Parser.getTok();
   4444   if (Tok.isNot(AsmToken::Identifier))
   4445     return true;
   4446   StringRef ShiftName = Tok.getString();
   4447   if (ShiftName == "lsl" || ShiftName == "LSL" ||
   4448       ShiftName == "asl" || ShiftName == "ASL")
   4449     St = ARM_AM::lsl;
   4450   else if (ShiftName == "lsr" || ShiftName == "LSR")
   4451     St = ARM_AM::lsr;
   4452   else if (ShiftName == "asr" || ShiftName == "ASR")
   4453     St = ARM_AM::asr;
   4454   else if (ShiftName == "ror" || ShiftName == "ROR")
   4455     St = ARM_AM::ror;
   4456   else if (ShiftName == "rrx" || ShiftName == "RRX")
   4457     St = ARM_AM::rrx;
   4458   else
   4459     return Error(Loc, "illegal shift operator");
   4460   Parser.Lex(); // Eat shift type token.
   4461 
   4462   // rrx stands alone.
   4463   Amount = 0;
   4464   if (St != ARM_AM::rrx) {
   4465     Loc = Parser.getTok().getLoc();
   4466     // A '#' and a shift amount.
   4467     const AsmToken &HashTok = Parser.getTok();
   4468     if (HashTok.isNot(AsmToken::Hash) &&
   4469         HashTok.isNot(AsmToken::Dollar))
   4470       return Error(HashTok.getLoc(), "'#' expected");
   4471     Parser.Lex(); // Eat hash token.
   4472 
   4473     const MCExpr *Expr;
   4474     if (getParser().parseExpression(Expr))
   4475       return true;
   4476     // Range check the immediate.
   4477     // lsl, ror: 0 <= imm <= 31
   4478     // lsr, asr: 0 <= imm <= 32
   4479     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
   4480     if (!CE)
   4481       return Error(Loc, "shift amount must be an immediate");
   4482     int64_t Imm = CE->getValue();
   4483     if (Imm < 0 ||
   4484         ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
   4485         ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
   4486       return Error(Loc, "immediate shift value out of range");
   4487     // If <ShiftTy> #0, turn it into a no_shift.
   4488     if (Imm == 0)
   4489       St = ARM_AM::lsl;
   4490     // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
   4491     if (Imm == 32)
   4492       Imm = 0;
   4493     Amount = Imm;
   4494   }
   4495 
   4496   return false;
   4497 }
   4498 
   4499 /// parseFPImm - A floating point immediate expression operand.
   4500 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
   4501 parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4502   // Anything that can accept a floating point constant as an operand
   4503   // needs to go through here, as the regular parseExpression is
   4504   // integer only.
   4505   //
   4506   // This routine still creates a generic Immediate operand, containing
   4507   // a bitcast of the 64-bit floating point value. The various operands
   4508   // that accept floats can check whether the value is valid for them
   4509   // via the standard is*() predicates.
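          //
          // For example, 'vmov.f32 s0, #1.0' supplies a real constant, while a
          // raw pre-encoded 8-bit value in the range 0-255 is also accepted;
          // both cases are handled below.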
   4510 
   4511   SMLoc S = Parser.getTok().getLoc();
   4512 
   4513   if (Parser.getTok().isNot(AsmToken::Hash) &&
   4514       Parser.getTok().isNot(AsmToken::Dollar))
   4515     return MatchOperand_NoMatch;
   4516 
   4517   // Disambiguate the VMOV forms that can accept an FP immediate.
   4518   // vmov.f32 <sreg>, #imm
   4519   // vmov.f64 <dreg>, #imm
   4520   // vmov.f32 <dreg>, #imm  @ vector f32x2
   4521   // vmov.f32 <qreg>, #imm  @ vector f32x4
   4522   //
   4523   // There are also the NEON VMOV instructions which expect an
   4524   // integer constant. Make sure we don't try to parse an FPImm
   4525   // for these:
   4526   // vmov.i{8|16|32|64} <dreg|qreg>, #imm
   4527   ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
   4528   if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
   4529                            TyOp->getToken() != ".f64"))
   4530     return MatchOperand_NoMatch;
   4531 
   4532   Parser.Lex(); // Eat the '#'.
   4533 
   4534   // Handle negation, as that still comes through as a separate token.
   4535   bool isNegative = false;
   4536   if (Parser.getTok().is(AsmToken::Minus)) {
   4537     isNegative = true;
   4538     Parser.Lex();
   4539   }
   4540   const AsmToken &Tok = Parser.getTok();
   4541   SMLoc Loc = Tok.getLoc();
   4542   if (Tok.is(AsmToken::Real)) {
   4543     APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
   4544     uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
   4545     // If we had a '-' in front, toggle the sign bit.
   4546     IntVal ^= (uint64_t)isNegative << 31;
   4547     Parser.Lex(); // Eat the token.
   4548     Operands.push_back(ARMOperand::CreateImm(
   4549           MCConstantExpr::Create(IntVal, getContext()),
   4550           S, Parser.getTok().getLoc()));
   4551     return MatchOperand_Success;
   4552   }
   4553   // Also handle plain integers. Instructions which allow floating point
   4554   // immediates also allow a raw encoded 8-bit value.
   4555   if (Tok.is(AsmToken::Integer)) {
   4556     int64_t Val = Tok.getIntVal();
   4557     Parser.Lex(); // Eat the token.
   4558     if (Val > 255 || Val < 0) {
   4559       Error(Loc, "encoded floating point value out of range");
   4560       return MatchOperand_ParseFail;
   4561     }
   4562     double RealVal = ARM_AM::getFPImmFloat(Val);
   4563     Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
   4564     Operands.push_back(ARMOperand::CreateImm(
   4565         MCConstantExpr::Create(Val, getContext()), S,
   4566         Parser.getTok().getLoc()));
   4567     return MatchOperand_Success;
   4568   }
   4569 
   4570   Error(Loc, "invalid floating point immediate");
   4571   return MatchOperand_ParseFail;
   4572 }
   4573 
   4574 /// Parse an ARM instruction operand.  For now this parses the operand regardless
   4575 /// of the mnemonic.
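        /// The dispatch below covers, among other things: plain and writeback
        /// registers, shifted registers, label/expression immediates, '#'/'$'
        /// immediates, bracketed memory operands, '{...}' register lists, and
        /// ':lower16:'/':upper16:' prefixed expressions.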
   4576 bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
   4577                                 StringRef Mnemonic) {
   4578   SMLoc S, E;
   4579 
   4580   // Check if the current operand has a custom associated parser, if so, try to
   4581   // custom parse the operand, or fallback to the general approach.
   4582   OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
   4583   if (ResTy == MatchOperand_Success)
   4584     return false;
   4585   // If there wasn't a custom match, try the generic matcher below. Otherwise,
   4586   // there was a match, but an error occurred, in which case, just return that
   4587   // the operand parsing failed.
   4588   if (ResTy == MatchOperand_ParseFail)
   4589     return true;
   4590 
   4591   switch (getLexer().getKind()) {
   4592   default:
   4593     Error(Parser.getTok().getLoc(), "unexpected token in operand");
   4594     return true;
   4595   case AsmToken::Identifier: {
   4596     if (!tryParseRegisterWithWriteBack(Operands))
   4597       return false;
   4598     int Res = tryParseShiftRegister(Operands);
   4599     if (Res == 0) // success
   4600       return false;
   4601     else if (Res == -1) // irrecoverable error
   4602       return true;
   4603     // If this is VMRS, check for the apsr_nzcv operand.
   4604     if (Mnemonic == "vmrs" &&
   4605         Parser.getTok().getString().equals_lower("apsr_nzcv")) {
   4606       S = Parser.getTok().getLoc();
   4607       Parser.Lex();
   4608       Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
   4609       return false;
   4610     }
   4611     // Fall through for the Identifier case that is not a register or a
   4612     // Fall though for the Identifier case that is not a register or a
   4613     // special name.
   4614   }
   4615   case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
   4616   case AsmToken::Integer: // things like 1f and 2b as branch targets
   4617   case AsmToken::String:  // quoted label names.
   4618   case AsmToken::Dot: {   // . as a branch target
   4619     // This was not a register so parse other operands that start with an
   4620     // identifier (like labels) as expressions and create them as immediates.
   4621     const MCExpr *IdVal;
   4622     S = Parser.getTok().getLoc();
   4623     if (getParser().parseExpression(IdVal))
   4624       return true;
   4625     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
   4626     Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
   4627     return false;
   4628   }
   4629   case AsmToken::LBrac:
   4630     return parseMemory(Operands);
   4631   case AsmToken::LCurly:
   4632     return parseRegisterList(Operands);
   4633   case AsmToken::Dollar:
   4634   case AsmToken::Hash: {
   4635     // #42 -> immediate.
   4636     S = Parser.getTok().getLoc();
   4637     Parser.Lex();
   4638 
   4639     if (Parser.getTok().isNot(AsmToken::Colon)) {
   4640       bool isNegative = Parser.getTok().is(AsmToken::Minus);
   4641       const MCExpr *ImmVal;
   4642       if (getParser().parseExpression(ImmVal))
   4643         return true;
   4644       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
   4645       if (CE) {
   4646         int32_t Val = CE->getValue();
   4647         if (isNegative && Val == 0)
   4648           ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
   4649       }
   4650       E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
   4651       Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
   4652 
   4653       // There can be a trailing '!' on operands that we want as a separate
   4654       // '!' Token operand. Handle that here. For example, the compatibility
   4655       // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
   4656       if (Parser.getTok().is(AsmToken::Exclaim)) {
   4657         Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
   4658                                                    Parser.getTok().getLoc()));
   4659         Parser.Lex(); // Eat exclaim token
   4660       }
   4661       return false;
   4662     }
   4663     // w/ a ':' after the '#', it's just like a plain ':'.
   4664     // FALLTHROUGH
   4665   }
   4666   case AsmToken::Colon: {
   4667     // ":lower16:" and ":upper16:" expression prefixes
   4668     // FIXME: Check it's an expression prefix,
   4669     // e.g. (FOO - :lower16:BAR) isn't legal.
   4670     ARMMCExpr::VariantKind RefKind;
   4671     if (parsePrefix(RefKind))
   4672       return true;
   4673 
   4674     const MCExpr *SubExprVal;
   4675     if (getParser().parseExpression(SubExprVal))
   4676       return true;
   4677 
   4678     const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
   4679                                               getContext());
   4680     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
   4681     Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
   4682     return false;
   4683   }
   4684   }
   4685 }
   4686 
   4687 // parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
   4688 //  :lower16: and :upper16:.
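        //  e.g. 'movw r0, :lower16:sym' and 'movt r0, :upper16:sym'.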
   4689 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
   4690   RefKind = ARMMCExpr::VK_ARM_None;
   4691 
   4692   // :lower16: and :upper16: modifiers
   4693   assert(getLexer().is(AsmToken::Colon) && "expected a :");
   4694   Parser.Lex(); // Eat ':'
   4695 
   4696   if (getLexer().isNot(AsmToken::Identifier)) {
   4697     Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
   4698     return true;
   4699   }
   4700 
   4701   StringRef IDVal = Parser.getTok().getIdentifier();
   4702   if (IDVal == "lower16") {
   4703     RefKind = ARMMCExpr::VK_ARM_LO16;
   4704   } else if (IDVal == "upper16") {
   4705     RefKind = ARMMCExpr::VK_ARM_HI16;
   4706   } else {
   4707     Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
   4708     return true;
   4709   }
   4710   Parser.Lex();
   4711 
   4712   if (getLexer().isNot(AsmToken::Colon)) {
   4713     Error(Parser.getTok().getLoc(), "unexpected token after prefix");
   4714     return true;
   4715   }
   4716   Parser.Lex(); // Eat the last ':'
   4717   return false;
   4718 }
   4719 
   4720 /// \brief Given a mnemonic, split out possible predication code and carry
   4721 /// setting letters to form a canonical mnemonic and flags.
   4722 //
   4723 // FIXME: Would be nice to autogen this.
   4724 // FIXME: This is a bit of a maze of special cases.
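        //
        // A few illustrative splits, per the rules below:
        //   'addseq' -> mnemonic 'add', carry-setting 's', predication 'eq'
        //   'cpsie'  -> mnemonic 'cps', interrupt-enable ('ie') imod flag
        //   'ittet'  -> mnemonic 'it', IT mask 'tet'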
   4725 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
   4726                                       unsigned &PredicationCode,
   4727                                       bool &CarrySetting,
   4728                                       unsigned &ProcessorIMod,
   4729                                       StringRef &ITMask) {
   4730   PredicationCode = ARMCC::AL;
   4731   CarrySetting = false;
   4732   ProcessorIMod = 0;
   4733 
   4734   // Ignore some mnemonics we know aren't predicated forms.
   4735   //
   4736   // FIXME: Would be nice to autogen this.
   4737   if ((Mnemonic == "movs" && isThumb()) ||
   4738       Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
   4739       Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
   4740       Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
   4741       Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
   4742       Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
   4743       Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
   4744       Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
   4745       Mnemonic == "fmuls")
   4746     return Mnemonic;
   4747 
   4748   // First, split out any predication code. Ignore mnemonics we know aren't
   4749   // predicated but do have a carry-set and so weren't caught above.
   4750   if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
   4751       Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
   4752       Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
   4753       Mnemonic != "sbcs" && Mnemonic != "rscs") {
   4754     unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
   4755       .Case("eq", ARMCC::EQ)
   4756       .Case("ne", ARMCC::NE)
   4757       .Case("hs", ARMCC::HS)
   4758       .Case("cs", ARMCC::HS)
   4759       .Case("lo", ARMCC::LO)
   4760       .Case("cc", ARMCC::LO)
   4761       .Case("mi", ARMCC::MI)
   4762       .Case("pl", ARMCC::PL)
   4763       .Case("vs", ARMCC::VS)
   4764       .Case("vc", ARMCC::VC)
   4765       .Case("hi", ARMCC::HI)
   4766       .Case("ls", ARMCC::LS)
   4767       .Case("ge", ARMCC::GE)
   4768       .Case("lt", ARMCC::LT)
   4769       .Case("gt", ARMCC::GT)
   4770       .Case("le", ARMCC::LE)
   4771       .Case("al", ARMCC::AL)
   4772       .Default(~0U);
   4773     if (CC != ~0U) {
   4774       Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
   4775       PredicationCode = CC;
   4776     }
   4777   }
   4778 
   4779   // Next, determine if we have a carry setting bit. We explicitly ignore all
   4780   // the instructions we know end in 's'.
   4781   if (Mnemonic.endswith("s") &&
   4782       !(Mnemonic == "cps" || Mnemonic == "mls" ||
   4783         Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
   4784         Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
   4785         Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
   4786         Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
   4787         Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
   4788         Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
   4789         Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
   4790         Mnemonic == "vfms" || Mnemonic == "vfnms" ||
   4791         (Mnemonic == "movs" && isThumb()))) {
   4792     Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
   4793     CarrySetting = true;
   4794   }
   4795 
   4796   // The "cps" instruction can have a interrupt mode operand which is glued into
   4797   // the mnemonic. Check if this is the case, split it and parse the imod op
   4798   if (Mnemonic.startswith("cps")) {
   4799     // Split out any imod code.
   4800     unsigned IMod =
   4801       StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
   4802       .Case("ie", ARM_PROC::IE)
   4803       .Case("id", ARM_PROC::ID)
   4804       .Default(~0U);
   4805     if (IMod != ~0U) {
   4806       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
   4807       ProcessorIMod = IMod;
   4808     }
   4809   }
   4810 
   4811   // The "it" instruction has the condition mask on the end of the mnemonic.
   4812   if (Mnemonic.startswith("it")) {
   4813     ITMask = Mnemonic.slice(2, Mnemonic.size());
   4814     Mnemonic = Mnemonic.slice(0, 2);
   4815   }
   4816 
   4817   return Mnemonic;
   4818 }
   4819 
   4820 /// \brief Given a canonical mnemonic, determine if the instruction ever allows
   4821 /// inclusion of carry set or predication code operands.
   4822 //
   4823 // FIXME: It would be nice to autogen this.
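        //
        // For example, 'add' can accept both an 's' suffix and a condition code
        // ('addseq'), while 'cbz' accepts neither.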
   4824 void ARMAsmParser::
   4825 getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
   4826                       bool &CanAcceptPredicationCode) {
   4827   if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
   4828       Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
   4829       Mnemonic == "add" || Mnemonic == "adc" ||
   4830       Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
   4831       Mnemonic == "orr" || Mnemonic == "mvn" ||
   4832       Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
   4833       Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
   4834       Mnemonic == "vfm" || Mnemonic == "vfnm" ||
   4835       (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
   4836                       Mnemonic == "mla" || Mnemonic == "smlal" ||
   4837                       Mnemonic == "umlal" || Mnemonic == "umull"))) {
   4838     CanAcceptCarrySet = true;
   4839   } else
   4840     CanAcceptCarrySet = false;
   4841 
   4842   if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
   4843       Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
   4844       Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
   4845       Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
   4846       Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
   4847       (Mnemonic == "clrex" && !isThumb()) ||
   4848       (Mnemonic == "nop" && isThumbOne()) ||
   4849       ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
   4850         Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
   4851         Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
   4852       ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
   4853        !isThumb()) ||
   4854       Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
   4855     CanAcceptPredicationCode = false;
   4856   } else
   4857     CanAcceptPredicationCode = true;
   4858 
   4859   if (isThumb()) {
   4860     if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
   4861         Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
   4862       CanAcceptPredicationCode = false;
   4863   }
   4864 }
   4865 
   4866 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
   4867                                SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   4868   // FIXME: This is all horribly hacky. We really need a better way to deal
   4869   // with optional operands like this in the matcher table.
   4870 
   4871   // The 'mov' mnemonic is special. One variant has a cc_out operand, while
   4872   // another does not. Specifically, the MOVW instruction does not. So we
   4873   // special case it here and remove the defaulted (non-setting) cc_out
   4874   // operand if that's the instruction we're trying to match.
   4875   //
   4876   // We do this as post-processing of the explicit operands rather than just
   4877   // conditionally adding the cc_out in the first place because we need
   4878   // to check the type of the parsed immediate operand.
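          //
          // For example, 'mov r0, #0xabc' can only be MOVW (the value fits in
          // 16 bits but is not a valid modified immediate), and MOVW has no
          // flag-setting form, so the defaulted cc_out operand must be removed
          // for it to match.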
   4879   if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
   4880       !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
   4881       static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
   4882       static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
   4883     return true;
   4884 
   4885   // Register-register 'add' for thumb does not have a cc_out operand
   4886   // when there are only two register operands.
   4887   if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
   4888       static_cast<ARMOperand*>(Operands[3])->isReg() &&
   4889       static_cast<ARMOperand*>(Operands[4])->isReg() &&
   4890       static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
   4891     return true;
   4892   // Register-register 'add' for thumb does not have a cc_out operand
   4893   // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
   4894   // have to check the immediate range here since Thumb2 has a variant
   4895   // that can handle a different range and has a cc_out operand.
   4896   if (((isThumb() && Mnemonic == "add") ||
   4897        (isThumbTwo() && Mnemonic == "sub")) &&
   4898       Operands.size() == 6 &&
   4899       static_cast<ARMOperand*>(Operands[3])->isReg() &&
   4900       static_cast<ARMOperand*>(Operands[4])->isReg() &&
   4901       static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
   4902       static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
   4903       ((Mnemonic == "add" &&static_cast<ARMOperand*>(Operands[5])->isReg()) ||
   4904        static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
   4905     return true;
   4906   // For Thumb2, add/sub immediate does not have a cc_out operand for the
   4907   // imm0_4095 variant. That's the least-preferred variant when
   4908   // selecting via the generic "add" mnemonic, so to know that we
   4909   // should remove the cc_out operand, we have to explicitly check that
   4910   // it's not one of the other variants. Ugh.
   4911   if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
   4912       Operands.size() == 6 &&
   4913       static_cast<ARMOperand*>(Operands[3])->isReg() &&
   4914       static_cast<ARMOperand*>(Operands[4])->isReg() &&
   4915       static_cast<ARMOperand*>(Operands[5])->isImm()) {
   4916     // Nest conditions rather than one big 'if' statement for readability.
   4917     //
   4918     // If either register is a high reg, it's either one of the SP
   4919     // variants (handled above) or a 32-bit encoding, so we just
   4920     // check against T3. If the second register is the PC, this is an
   4921     // alternate form of ADR, which uses encoding T4, so check for that too.
   4922     if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
   4923          !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
   4924         static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
   4925         static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
   4926       return false;
   4927     // If both registers are low, we're in an IT block, and the immediate is
   4928     // in range, we should use encoding T1 instead, which has a cc_out.
   4929     if (inITBlock() &&
   4930         isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
   4931         isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
   4932         static_cast<ARMOperand*>(Operands[5])->isImm0_7())
   4933       return false;
   4934 
   4935     // Otherwise, we use encoding T4, which does not have a cc_out
   4936     // operand.
   4937     return true;
   4938   }
   4939 
   4940   // The thumb2 multiply instruction doesn't have a CCOut register, so
   4941   // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
   4942   // use the 16-bit encoding or not.
   4943   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
   4944       static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
   4945       static_cast<ARMOperand*>(Operands[3])->isReg() &&
   4946       static_cast<ARMOperand*>(Operands[4])->isReg() &&
   4947       static_cast<ARMOperand*>(Operands[5])->isReg() &&
   4948       // If the registers aren't low regs, the destination reg isn't the
   4949       // same as one of the source regs, or the cc_out operand is zero
   4950       // outside of an IT block, we have to use the 32-bit encoding, so
   4951       // remove the cc_out operand.
   4952       (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
   4953        !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
   4954        !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
   4955        !inITBlock() ||
   4956        (static_cast<ARMOperand*>(Operands[3])->getReg() !=
   4957         static_cast<ARMOperand*>(Operands[5])->getReg() &&
   4958         static_cast<ARMOperand*>(Operands[3])->getReg() !=
   4959         static_cast<ARMOperand*>(Operands[4])->getReg())))
   4960     return true;
   4961 
   4962   // Also check the 'mul' syntax variant that doesn't specify an explicit
   4963   // destination register.
   4964   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
   4965       static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
   4966       static_cast<ARMOperand*>(Operands[3])->isReg() &&
   4967       static_cast<ARMOperand*>(Operands[4])->isReg() &&
   4968       // If the registers aren't low regs  or the cc_out operand is zero
   4969       // outside of an IT block, we have to use the 32-bit encoding, so
   4970       // remove the cc_out operand.
   4971       (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
   4972        !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
   4973        !inITBlock()))
   4974     return true;
   4975 
   4976 
   4977 
   4978   // Register-register 'add/sub' for thumb does not have a cc_out operand
   4979   // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
   4980   // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
   4981   // right, this will result in better diagnostics (which operand is off)
   4982   // anyway.
   4983   if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
   4984       (Operands.size() == 5 || Operands.size() == 6) &&
   4985       static_cast<ARMOperand*>(Operands[3])->isReg() &&
   4986       static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
   4987       static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
   4988       (static_cast<ARMOperand*>(Operands[4])->isImm() ||
   4989        (Operands.size() == 6 &&
   4990         static_cast<ARMOperand*>(Operands[5])->isImm())))
   4991     return true;
   4992 
   4993   return false;
   4994 }
   4995 
   4996 static bool isDataTypeToken(StringRef Tok) {
   4997   return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
   4998     Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
   4999     Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
   5000     Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
   5001     Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
   5002     Tok == ".f" || Tok == ".d";
   5003 }
   5004 
   5005 // FIXME: This bit should probably be handled via an explicit match class
   5006 // in the .td files that matches the suffix instead of having it be
   5007 // a literal string token the way it is now.
   5008 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
   5009   return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
   5010 }
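        // For example, a datatype suffix such as '.32' on a 'vldm'/'vstm'
        // mnemonic is simply dropped when the mnemonic is split up in
        // ParseInstruction below.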
   5011 
   5012 static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
   5013 /// Parse an ARM instruction mnemonic followed by its operands.
   5014 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
   5015                                     SMLoc NameLoc,
   5016                                SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   5017   // Apply mnemonic aliases before doing anything else, as the destination
   5018   // mnemonic may include suffixes and we want to handle them normally.
   5019   // The generic tblgen'erated code does this later, at the start of
   5020   // MatchInstructionImpl(), but that's too late for aliases that include
   5021   // any sort of suffix.
   5022   unsigned AvailableFeatures = getAvailableFeatures();
   5023   applyMnemonicAliases(Name, AvailableFeatures);
   5024 
   5025   // First check for the ARM-specific .req directive.
   5026   if (Parser.getTok().is(AsmToken::Identifier) &&
   5027       Parser.getTok().getIdentifier() == ".req") {
   5028     parseDirectiveReq(Name, NameLoc);
   5029     // We always return 'error' for this, as we're done with this
   5030     // statement and don't need to match the instruction.
   5031     return true;
   5032   }
   5033 
   5034   // Create the leading tokens for the mnemonic, split by '.' characters.
   5035   size_t Start = 0, Next = Name.find('.');
   5036   StringRef Mnemonic = Name.slice(Start, Next);
   5037 
   5038   // Split out the predication code and carry setting flag from the mnemonic.
   5039   unsigned PredicationCode;
   5040   unsigned ProcessorIMod;
   5041   bool CarrySetting;
   5042   StringRef ITMask;
   5043   Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
   5044                            ProcessorIMod, ITMask);
   5045 
   5046   // In Thumb1, only the branch (B) instruction can be predicated.
   5047   if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
   5048     Parser.eatToEndOfStatement();
   5049     return Error(NameLoc, "conditional execution not supported in Thumb1");
   5050   }
   5051 
   5052   Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
   5053 
   5054   // Handle the IT instruction ITMask. Convert it to a bitmask. This
   5055   // is the mask as it will be for the IT encoding if the conditional
   5056   // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
   5057   // where the conditional bit0 is zero, the instruction post-processing
   5058   // will adjust the mask accordingly.
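          // For example, 'ittet' arrives here with ITMask == "tet" and the loop
          // below builds Mask == 0b1011: each 't' contributes a set bit and the
          // lowest set bit marks the length of the block (the adjustment for a
          // condition whose bit0 is zero happens later, as noted above).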
   5059   if (Mnemonic == "it") {
   5060     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
   5061     if (ITMask.size() > 3) {
   5062       Parser.eatToEndOfStatement();
   5063       return Error(Loc, "too many conditions on IT instruction");
   5064     }
   5065     unsigned Mask = 8;
   5066     for (unsigned i = ITMask.size(); i != 0; --i) {
   5067       char pos = ITMask[i - 1];
   5068       if (pos != 't' && pos != 'e') {
   5069         Parser.eatToEndOfStatement();
   5070         return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
   5071       }
   5072       Mask >>= 1;
   5073       if (ITMask[i - 1] == 't')
   5074         Mask |= 8;
   5075     }
   5076     Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
   5077   }
   5078 
   5079   // FIXME: This is all a pretty gross hack. We should automatically handle
   5080   // optional operands like this via tblgen.
   5081 
   5082   // Next, add the CCOut and ConditionCode operands, if needed.
   5083   //
   5084   // For mnemonics which can ever incorporate a carry setting bit or predication
   5085   // code, our matching model involves us always generating CCOut and
   5086   // ConditionCode operands to match the mnemonic "as written" and then we let
   5087   // the matcher deal with finding the right instruction or generating an
   5088   // appropriate error.
   5089   bool CanAcceptCarrySet, CanAcceptPredicationCode;
   5090   getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
   5091 
   5092   // If we had a carry-set on an instruction that can't do that, issue an
   5093   // error.
   5094   if (!CanAcceptCarrySet && CarrySetting) {
   5095     Parser.eatToEndOfStatement();
   5096     return Error(NameLoc, "instruction '" + Mnemonic +
   5097                  "' can not set flags, but 's' suffix specified");
   5098   }
   5099   // If we had a predication code on an instruction that can't do that, issue an
   5100   // error.
   5101   if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
   5102     Parser.eatToEndOfStatement();
   5103     return Error(NameLoc, "instruction '" + Mnemonic +
   5104                  "' is not predicable, but condition code specified");
   5105   }
   5106 
   5107   // Add the carry setting operand, if necessary.
   5108   if (CanAcceptCarrySet) {
   5109     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
   5110     Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
   5111                                                Loc));
   5112   }
   5113 
   5114   // Add the predication code operand, if necessary.
   5115   if (CanAcceptPredicationCode) {
   5116     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
   5117                                       CarrySetting);
   5118     Operands.push_back(ARMOperand::CreateCondCode(
   5119                          ARMCC::CondCodes(PredicationCode), Loc));
   5120   }
   5121 
   5122   // Add the processor imod operand, if necessary.
   5123   if (ProcessorIMod) {
   5124     Operands.push_back(ARMOperand::CreateImm(
   5125           MCConstantExpr::Create(ProcessorIMod, getContext()),
   5126                                  NameLoc, NameLoc));
   5127   }
   5128 
   5129   // Add the remaining tokens in the mnemonic.
   5130   while (Next != StringRef::npos) {
   5131     Start = Next;
   5132     Next = Name.find('.', Start + 1);
   5133     StringRef ExtraToken = Name.slice(Start, Next);
   5134 
   5135     // Some NEON instructions have an optional datatype suffix that is
   5136     // completely ignored. Check for that.
   5137     if (isDataTypeToken(ExtraToken) &&
   5138         doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
   5139       continue;
   5140 
   5141     if (ExtraToken != ".n") {
   5142       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
   5143       Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
   5144     }
   5145   }
   5146 
   5147   // Read the remaining operands.
   5148   if (getLexer().isNot(AsmToken::EndOfStatement)) {
   5149     // Read the first operand.
   5150     if (parseOperand(Operands, Mnemonic)) {
   5151       Parser.eatToEndOfStatement();
   5152       return true;
   5153     }
   5154 
   5155     while (getLexer().is(AsmToken::Comma)) {
   5156       Parser.Lex();  // Eat the comma.
   5157 
   5158       // Parse and remember the operand.
   5159       if (parseOperand(Operands, Mnemonic)) {
   5160         Parser.eatToEndOfStatement();
   5161         return true;
   5162       }
   5163     }
   5164   }
   5165 
   5166   if (getLexer().isNot(AsmToken::EndOfStatement)) {
   5167     SMLoc Loc = getLexer().getLoc();
   5168     Parser.eatToEndOfStatement();
   5169     return Error(Loc, "unexpected token in argument list");
   5170   }
   5171 
   5172   Parser.Lex(); // Consume the EndOfStatement
   5173 
   5174   // Some instructions, mostly Thumb, have forms for the same mnemonic that
   5175   // do and don't have a cc_out optional-def operand. With some spot-checks
   5176   // of the operand list, we can figure out which variant we're trying to
   5177   // parse and adjust accordingly before actually matching. We shouldn't ever
    5178   // try to remove a cc_out operand that was explicitly set on the
    5179   // mnemonic, of course (CarrySetting == true). Reason #317 why the
    5180   // table-driven matcher doesn't fit well with the ARM instruction set.
   5181   if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
   5182     ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
   5183     Operands.erase(Operands.begin() + 1);
   5184     delete Op;
   5185   }
   5186 
    5187   // ARM mode 'blx' needs special handling, as the register operand version
   5188   // is predicable, but the label operand version is not. So, we can't rely
   5189   // on the Mnemonic based checking to correctly figure out when to put
   5190   // a k_CondCode operand in the list. If we're trying to match the label
   5191   // version, remove the k_CondCode operand here.
   5192   if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
   5193       static_cast<ARMOperand*>(Operands[2])->isImm()) {
   5194     ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
   5195     Operands.erase(Operands.begin() + 1);
   5196     delete Op;
   5197   }
   5198 
   5199   // Adjust operands of ldrexd/strexd to MCK_GPRPair.
   5200   // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
   5201   // a single GPRPair reg operand is used in the .td file to replace the two
    5202   // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
   5203   // expressed as a GPRPair, so we have to manually merge them.
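           // For example, "ldrexd r0, r1, [r2]" parses r0 and r1 as two separate
           // GPR operands; the code below folds them into the single GPRPair
           // super-register covering r0/r1, which is what the matcher expects.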
   5204   // FIXME: We would really like to be able to tablegen'erate this.
   5205   if (!isThumb() && Operands.size() > 4 &&
   5206       (Mnemonic == "ldrexd" || Mnemonic == "strexd")) {
   5207     bool isLoad = (Mnemonic == "ldrexd");
   5208     unsigned Idx = isLoad ? 2 : 3;
   5209     ARMOperand* Op1 = static_cast<ARMOperand*>(Operands[Idx]);
   5210     ARMOperand* Op2 = static_cast<ARMOperand*>(Operands[Idx+1]);
   5211 
   5212     const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
   5213     // Adjust only if Op1 and Op2 are GPRs.
   5214     if (Op1->isReg() && Op2->isReg() && MRC.contains(Op1->getReg()) &&
   5215         MRC.contains(Op2->getReg())) {
   5216       unsigned Reg1 = Op1->getReg();
   5217       unsigned Reg2 = Op2->getReg();
   5218       unsigned Rt = MRI->getEncodingValue(Reg1);
   5219       unsigned Rt2 = MRI->getEncodingValue(Reg2);
   5220 
   5221       // Rt2 must be Rt + 1 and Rt must be even.
   5222       if (Rt + 1 != Rt2 || (Rt & 1)) {
   5223         Error(Op2->getStartLoc(), isLoad ?
   5224             "destination operands must be sequential" :
   5225             "source operands must be sequential");
   5226         return true;
   5227       }
   5228       unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
   5229           &(MRI->getRegClass(ARM::GPRPairRegClassID)));
   5230       Operands.erase(Operands.begin() + Idx, Operands.begin() + Idx + 2);
   5231       Operands.insert(Operands.begin() + Idx, ARMOperand::CreateReg(
   5232             NewReg, Op1->getStartLoc(), Op2->getEndLoc()));
   5233       delete Op1;
   5234       delete Op2;
   5235     }
   5236   }
   5237 
   5238   return false;
   5239 }
   5240 
   5241 // Validate context-sensitive operand constraints.
   5242 
    5243 // return 'true' if the register list contains registers other than low GPRs
    5244 // (and HiReg, if one is specified), 'false' otherwise. If Reg is found in
    5245 // the register list, set 'containsReg' to true.
   5246 static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
   5247                                  unsigned HiReg, bool &containsReg) {
   5248   containsReg = false;
   5249   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
   5250     unsigned OpReg = Inst.getOperand(i).getReg();
   5251     if (OpReg == Reg)
   5252       containsReg = true;
   5253     // Anything other than a low register isn't legal here.
   5254     if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
   5255       return true;
   5256   }
   5257   return false;
   5258 }
   5259 
    5260 // Check if the specified register is in the register list of the inst,
   5261 // starting at the indicated operand number.
   5262 static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
   5263   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
   5264     unsigned OpReg = Inst.getOperand(i).getReg();
   5265     if (OpReg == Reg)
   5266       return true;
   5267   }
   5268   return false;
   5269 }
   5270 
   5271 // FIXME: We would really prefer to have MCInstrInfo (the wrapper around
   5272 // the ARMInsts array) instead. Getting that here requires awkward
   5273 // API changes, though. Better way?
   5274 namespace llvm {
   5275 extern const MCInstrDesc ARMInsts[];
   5276 }
   5277 static const MCInstrDesc &getInstDesc(unsigned Opcode) {
   5278   return ARMInsts[Opcode];
   5279 }
   5280 
   5281 // FIXME: We would really like to be able to tablegen'erate this.
   5282 bool ARMAsmParser::
   5283 validateInstruction(MCInst &Inst,
   5284                     const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   5285   const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
   5286   SMLoc Loc = Operands[0]->getStartLoc();
   5287   // Check the IT block state first.
   5288   // NOTE: BKPT instruction has the interesting property of being
   5289   // allowed in IT blocks, but not being predicable.  It just always
   5290   // executes.
   5291   if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
   5292       Inst.getOpcode() != ARM::BKPT) {
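               // Decide whether this slot of the IT block uses the base condition
               // ('t', bit == 1) or its inverse ('e', bit == 0). The first
               // instruction always uses the base condition; later slots read the
               // corresponding bit out of the stored mask.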
   5293     unsigned bit = 1;
   5294     if (ITState.FirstCond)
   5295       ITState.FirstCond = false;
   5296     else
   5297       bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
   5298     // The instruction must be predicable.
   5299     if (!MCID.isPredicable())
   5300       return Error(Loc, "instructions in IT block must be predicable");
   5301     unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
   5302     unsigned ITCond = bit ? ITState.Cond :
   5303       ARMCC::getOppositeCondition(ITState.Cond);
   5304     if (Cond != ITCond) {
   5305       // Find the condition code Operand to get its SMLoc information.
   5306       SMLoc CondLoc;
   5307       for (unsigned i = 1; i < Operands.size(); ++i)
   5308         if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
   5309           CondLoc = Operands[i]->getStartLoc();
   5310       return Error(CondLoc, "incorrect condition in IT block; got '" +
   5311                    StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
   5312                    "', but expected '" +
   5313                    ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
   5314     }
   5315   // Check for non-'al' condition codes outside of the IT block.
   5316   } else if (isThumbTwo() && MCID.isPredicable() &&
   5317              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
   5318              ARMCC::AL && Inst.getOpcode() != ARM::tB &&
   5319              Inst.getOpcode() != ARM::t2B)
   5320     return Error(Loc, "predicated instructions must be in IT block");
   5321 
   5322   switch (Inst.getOpcode()) {
   5323   case ARM::LDRD:
   5324   case ARM::LDRD_PRE:
   5325   case ARM::LDRD_POST: {
   5326     // Rt2 must be Rt + 1.
   5327     unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
   5328     unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
   5329     if (Rt2 != Rt + 1)
   5330       return Error(Operands[3]->getStartLoc(),
   5331                    "destination operands must be sequential");
   5332     return false;
   5333   }
   5334   case ARM::STRD: {
   5335     // Rt2 must be Rt + 1.
   5336     unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
   5337     unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
   5338     if (Rt2 != Rt + 1)
   5339       return Error(Operands[3]->getStartLoc(),
   5340                    "source operands must be sequential");
   5341     return false;
   5342   }
   5343   case ARM::STRD_PRE:
   5344   case ARM::STRD_POST: {
   5345     // Rt2 must be Rt + 1.
   5346     unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
   5347     unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
   5348     if (Rt2 != Rt + 1)
   5349       return Error(Operands[3]->getStartLoc(),
   5350                    "source operands must be sequential");
   5351     return false;
   5352   }
   5353   case ARM::SBFX:
   5354   case ARM::UBFX: {
   5355     // width must be in range [1, 32-lsb]
   5356     unsigned lsb = Inst.getOperand(2).getImm();
   5357     unsigned widthm1 = Inst.getOperand(3).getImm();
   5358     if (widthm1 >= 32 - lsb)
   5359       return Error(Operands[5]->getStartLoc(),
   5360                    "bitfield width must be in range [1,32-lsb]");
   5361     return false;
   5362   }
   5363   case ARM::tLDMIA: {
   5364     // If we're parsing Thumb2, the .w variant is available and handles
   5365     // most cases that are normally illegal for a Thumb1 LDM
   5366     // instruction. We'll make the transformation in processInstruction()
   5367     // if necessary.
   5368     //
   5369     // Thumb LDM instructions are writeback iff the base register is not
   5370     // in the register list.
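               // For example, "ldm r2!, {r0, r1}" needs the '!' since r2 is not in
               // the list, while "ldm r2, {r1, r2}" must omit it; both rules are
               // checked below.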
   5371     unsigned Rn = Inst.getOperand(0).getReg();
   5372     bool hasWritebackToken =
   5373       (static_cast<ARMOperand*>(Operands[3])->isToken() &&
   5374        static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
   5375     bool listContainsBase;
   5376     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
   5377       return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
   5378                    "registers must be in range r0-r7");
   5379     // If we should have writeback, then there should be a '!' token.
   5380     if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
   5381       return Error(Operands[2]->getStartLoc(),
   5382                    "writeback operator '!' expected");
   5383     // If we should not have writeback, there must not be a '!'. This is
   5384     // true even for the 32-bit wide encodings.
   5385     if (listContainsBase && hasWritebackToken)
   5386       return Error(Operands[3]->getStartLoc(),
   5387                    "writeback operator '!' not allowed when base register "
   5388                    "in register list");
   5389 
   5390     break;
   5391   }
   5392   case ARM::t2LDMIA_UPD: {
   5393     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
   5394       return Error(Operands[4]->getStartLoc(),
   5395                    "writeback operator '!' not allowed when base register "
   5396                    "in register list");
   5397     break;
   5398   }
   5399   case ARM::tMUL: {
   5400     // The second source operand must be the same register as the destination
   5401     // operand.
   5402     //
   5403     // In this case, we must directly check the parsed operands because the
   5404     // cvtThumbMultiply() function is written in such a way that it guarantees
   5405     // this first statement is always true for the new Inst.  Essentially, the
   5406     // destination is unconditionally copied into the second source operand
   5407     // without checking to see if it matches what we actually parsed.
   5408     if (Operands.size() == 6 &&
   5409         (((ARMOperand*)Operands[3])->getReg() !=
   5410          ((ARMOperand*)Operands[5])->getReg()) &&
   5411         (((ARMOperand*)Operands[3])->getReg() !=
   5412          ((ARMOperand*)Operands[4])->getReg())) {
   5413       return Error(Operands[3]->getStartLoc(),
   5414                    "destination register must match source register");
   5415     }
   5416     break;
   5417   }
    5418   // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
    5419   // so only issue a diagnostic for Thumb1. The instructions will be
   5420   // switched to the t2 encodings in processInstruction() if necessary.
   5421   case ARM::tPOP: {
   5422     bool listContainsBase;
   5423     if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
   5424         !isThumbTwo())
   5425       return Error(Operands[2]->getStartLoc(),
   5426                    "registers must be in range r0-r7 or pc");
   5427     break;
   5428   }
   5429   case ARM::tPUSH: {
   5430     bool listContainsBase;
   5431     if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
   5432         !isThumbTwo())
   5433       return Error(Operands[2]->getStartLoc(),
   5434                    "registers must be in range r0-r7 or lr");
   5435     break;
   5436   }
   5437   case ARM::tSTMIA_UPD: {
   5438     bool listContainsBase;
   5439     if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
   5440       return Error(Operands[4]->getStartLoc(),
   5441                    "registers must be in range r0-r7");
   5442     break;
   5443   }
   5444   case ARM::tADDrSP: {
   5445     // If the non-SP source operand and the destination operand are not the
   5446     // same, we need thumb2 (for the wide encoding), or we have an error.
   5447     if (!isThumbTwo() &&
   5448         Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
   5449       return Error(Operands[4]->getStartLoc(),
   5450                    "source register must be the same as destination");
   5451     }
   5452     break;
   5453   }
   5454   }
   5455 
   5456   return false;
   5457 }
   5458 
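         // Map one of the pseudo "...Asm..." VST opcodes used for matching to the
         // real instruction opcode, and report the register-list spacing: roughly,
         // 1 for the 'd' forms (consecutive D registers) and 2 for the 'q' forms
         // (every other D register). Used by the VST alias handling in
         // processInstruction() below.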
   5459 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
   5460   switch(Opc) {
   5461   default: llvm_unreachable("unexpected opcode!");
   5462   // VST1LN
   5463   case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
   5464   case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
   5465   case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
   5466   case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
   5467   case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
   5468   case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
   5469   case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
   5470   case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
   5471   case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
   5472 
   5473   // VST2LN
   5474   case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
   5475   case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
   5476   case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
   5477   case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
   5478   case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
   5479 
   5480   case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
   5481   case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
   5482   case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
   5483   case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
   5484   case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
   5485 
   5486   case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
   5487   case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
   5488   case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
   5489   case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
   5490   case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
   5491 
   5492   // VST3LN
   5493   case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
   5494   case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
   5495   case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
    5496   case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
   5497   case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
   5498   case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
   5499   case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
   5500   case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
   5501   case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
   5502   case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
   5503   case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
   5504   case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
   5505   case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
   5506   case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
   5507   case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
   5508 
   5509   // VST3
   5510   case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
   5511   case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
   5512   case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
   5513   case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
   5514   case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
   5515   case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
   5516   case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
   5517   case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
   5518   case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
   5519   case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
   5520   case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
   5521   case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
   5522   case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
   5523   case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
   5524   case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
   5525   case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
   5526   case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
   5527   case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
   5528 
   5529   // VST4LN
   5530   case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
   5531   case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
   5532   case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
    5533   case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
   5534   case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
   5535   case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
   5536   case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
   5537   case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
   5538   case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
   5539   case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
   5540   case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
   5541   case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
   5542   case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
   5543   case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
   5544   case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
   5545 
   5546   // VST4
   5547   case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
   5548   case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
   5549   case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
   5550   case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
   5551   case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
   5552   case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
   5553   case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
   5554   case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
   5555   case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
   5556   case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
   5557   case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
   5558   case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
   5559   case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
   5560   case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
   5561   case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
   5562   case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
   5563   case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
   5564   case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
   5565   }
   5566 }
   5567 
   5568 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
   5569   switch(Opc) {
   5570   default: llvm_unreachable("unexpected opcode!");
   5571   // VLD1LN
   5572   case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
   5573   case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
   5574   case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
   5575   case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
   5576   case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
   5577   case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
   5578   case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
   5579   case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
   5580   case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
   5581 
   5582   // VLD2LN
   5583   case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
   5584   case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
   5585   case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
    5586   case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
   5587   case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
   5588   case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
   5589   case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
   5590   case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
   5591   case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
   5592   case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
   5593   case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
   5594   case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
   5595   case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
   5596   case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
   5597   case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
   5598 
   5599   // VLD3DUP
   5600   case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
   5601   case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
   5602   case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
    5603   case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
    5604   case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
   5605   case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
   5606   case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
   5607   case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
   5608   case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
   5609   case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
   5610   case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
   5611   case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
   5612   case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
   5613   case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
   5614   case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
   5615   case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
   5616   case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
   5617   case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
   5618 
   5619   // VLD3LN
   5620   case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
   5621   case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
   5622   case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
    5623   case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
   5624   case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
   5625   case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
   5626   case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
   5627   case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
   5628   case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
   5629   case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
   5630   case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
   5631   case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
   5632   case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
   5633   case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
   5634   case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
   5635 
   5636   // VLD3
   5637   case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
   5638   case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
   5639   case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
   5640   case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
   5641   case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
   5642   case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
   5643   case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
   5644   case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
   5645   case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
   5646   case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
   5647   case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
   5648   case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
   5649   case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
   5650   case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
   5651   case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
   5652   case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
   5653   case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
   5654   case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
   5655 
   5656   // VLD4LN
   5657   case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
   5658   case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
   5659   case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
    5660   case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
   5661   case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
   5662   case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
   5663   case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
   5664   case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
   5665   case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
   5666   case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
   5667   case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
   5668   case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
   5669   case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
   5670   case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
   5671   case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
   5672 
   5673   // VLD4DUP
   5674   case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
   5675   case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
   5676   case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
    5677   case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
    5678   case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
   5679   case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
   5680   case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
   5681   case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
   5682   case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
   5683   case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
   5684   case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
   5685   case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
   5686   case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
   5687   case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
   5688   case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
   5689   case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
   5690   case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
   5691   case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
   5692 
   5693   // VLD4
   5694   case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
   5695   case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
   5696   case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
   5697   case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
   5698   case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
   5699   case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
   5700   case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
   5701   case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
   5702   case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
   5703   case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
   5704   case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
   5705   case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
   5706   case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
   5707   case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
   5708   case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
   5709   case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
   5710   case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
   5711   case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
   5712   }
   5713 }
   5714 
   5715 bool ARMAsmParser::
   5716 processInstruction(MCInst &Inst,
   5717                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
   5718   switch (Inst.getOpcode()) {
   5719   // Alias for alternate form of 'ADR Rd, #imm' instruction.
   5720   case ARM::ADDri: {
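               // Only rewrite when the source register is PC and the optional
               // cc_out (operand 5) is zero, i.e. plain "add Rd, pc, #imm" with no
               // 's' suffix; everything else is left as a normal ADDri.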
   5721     if (Inst.getOperand(1).getReg() != ARM::PC ||
   5722         Inst.getOperand(5).getReg() != 0)
   5723       return false;
   5724     MCInst TmpInst;
   5725     TmpInst.setOpcode(ARM::ADR);
   5726     TmpInst.addOperand(Inst.getOperand(0));
   5727     TmpInst.addOperand(Inst.getOperand(2));
   5728     TmpInst.addOperand(Inst.getOperand(3));
   5729     TmpInst.addOperand(Inst.getOperand(4));
   5730     Inst = TmpInst;
   5731     return true;
   5732   }
   5733   // Aliases for alternate PC+imm syntax of LDR instructions.
   5734   case ARM::t2LDRpcrel:
   5735     // Select the narrow version if the immediate will fit.
   5736     if (Inst.getOperand(1).getImm() > 0 &&
   5737         Inst.getOperand(1).getImm() <= 0xff)
   5738       Inst.setOpcode(ARM::tLDRpci);
   5739     else
   5740       Inst.setOpcode(ARM::t2LDRpci);
   5741     return true;
   5742   case ARM::t2LDRBpcrel:
   5743     Inst.setOpcode(ARM::t2LDRBpci);
   5744     return true;
   5745   case ARM::t2LDRHpcrel:
   5746     Inst.setOpcode(ARM::t2LDRHpci);
   5747     return true;
   5748   case ARM::t2LDRSBpcrel:
   5749     Inst.setOpcode(ARM::t2LDRSBpci);
   5750     return true;
   5751   case ARM::t2LDRSHpcrel:
   5752     Inst.setOpcode(ARM::t2LDRSHpci);
   5753     return true;
   5754   // Handle NEON VST complex aliases.
   5755   case ARM::VST1LNdWB_register_Asm_8:
   5756   case ARM::VST1LNdWB_register_Asm_16:
   5757   case ARM::VST1LNdWB_register_Asm_32: {
   5758     MCInst TmpInst;
   5759     // Shuffle the operands around so the lane index operand is in the
   5760     // right place.
   5761     unsigned Spacing;
   5762     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   5763     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   5764     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   5765     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   5766     TmpInst.addOperand(Inst.getOperand(4)); // Rm
   5767     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   5768     TmpInst.addOperand(Inst.getOperand(1)); // lane
   5769     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
   5770     TmpInst.addOperand(Inst.getOperand(6));
   5771     Inst = TmpInst;
   5772     return true;
   5773   }
   5774 
   5775   case ARM::VST2LNdWB_register_Asm_8:
   5776   case ARM::VST2LNdWB_register_Asm_16:
   5777   case ARM::VST2LNdWB_register_Asm_32:
   5778   case ARM::VST2LNqWB_register_Asm_16:
   5779   case ARM::VST2LNqWB_register_Asm_32: {
   5780     MCInst TmpInst;
   5781     // Shuffle the operands around so the lane index operand is in the
   5782     // right place.
   5783     unsigned Spacing;
   5784     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   5785     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   5786     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   5787     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   5788     TmpInst.addOperand(Inst.getOperand(4)); // Rm
   5789     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   5790     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   5791                                             Spacing));
   5792     TmpInst.addOperand(Inst.getOperand(1)); // lane
   5793     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
   5794     TmpInst.addOperand(Inst.getOperand(6));
   5795     Inst = TmpInst;
   5796     return true;
   5797   }
   5798 
   5799   case ARM::VST3LNdWB_register_Asm_8:
   5800   case ARM::VST3LNdWB_register_Asm_16:
   5801   case ARM::VST3LNdWB_register_Asm_32:
   5802   case ARM::VST3LNqWB_register_Asm_16:
   5803   case ARM::VST3LNqWB_register_Asm_32: {
   5804     MCInst TmpInst;
   5805     // Shuffle the operands around so the lane index operand is in the
   5806     // right place.
   5807     unsigned Spacing;
   5808     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   5809     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   5810     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   5811     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   5812     TmpInst.addOperand(Inst.getOperand(4)); // Rm
   5813     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   5814     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   5815                                             Spacing));
   5816     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   5817                                             Spacing * 2));
   5818     TmpInst.addOperand(Inst.getOperand(1)); // lane
   5819     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
   5820     TmpInst.addOperand(Inst.getOperand(6));
   5821     Inst = TmpInst;
   5822     return true;
   5823   }
   5824 
   5825   case ARM::VST4LNdWB_register_Asm_8:
   5826   case ARM::VST4LNdWB_register_Asm_16:
   5827   case ARM::VST4LNdWB_register_Asm_32:
   5828   case ARM::VST4LNqWB_register_Asm_16:
   5829   case ARM::VST4LNqWB_register_Asm_32: {
   5830     MCInst TmpInst;
   5831     // Shuffle the operands around so the lane index operand is in the
   5832     // right place.
   5833     unsigned Spacing;
   5834     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   5835     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   5836     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   5837     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   5838     TmpInst.addOperand(Inst.getOperand(4)); // Rm
   5839     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   5840     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   5841                                             Spacing));
   5842     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   5843                                             Spacing * 2));
   5844     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   5845                                             Spacing * 3));
   5846     TmpInst.addOperand(Inst.getOperand(1)); // lane
   5847     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
   5848     TmpInst.addOperand(Inst.getOperand(6));
   5849     Inst = TmpInst;
   5850     return true;
   5851   }
   5852 
   5853   case ARM::VST1LNdWB_fixed_Asm_8:
   5854   case ARM::VST1LNdWB_fixed_Asm_16:
   5855   case ARM::VST1LNdWB_fixed_Asm_32: {
   5856     MCInst TmpInst;
   5857     // Shuffle the operands around so the lane index operand is in the
   5858     // right place.
   5859     unsigned Spacing;
   5860     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   5861     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   5862     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   5863     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   5864     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   5865     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   5866     TmpInst.addOperand(Inst.getOperand(1)); // lane
   5867     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   5868     TmpInst.addOperand(Inst.getOperand(5));
   5869     Inst = TmpInst;
   5870     return true;
   5871   }
   5872 
   5873   case ARM::VST2LNdWB_fixed_Asm_8:
   5874   case ARM::VST2LNdWB_fixed_Asm_16:
   5875   case ARM::VST2LNdWB_fixed_Asm_32:
   5876   case ARM::VST2LNqWB_fixed_Asm_16:
   5877   case ARM::VST2LNqWB_fixed_Asm_32: {
   5878     MCInst TmpInst;
   5879     // Shuffle the operands around so the lane index operand is in the
   5880     // right place.
   5881     unsigned Spacing;
   5882     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   5883     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   5884     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   5885     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   5886     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   5887     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   5888     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   5889                                             Spacing));
   5890     TmpInst.addOperand(Inst.getOperand(1)); // lane
   5891     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   5892     TmpInst.addOperand(Inst.getOperand(5));
   5893     Inst = TmpInst;
   5894     return true;
   5895   }
   5896 
   5897   case ARM::VST3LNdWB_fixed_Asm_8:
   5898   case ARM::VST3LNdWB_fixed_Asm_16:
   5899   case ARM::VST3LNdWB_fixed_Asm_32:
   5900   case ARM::VST3LNqWB_fixed_Asm_16:
   5901   case ARM::VST3LNqWB_fixed_Asm_32: {
   5902     MCInst TmpInst;
   5903     // Shuffle the operands around so the lane index operand is in the
   5904     // right place.
   5905     unsigned Spacing;
   5906     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   5907     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   5908     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   5909     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   5910     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   5911     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   5912     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   5913                                             Spacing));
   5914     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   5915                                             Spacing * 2));
   5916     TmpInst.addOperand(Inst.getOperand(1)); // lane
   5917     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   5918     TmpInst.addOperand(Inst.getOperand(5));
   5919     Inst = TmpInst;
   5920     return true;
   5921   }
   5922 
   5923   case ARM::VST4LNdWB_fixed_Asm_8:
   5924   case ARM::VST4LNdWB_fixed_Asm_16:
   5925   case ARM::VST4LNdWB_fixed_Asm_32:
   5926   case ARM::VST4LNqWB_fixed_Asm_16:
   5927   case ARM::VST4LNqWB_fixed_Asm_32: {
   5928     MCInst TmpInst;
   5929     // Shuffle the operands around so the lane index operand is in the
   5930     // right place.
   5931     unsigned Spacing;
   5932     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   5933     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   5934     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   5935     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   5936     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   5937     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   5938     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   5939                                             Spacing));
   5940     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   5941                                             Spacing * 2));
   5942     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   5943                                             Spacing * 3));
   5944     TmpInst.addOperand(Inst.getOperand(1)); // lane
   5945     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   5946     TmpInst.addOperand(Inst.getOperand(5));
   5947     Inst = TmpInst;
   5948     return true;
   5949   }
   5950 
   5951   case ARM::VST1LNdAsm_8:
   5952   case ARM::VST1LNdAsm_16:
   5953   case ARM::VST1LNdAsm_32: {
   5954     MCInst TmpInst;
   5955     // Shuffle the operands around so the lane index operand is in the
   5956     // right place.
   5957     unsigned Spacing;
   5958     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   5959     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   5960     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   5961     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   5962     TmpInst.addOperand(Inst.getOperand(1)); // lane
   5963     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   5964     TmpInst.addOperand(Inst.getOperand(5));
   5965     Inst = TmpInst;
   5966     return true;
   5967   }
   5968 
   5969   case ARM::VST2LNdAsm_8:
   5970   case ARM::VST2LNdAsm_16:
   5971   case ARM::VST2LNdAsm_32:
   5972   case ARM::VST2LNqAsm_16:
   5973   case ARM::VST2LNqAsm_32: {
   5974     MCInst TmpInst;
   5975     // Shuffle the operands around so the lane index operand is in the
   5976     // right place.
   5977     unsigned Spacing;
   5978     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   5979     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   5980     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   5981     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   5982     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   5983                                             Spacing));
   5984     TmpInst.addOperand(Inst.getOperand(1)); // lane
   5985     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   5986     TmpInst.addOperand(Inst.getOperand(5));
   5987     Inst = TmpInst;
   5988     return true;
   5989   }
   5990 
   5991   case ARM::VST3LNdAsm_8:
   5992   case ARM::VST3LNdAsm_16:
   5993   case ARM::VST3LNdAsm_32:
   5994   case ARM::VST3LNqAsm_16:
   5995   case ARM::VST3LNqAsm_32: {
   5996     MCInst TmpInst;
   5997     // Shuffle the operands around so the lane index operand is in the
   5998     // right place.
   5999     unsigned Spacing;
   6000     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   6001     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6002     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6003     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6004     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6005                                             Spacing));
   6006     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6007                                             Spacing * 2));
   6008     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6009     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6010     TmpInst.addOperand(Inst.getOperand(5));
   6011     Inst = TmpInst;
   6012     return true;
   6013   }
   6014 
   6015   case ARM::VST4LNdAsm_8:
   6016   case ARM::VST4LNdAsm_16:
   6017   case ARM::VST4LNdAsm_32:
   6018   case ARM::VST4LNqAsm_16:
   6019   case ARM::VST4LNqAsm_32: {
   6020     MCInst TmpInst;
   6021     // Shuffle the operands around so the lane index operand is in the
   6022     // right place.
   6023     unsigned Spacing;
   6024     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   6025     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6026     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6027     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6028     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6029                                             Spacing));
   6030     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6031                                             Spacing * 2));
   6032     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6033                                             Spacing * 3));
   6034     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6035     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6036     TmpInst.addOperand(Inst.getOperand(5));
   6037     Inst = TmpInst;
   6038     return true;
   6039   }
   6040 
   6041   // Handle NEON VLD complex aliases.
   6042   case ARM::VLD1LNdWB_register_Asm_8:
   6043   case ARM::VLD1LNdWB_register_Asm_16:
   6044   case ARM::VLD1LNdWB_register_Asm_32: {
   6045     MCInst TmpInst;
   6046     // Shuffle the operands around so the lane index operand is in the
   6047     // right place.
   6048     unsigned Spacing;
   6049     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6050     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6051     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   6052     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6053     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6054     TmpInst.addOperand(Inst.getOperand(4)); // Rm
   6055     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
   6056     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6057     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
   6058     TmpInst.addOperand(Inst.getOperand(6));
   6059     Inst = TmpInst;
   6060     return true;
   6061   }
   6062 
   6063   case ARM::VLD2LNdWB_register_Asm_8:
   6064   case ARM::VLD2LNdWB_register_Asm_16:
   6065   case ARM::VLD2LNdWB_register_Asm_32:
   6066   case ARM::VLD2LNqWB_register_Asm_16:
   6067   case ARM::VLD2LNqWB_register_Asm_32: {
   6068     MCInst TmpInst;
   6069     // Shuffle the operands around so the lane index operand is in the
   6070     // right place.
   6071     unsigned Spacing;
   6072     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6073     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6074     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6075                                             Spacing));
   6076     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   6077     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6078     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6079     TmpInst.addOperand(Inst.getOperand(4)); // Rm
   6080     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
   6081     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6082                                             Spacing));
   6083     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6084     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
   6085     TmpInst.addOperand(Inst.getOperand(6));
   6086     Inst = TmpInst;
   6087     return true;
   6088   }
   6089 
   6090   case ARM::VLD3LNdWB_register_Asm_8:
   6091   case ARM::VLD3LNdWB_register_Asm_16:
   6092   case ARM::VLD3LNdWB_register_Asm_32:
   6093   case ARM::VLD3LNqWB_register_Asm_16:
   6094   case ARM::VLD3LNqWB_register_Asm_32: {
   6095     MCInst TmpInst;
   6096     // Shuffle the operands around so the lane index operand is in the
   6097     // right place.
   6098     unsigned Spacing;
   6099     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6100     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6101     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6102                                             Spacing));
   6103     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6104                                             Spacing * 2));
   6105     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   6106     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6107     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6108     TmpInst.addOperand(Inst.getOperand(4)); // Rm
   6109     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
   6110     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6111                                             Spacing));
   6112     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6113                                             Spacing * 2));
   6114     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6115     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
   6116     TmpInst.addOperand(Inst.getOperand(6));
   6117     Inst = TmpInst;
   6118     return true;
   6119   }
   6120 
   6121   case ARM::VLD4LNdWB_register_Asm_8:
   6122   case ARM::VLD4LNdWB_register_Asm_16:
   6123   case ARM::VLD4LNdWB_register_Asm_32:
   6124   case ARM::VLD4LNqWB_register_Asm_16:
   6125   case ARM::VLD4LNqWB_register_Asm_32: {
   6126     MCInst TmpInst;
   6127     // Shuffle the operands around so the lane index operand is in the
   6128     // right place.
   6129     unsigned Spacing;
   6130     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6131     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6132     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6133                                             Spacing));
   6134     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6135                                             Spacing * 2));
   6136     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6137                                             Spacing * 3));
   6138     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   6139     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6140     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6141     TmpInst.addOperand(Inst.getOperand(4)); // Rm
   6142     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
   6143     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6144                                             Spacing));
   6145     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6146                                             Spacing * 2));
   6147     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6148                                             Spacing * 3));
   6149     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6150     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
   6151     TmpInst.addOperand(Inst.getOperand(6));
   6152     Inst = TmpInst;
   6153     return true;
   6154   }
   6155 
   6156   case ARM::VLD1LNdWB_fixed_Asm_8:
   6157   case ARM::VLD1LNdWB_fixed_Asm_16:
   6158   case ARM::VLD1LNdWB_fixed_Asm_32: {
   6159     MCInst TmpInst;
   6160     // Shuffle the operands around so the lane index operand is in the
   6161     // right place.
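             // For illustration: this is the post-increment-by-size form
             // written with '!', e.g. "vld1.8 {d0[2]}, [r1]!"; there is no Rm,
             // so a register operand of 0 is used as the placeholder below.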
   6162     unsigned Spacing;
   6163     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6164     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6165     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   6166     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6167     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6168     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   6169     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
   6170     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6171     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6172     TmpInst.addOperand(Inst.getOperand(5));
   6173     Inst = TmpInst;
   6174     return true;
   6175   }
   6176 
   6177   case ARM::VLD2LNdWB_fixed_Asm_8:
   6178   case ARM::VLD2LNdWB_fixed_Asm_16:
   6179   case ARM::VLD2LNdWB_fixed_Asm_32:
   6180   case ARM::VLD2LNqWB_fixed_Asm_16:
   6181   case ARM::VLD2LNqWB_fixed_Asm_32: {
   6182     MCInst TmpInst;
   6183     // Shuffle the operands around so the lane index operand is in the
   6184     // right place.
   6185     unsigned Spacing;
   6186     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6187     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6188     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6189                                             Spacing));
   6190     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   6191     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6192     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6193     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   6194     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
   6195     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6196                                             Spacing));
   6197     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6198     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6199     TmpInst.addOperand(Inst.getOperand(5));
   6200     Inst = TmpInst;
   6201     return true;
   6202   }
   6203 
   6204   case ARM::VLD3LNdWB_fixed_Asm_8:
   6205   case ARM::VLD3LNdWB_fixed_Asm_16:
   6206   case ARM::VLD3LNdWB_fixed_Asm_32:
   6207   case ARM::VLD3LNqWB_fixed_Asm_16:
   6208   case ARM::VLD3LNqWB_fixed_Asm_32: {
   6209     MCInst TmpInst;
   6210     // Shuffle the operands around so the lane index operand is in the
   6211     // right place.
   6212     unsigned Spacing;
   6213     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6214     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6215     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6216                                             Spacing));
   6217     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6218                                             Spacing * 2));
   6219     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   6220     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6221     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6222     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   6223     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
   6224     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6225                                             Spacing));
   6226     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6227                                             Spacing * 2));
   6228     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6229     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6230     TmpInst.addOperand(Inst.getOperand(5));
   6231     Inst = TmpInst;
   6232     return true;
   6233   }
   6234 
   6235   case ARM::VLD4LNdWB_fixed_Asm_8:
   6236   case ARM::VLD4LNdWB_fixed_Asm_16:
   6237   case ARM::VLD4LNdWB_fixed_Asm_32:
   6238   case ARM::VLD4LNqWB_fixed_Asm_16:
   6239   case ARM::VLD4LNqWB_fixed_Asm_32: {
   6240     MCInst TmpInst;
   6241     // Shuffle the operands around so the lane index operand is in the
   6242     // right place.
   6243     unsigned Spacing;
   6244     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6245     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6246     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6247                                             Spacing));
   6248     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6249                                             Spacing * 2));
   6250     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6251                                             Spacing * 3));
   6252     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
   6253     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6254     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6255     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   6256     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
   6257     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6258                                             Spacing));
   6259     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6260                                             Spacing * 2));
   6261     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6262                                             Spacing * 3));
   6263     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6264     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6265     TmpInst.addOperand(Inst.getOperand(5));
   6266     Inst = TmpInst;
   6267     return true;
   6268   }
   6269 
   6270   case ARM::VLD1LNdAsm_8:
   6271   case ARM::VLD1LNdAsm_16:
   6272   case ARM::VLD1LNdAsm_32: {
   6273     MCInst TmpInst;
   6274     // Shuffle the operands around so the lane index operand is in the
   6275     // right place.
   6276     unsigned Spacing;
   6277     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6278     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6279     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6280     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6281     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
   6282     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6283     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6284     TmpInst.addOperand(Inst.getOperand(5));
   6285     Inst = TmpInst;
   6286     return true;
   6287   }
   6288 
   6289   case ARM::VLD2LNdAsm_8:
   6290   case ARM::VLD2LNdAsm_16:
   6291   case ARM::VLD2LNdAsm_32:
   6292   case ARM::VLD2LNqAsm_16:
   6293   case ARM::VLD2LNqAsm_32: {
   6294     MCInst TmpInst;
   6295     // Shuffle the operands around so the lane index operand is in the
   6296     // right place.
   6297     unsigned Spacing;
   6298     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6299     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6300     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6301                                             Spacing));
   6302     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6303     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6304     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
   6305     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6306                                             Spacing));
   6307     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6308     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6309     TmpInst.addOperand(Inst.getOperand(5));
   6310     Inst = TmpInst;
   6311     return true;
   6312   }
   6313 
   6314   case ARM::VLD3LNdAsm_8:
   6315   case ARM::VLD3LNdAsm_16:
   6316   case ARM::VLD3LNdAsm_32:
   6317   case ARM::VLD3LNqAsm_16:
   6318   case ARM::VLD3LNqAsm_32: {
   6319     MCInst TmpInst;
   6320     // Shuffle the operands around so the lane index operand is in the
   6321     // right place.
   6322     unsigned Spacing;
   6323     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6324     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6325     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6326                                             Spacing));
   6327     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6328                                             Spacing * 2));
   6329     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6330     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6331     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
   6332     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6333                                             Spacing));
   6334     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6335                                             Spacing * 2));
   6336     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6337     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6338     TmpInst.addOperand(Inst.getOperand(5));
   6339     Inst = TmpInst;
   6340     return true;
   6341   }
   6342 
   6343   case ARM::VLD4LNdAsm_8:
   6344   case ARM::VLD4LNdAsm_16:
   6345   case ARM::VLD4LNdAsm_32:
   6346   case ARM::VLD4LNqAsm_16:
   6347   case ARM::VLD4LNqAsm_32: {
   6348     MCInst TmpInst;
   6349     // Shuffle the operands around so the lane index operand is in the
   6350     // right place.
   6351     unsigned Spacing;
   6352     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6353     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6354     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6355                                             Spacing));
   6356     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6357                                             Spacing * 2));
   6358     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6359                                             Spacing * 3));
   6360     TmpInst.addOperand(Inst.getOperand(2)); // Rn
   6361     TmpInst.addOperand(Inst.getOperand(3)); // alignment
   6362     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
   6363     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6364                                             Spacing));
   6365     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6366                                             Spacing * 2));
   6367     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6368                                             Spacing * 3));
   6369     TmpInst.addOperand(Inst.getOperand(1)); // lane
   6370     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6371     TmpInst.addOperand(Inst.getOperand(5));
   6372     Inst = TmpInst;
   6373     return true;
   6374   }
   6375 
   6376   // VLD3DUP single 3-element structure to all lanes instructions.
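           // For illustration, these handle the all-lanes syntax, e.g.
           //   vld3.8 {d0[], d1[], d2[]}, [r1]
           // where the pseudo carries only the first list register and the
           // remaining two are reconstructed from it using the spacing.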
   6377   case ARM::VLD3DUPdAsm_8:
   6378   case ARM::VLD3DUPdAsm_16:
   6379   case ARM::VLD3DUPdAsm_32:
   6380   case ARM::VLD3DUPqAsm_8:
   6381   case ARM::VLD3DUPqAsm_16:
   6382   case ARM::VLD3DUPqAsm_32: {
   6383     MCInst TmpInst;
   6384     unsigned Spacing;
   6385     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6386     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6387     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6388                                             Spacing));
   6389     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6390                                             Spacing * 2));
   6391     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6392     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6393     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6394     TmpInst.addOperand(Inst.getOperand(4));
   6395     Inst = TmpInst;
   6396     return true;
   6397   }
   6398 
   6399   case ARM::VLD3DUPdWB_fixed_Asm_8:
   6400   case ARM::VLD3DUPdWB_fixed_Asm_16:
   6401   case ARM::VLD3DUPdWB_fixed_Asm_32:
   6402   case ARM::VLD3DUPqWB_fixed_Asm_8:
   6403   case ARM::VLD3DUPqWB_fixed_Asm_16:
   6404   case ARM::VLD3DUPqWB_fixed_Asm_32: {
   6405     MCInst TmpInst;
   6406     unsigned Spacing;
   6407     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6408     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6409     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6410                                             Spacing));
   6411     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6412                                             Spacing * 2));
   6413     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6414     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
   6415     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6416     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   6417     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6418     TmpInst.addOperand(Inst.getOperand(4));
   6419     Inst = TmpInst;
   6420     return true;
   6421   }
   6422 
   6423   case ARM::VLD3DUPdWB_register_Asm_8:
   6424   case ARM::VLD3DUPdWB_register_Asm_16:
   6425   case ARM::VLD3DUPdWB_register_Asm_32:
   6426   case ARM::VLD3DUPqWB_register_Asm_8:
   6427   case ARM::VLD3DUPqWB_register_Asm_16:
   6428   case ARM::VLD3DUPqWB_register_Asm_32: {
   6429     MCInst TmpInst;
   6430     unsigned Spacing;
   6431     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6432     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6433     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6434                                             Spacing));
   6435     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6436                                             Spacing * 2));
   6437     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6438     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
   6439     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6440     TmpInst.addOperand(Inst.getOperand(3)); // Rm
   6441     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6442     TmpInst.addOperand(Inst.getOperand(5));
   6443     Inst = TmpInst;
   6444     return true;
   6445   }
   6446 
   6447   // VLD3 multiple 3-element structure instructions.
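           // For illustration, e.g. "vld3.8 {d0, d1, d2}, [r1]"; the 'q'
           // flavored pseudos cover the double-spaced list form (e.g.
           // {d0, d2, d4}), which is why the register spacing comes from
           // getRealVLDOpcode rather than being hard-coded to 1.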
   6448   case ARM::VLD3dAsm_8:
   6449   case ARM::VLD3dAsm_16:
   6450   case ARM::VLD3dAsm_32:
   6451   case ARM::VLD3qAsm_8:
   6452   case ARM::VLD3qAsm_16:
   6453   case ARM::VLD3qAsm_32: {
   6454     MCInst TmpInst;
   6455     unsigned Spacing;
   6456     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6457     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6458     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6459                                             Spacing));
   6460     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6461                                             Spacing * 2));
   6462     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6463     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6464     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6465     TmpInst.addOperand(Inst.getOperand(4));
   6466     Inst = TmpInst;
   6467     return true;
   6468   }
   6469 
   6470   case ARM::VLD3dWB_fixed_Asm_8:
   6471   case ARM::VLD3dWB_fixed_Asm_16:
   6472   case ARM::VLD3dWB_fixed_Asm_32:
   6473   case ARM::VLD3qWB_fixed_Asm_8:
   6474   case ARM::VLD3qWB_fixed_Asm_16:
   6475   case ARM::VLD3qWB_fixed_Asm_32: {
   6476     MCInst TmpInst;
   6477     unsigned Spacing;
   6478     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6479     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6480     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6481                                             Spacing));
   6482     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6483                                             Spacing * 2));
   6484     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6485     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
   6486     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6487     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   6488     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6489     TmpInst.addOperand(Inst.getOperand(4));
   6490     Inst = TmpInst;
   6491     return true;
   6492   }
   6493 
   6494   case ARM::VLD3dWB_register_Asm_8:
   6495   case ARM::VLD3dWB_register_Asm_16:
   6496   case ARM::VLD3dWB_register_Asm_32:
   6497   case ARM::VLD3qWB_register_Asm_8:
   6498   case ARM::VLD3qWB_register_Asm_16:
   6499   case ARM::VLD3qWB_register_Asm_32: {
   6500     MCInst TmpInst;
   6501     unsigned Spacing;
   6502     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6503     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6504     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6505                                             Spacing));
   6506     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6507                                             Spacing * 2));
   6508     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6509     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
   6510     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6511     TmpInst.addOperand(Inst.getOperand(3)); // Rm
   6512     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6513     TmpInst.addOperand(Inst.getOperand(5));
   6514     Inst = TmpInst;
   6515     return true;
   6516   }
   6517 
    6518   // VLD4DUP single 4-element structure to all lanes instructions.
   6519   case ARM::VLD4DUPdAsm_8:
   6520   case ARM::VLD4DUPdAsm_16:
   6521   case ARM::VLD4DUPdAsm_32:
   6522   case ARM::VLD4DUPqAsm_8:
   6523   case ARM::VLD4DUPqAsm_16:
   6524   case ARM::VLD4DUPqAsm_32: {
   6525     MCInst TmpInst;
   6526     unsigned Spacing;
   6527     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6528     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6529     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6530                                             Spacing));
   6531     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6532                                             Spacing * 2));
   6533     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6534                                             Spacing * 3));
   6535     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6536     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6537     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6538     TmpInst.addOperand(Inst.getOperand(4));
   6539     Inst = TmpInst;
   6540     return true;
   6541   }
   6542 
   6543   case ARM::VLD4DUPdWB_fixed_Asm_8:
   6544   case ARM::VLD4DUPdWB_fixed_Asm_16:
   6545   case ARM::VLD4DUPdWB_fixed_Asm_32:
   6546   case ARM::VLD4DUPqWB_fixed_Asm_8:
   6547   case ARM::VLD4DUPqWB_fixed_Asm_16:
   6548   case ARM::VLD4DUPqWB_fixed_Asm_32: {
   6549     MCInst TmpInst;
   6550     unsigned Spacing;
   6551     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6552     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6553     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6554                                             Spacing));
   6555     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6556                                             Spacing * 2));
   6557     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6558                                             Spacing * 3));
   6559     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6560     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
   6561     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6562     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   6563     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6564     TmpInst.addOperand(Inst.getOperand(4));
   6565     Inst = TmpInst;
   6566     return true;
   6567   }
   6568 
   6569   case ARM::VLD4DUPdWB_register_Asm_8:
   6570   case ARM::VLD4DUPdWB_register_Asm_16:
   6571   case ARM::VLD4DUPdWB_register_Asm_32:
   6572   case ARM::VLD4DUPqWB_register_Asm_8:
   6573   case ARM::VLD4DUPqWB_register_Asm_16:
   6574   case ARM::VLD4DUPqWB_register_Asm_32: {
   6575     MCInst TmpInst;
   6576     unsigned Spacing;
   6577     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6578     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6579     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6580                                             Spacing));
   6581     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6582                                             Spacing * 2));
   6583     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6584                                             Spacing * 3));
   6585     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6586     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
   6587     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6588     TmpInst.addOperand(Inst.getOperand(3)); // Rm
   6589     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6590     TmpInst.addOperand(Inst.getOperand(5));
   6591     Inst = TmpInst;
   6592     return true;
   6593   }
   6594 
   6595   // VLD4 multiple 4-element structure instructions.
   6596   case ARM::VLD4dAsm_8:
   6597   case ARM::VLD4dAsm_16:
   6598   case ARM::VLD4dAsm_32:
   6599   case ARM::VLD4qAsm_8:
   6600   case ARM::VLD4qAsm_16:
   6601   case ARM::VLD4qAsm_32: {
   6602     MCInst TmpInst;
   6603     unsigned Spacing;
   6604     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6605     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6606     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6607                                             Spacing));
   6608     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6609                                             Spacing * 2));
   6610     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6611                                             Spacing * 3));
   6612     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6613     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6614     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6615     TmpInst.addOperand(Inst.getOperand(4));
   6616     Inst = TmpInst;
   6617     return true;
   6618   }
   6619 
   6620   case ARM::VLD4dWB_fixed_Asm_8:
   6621   case ARM::VLD4dWB_fixed_Asm_16:
   6622   case ARM::VLD4dWB_fixed_Asm_32:
   6623   case ARM::VLD4qWB_fixed_Asm_8:
   6624   case ARM::VLD4qWB_fixed_Asm_16:
   6625   case ARM::VLD4qWB_fixed_Asm_32: {
   6626     MCInst TmpInst;
   6627     unsigned Spacing;
   6628     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6629     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6630     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6631                                             Spacing));
   6632     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6633                                             Spacing * 2));
   6634     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6635                                             Spacing * 3));
   6636     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6637     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
   6638     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6639     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   6640     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6641     TmpInst.addOperand(Inst.getOperand(4));
   6642     Inst = TmpInst;
   6643     return true;
   6644   }
   6645 
   6646   case ARM::VLD4dWB_register_Asm_8:
   6647   case ARM::VLD4dWB_register_Asm_16:
   6648   case ARM::VLD4dWB_register_Asm_32:
   6649   case ARM::VLD4qWB_register_Asm_8:
   6650   case ARM::VLD4qWB_register_Asm_16:
   6651   case ARM::VLD4qWB_register_Asm_32: {
   6652     MCInst TmpInst;
   6653     unsigned Spacing;
   6654     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
   6655     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6656     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6657                                             Spacing));
   6658     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6659                                             Spacing * 2));
   6660     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6661                                             Spacing * 3));
   6662     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6663     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
   6664     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6665     TmpInst.addOperand(Inst.getOperand(3)); // Rm
   6666     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6667     TmpInst.addOperand(Inst.getOperand(5));
   6668     Inst = TmpInst;
   6669     return true;
   6670   }
   6671 
   6672   // VST3 multiple 3-element structure instructions.
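           // For illustration, e.g. "vst3.8 {d0, d1, d2}, [r1]". Note that for
           // stores the real encoding takes the address operands (Rn,
           // alignment, and Rm/writeback if any) before the source register
           // list, so the shuffle below differs from the VLD3 cases above.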
   6673   case ARM::VST3dAsm_8:
   6674   case ARM::VST3dAsm_16:
   6675   case ARM::VST3dAsm_32:
   6676   case ARM::VST3qAsm_8:
   6677   case ARM::VST3qAsm_16:
   6678   case ARM::VST3qAsm_32: {
   6679     MCInst TmpInst;
   6680     unsigned Spacing;
   6681     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   6682     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6683     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6684     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6685     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6686                                             Spacing));
   6687     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6688                                             Spacing * 2));
   6689     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6690     TmpInst.addOperand(Inst.getOperand(4));
   6691     Inst = TmpInst;
   6692     return true;
   6693   }
   6694 
   6695   case ARM::VST3dWB_fixed_Asm_8:
   6696   case ARM::VST3dWB_fixed_Asm_16:
   6697   case ARM::VST3dWB_fixed_Asm_32:
   6698   case ARM::VST3qWB_fixed_Asm_8:
   6699   case ARM::VST3qWB_fixed_Asm_16:
   6700   case ARM::VST3qWB_fixed_Asm_32: {
   6701     MCInst TmpInst;
   6702     unsigned Spacing;
   6703     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   6704     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6705     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
   6706     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6707     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   6708     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6709     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6710                                             Spacing));
   6711     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6712                                             Spacing * 2));
   6713     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6714     TmpInst.addOperand(Inst.getOperand(4));
   6715     Inst = TmpInst;
   6716     return true;
   6717   }
   6718 
   6719   case ARM::VST3dWB_register_Asm_8:
   6720   case ARM::VST3dWB_register_Asm_16:
   6721   case ARM::VST3dWB_register_Asm_32:
   6722   case ARM::VST3qWB_register_Asm_8:
   6723   case ARM::VST3qWB_register_Asm_16:
   6724   case ARM::VST3qWB_register_Asm_32: {
   6725     MCInst TmpInst;
   6726     unsigned Spacing;
   6727     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   6728     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6729     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
   6730     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6731     TmpInst.addOperand(Inst.getOperand(3)); // Rm
   6732     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6733     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6734                                             Spacing));
   6735     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6736                                             Spacing * 2));
   6737     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6738     TmpInst.addOperand(Inst.getOperand(5));
   6739     Inst = TmpInst;
   6740     return true;
   6741   }
   6742 
    6743   // VST4 multiple 4-element structure instructions.
   6744   case ARM::VST4dAsm_8:
   6745   case ARM::VST4dAsm_16:
   6746   case ARM::VST4dAsm_32:
   6747   case ARM::VST4qAsm_8:
   6748   case ARM::VST4qAsm_16:
   6749   case ARM::VST4qAsm_32: {
   6750     MCInst TmpInst;
   6751     unsigned Spacing;
   6752     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   6753     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6754     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6755     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6756     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6757                                             Spacing));
   6758     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6759                                             Spacing * 2));
   6760     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6761                                             Spacing * 3));
   6762     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6763     TmpInst.addOperand(Inst.getOperand(4));
   6764     Inst = TmpInst;
   6765     return true;
   6766   }
   6767 
   6768   case ARM::VST4dWB_fixed_Asm_8:
   6769   case ARM::VST4dWB_fixed_Asm_16:
   6770   case ARM::VST4dWB_fixed_Asm_32:
   6771   case ARM::VST4qWB_fixed_Asm_8:
   6772   case ARM::VST4qWB_fixed_Asm_16:
   6773   case ARM::VST4qWB_fixed_Asm_32: {
   6774     MCInst TmpInst;
   6775     unsigned Spacing;
   6776     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   6777     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6778     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
   6779     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6780     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
   6781     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6782     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6783                                             Spacing));
   6784     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6785                                             Spacing * 2));
   6786     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6787                                             Spacing * 3));
   6788     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6789     TmpInst.addOperand(Inst.getOperand(4));
   6790     Inst = TmpInst;
   6791     return true;
   6792   }
   6793 
   6794   case ARM::VST4dWB_register_Asm_8:
   6795   case ARM::VST4dWB_register_Asm_16:
   6796   case ARM::VST4dWB_register_Asm_32:
   6797   case ARM::VST4qWB_register_Asm_8:
   6798   case ARM::VST4qWB_register_Asm_16:
   6799   case ARM::VST4qWB_register_Asm_32: {
   6800     MCInst TmpInst;
   6801     unsigned Spacing;
   6802     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
   6803     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6804     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
   6805     TmpInst.addOperand(Inst.getOperand(2)); // alignment
   6806     TmpInst.addOperand(Inst.getOperand(3)); // Rm
   6807     TmpInst.addOperand(Inst.getOperand(0)); // Vd
   6808     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6809                                             Spacing));
   6810     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6811                                             Spacing * 2));
   6812     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
   6813                                             Spacing * 3));
   6814     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6815     TmpInst.addOperand(Inst.getOperand(5));
   6816     Inst = TmpInst;
   6817     return true;
   6818   }
   6819 
   6820   // Handle encoding choice for the shift-immediate instructions.
   6821   case ARM::t2LSLri:
   6822   case ARM::t2LSRri:
   6823   case ARM::t2ASRri: {
   6824     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
   6825         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
   6826         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
   6827         !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
   6828          static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
   6829       unsigned NewOpc;
   6830       switch (Inst.getOpcode()) {
   6831       default: llvm_unreachable("unexpected opcode");
   6832       case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
   6833       case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
   6834       case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
   6835       }
   6836       // The Thumb1 operands aren't in the same order. Awesome, eh?
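               // For example, an "lsls r2, r2, #3" that was matched to the
               // wide t2LSLri form (low registers, flags written outside an
               // IT block, no ".w") is narrowed here; the 16-bit form wants
               // the cc_out operand second, so operand 5 is moved up ahead of
               // the source register and immediate.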
   6837       MCInst TmpInst;
   6838       TmpInst.setOpcode(NewOpc);
   6839       TmpInst.addOperand(Inst.getOperand(0));
   6840       TmpInst.addOperand(Inst.getOperand(5));
   6841       TmpInst.addOperand(Inst.getOperand(1));
   6842       TmpInst.addOperand(Inst.getOperand(2));
   6843       TmpInst.addOperand(Inst.getOperand(3));
   6844       TmpInst.addOperand(Inst.getOperand(4));
   6845       Inst = TmpInst;
   6846       return true;
   6847     }
   6848     return false;
   6849   }
   6850 
   6851   // Handle the Thumb2 mode MOV complex aliases.
   6852   case ARM::t2MOVsr:
   6853   case ARM::t2MOVSsr: {
    6854     // Which instruction to expand to depends on the CCOut operand and,
    6855     // when the register operands are all low registers, on whether
    6856     // we're in an IT block.
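             // For illustration: "mov r3, r4, lsr r2" comes in as t2MOVsr and
             // is rewritten to t2LSRrr; when Rd == Rn and all three registers
             // are low (and the IT state matches), the 16-bit tLSRrr form is
             // used instead, with CPSR as cc_out for the flag-setting variant.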
   6857     bool isNarrow = false;
   6858     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
   6859         isARMLowRegister(Inst.getOperand(1).getReg()) &&
   6860         isARMLowRegister(Inst.getOperand(2).getReg()) &&
   6861         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
   6862         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
   6863       isNarrow = true;
   6864     MCInst TmpInst;
   6865     unsigned newOpc;
   6866     switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
   6867     default: llvm_unreachable("unexpected opcode!");
   6868     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
   6869     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
   6870     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
   6871     case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
   6872     }
   6873     TmpInst.setOpcode(newOpc);
   6874     TmpInst.addOperand(Inst.getOperand(0)); // Rd
   6875     if (isNarrow)
   6876       TmpInst.addOperand(MCOperand::CreateReg(
   6877           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
   6878     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6879     TmpInst.addOperand(Inst.getOperand(2)); // Rm
   6880     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
   6881     TmpInst.addOperand(Inst.getOperand(5));
   6882     if (!isNarrow)
   6883       TmpInst.addOperand(MCOperand::CreateReg(
   6884           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
   6885     Inst = TmpInst;
   6886     return true;
   6887   }
   6888   case ARM::t2MOVsi:
   6889   case ARM::t2MOVSsi: {
    6890     // Which instruction to expand to depends on the CCOut operand and,
    6891     // when the register operands are all low registers, on whether
    6892     // we're in an IT block.
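             // For illustration: "mov r0, r1, lsl #3" becomes t2LSLri (or the
             // 16-bit tLSLri when it can be narrowed), "mov r0, r1, rrx"
             // becomes t2RRX, and an asr/lsr amount of 32 is stored as 0,
             // matching how the shift is encoded.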
   6893     bool isNarrow = false;
   6894     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
   6895         isARMLowRegister(Inst.getOperand(1).getReg()) &&
   6896         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
   6897       isNarrow = true;
   6898     MCInst TmpInst;
   6899     unsigned newOpc;
   6900     switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
   6901     default: llvm_unreachable("unexpected opcode!");
   6902     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
   6903     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
   6904     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
   6905     case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
   6906     case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
   6907     }
   6908     unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
   6909     if (Amount == 32) Amount = 0;
   6910     TmpInst.setOpcode(newOpc);
   6911     TmpInst.addOperand(Inst.getOperand(0)); // Rd
   6912     if (isNarrow)
   6913       TmpInst.addOperand(MCOperand::CreateReg(
   6914           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
   6915     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6916     if (newOpc != ARM::t2RRX)
   6917       TmpInst.addOperand(MCOperand::CreateImm(Amount));
   6918     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6919     TmpInst.addOperand(Inst.getOperand(4));
   6920     if (!isNarrow)
   6921       TmpInst.addOperand(MCOperand::CreateReg(
   6922           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
   6923     Inst = TmpInst;
   6924     return true;
   6925   }
   6926   // Handle the ARM mode MOV complex aliases.
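           // For illustration, an ARM-mode "asr r0, r1, r2" is really
           // "mov r0, r1, asr r2": the shift type (and amount, for the
           // immediate forms) is packed into a single so_reg operand with
           // ARM_AM::getSORegOpc and the instruction becomes MOVsr or MOVsi.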
   6927   case ARM::ASRr:
   6928   case ARM::LSRr:
   6929   case ARM::LSLr:
   6930   case ARM::RORr: {
   6931     ARM_AM::ShiftOpc ShiftTy;
   6932     switch(Inst.getOpcode()) {
   6933     default: llvm_unreachable("unexpected opcode!");
   6934     case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
   6935     case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
   6936     case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
   6937     case ARM::RORr: ShiftTy = ARM_AM::ror; break;
   6938     }
   6939     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
   6940     MCInst TmpInst;
   6941     TmpInst.setOpcode(ARM::MOVsr);
   6942     TmpInst.addOperand(Inst.getOperand(0)); // Rd
   6943     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6944     TmpInst.addOperand(Inst.getOperand(2)); // Rm
   6945     TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
   6946     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6947     TmpInst.addOperand(Inst.getOperand(4));
   6948     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
   6949     Inst = TmpInst;
   6950     return true;
   6951   }
   6952   case ARM::ASRi:
   6953   case ARM::LSRi:
   6954   case ARM::LSLi:
   6955   case ARM::RORi: {
   6956     ARM_AM::ShiftOpc ShiftTy;
   6957     switch(Inst.getOpcode()) {
   6958     default: llvm_unreachable("unexpected opcode!");
   6959     case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
   6960     case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
   6961     case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
   6962     case ARM::RORi: ShiftTy = ARM_AM::ror; break;
   6963     }
   6964     // A shift by zero is a plain MOVr, not a MOVsi.
   6965     unsigned Amt = Inst.getOperand(2).getImm();
   6966     unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
    6967     // A shift by 32 should be encoded as 0 when permitted (lsr/asr only).
   6968     if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
   6969       Amt = 0;
   6970     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
   6971     MCInst TmpInst;
   6972     TmpInst.setOpcode(Opc);
   6973     TmpInst.addOperand(Inst.getOperand(0)); // Rd
   6974     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6975     if (Opc == ARM::MOVsi)
   6976       TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
   6977     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
   6978     TmpInst.addOperand(Inst.getOperand(4));
   6979     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
   6980     Inst = TmpInst;
   6981     return true;
   6982   }
   6983   case ARM::RRXi: {
   6984     unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
   6985     MCInst TmpInst;
   6986     TmpInst.setOpcode(ARM::MOVsi);
   6987     TmpInst.addOperand(Inst.getOperand(0)); // Rd
   6988     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   6989     TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
   6990     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
   6991     TmpInst.addOperand(Inst.getOperand(3));
   6992     TmpInst.addOperand(Inst.getOperand(4)); // cc_out
   6993     Inst = TmpInst;
   6994     return true;
   6995   }
   6996   case ARM::t2LDMIA_UPD: {
   6997     // If this is a load of a single register, then we should use
   6998     // a post-indexed LDR instruction instead, per the ARM ARM.
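             // For illustration: "ldmia r0!, {r1}" becomes the equivalent of
             // "ldr r1, [r0], #4" (t2LDR_POST with an offset of 4). The
             // t2STMDB_UPD case below is the mirror image, using t2STR_PRE
             // with an offset of -4.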
   6999     if (Inst.getNumOperands() != 5)
   7000       return false;
   7001     MCInst TmpInst;
   7002     TmpInst.setOpcode(ARM::t2LDR_POST);
   7003     TmpInst.addOperand(Inst.getOperand(4)); // Rt
   7004     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
   7005     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   7006     TmpInst.addOperand(MCOperand::CreateImm(4));
   7007     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
   7008     TmpInst.addOperand(Inst.getOperand(3));
   7009     Inst = TmpInst;
   7010     return true;
   7011   }
   7012   case ARM::t2STMDB_UPD: {
   7013     // If this is a store of a single register, then we should use
   7014     // a pre-indexed STR instruction instead, per the ARM ARM.
   7015     if (Inst.getNumOperands() != 5)
   7016       return false;
   7017     MCInst TmpInst;
   7018     TmpInst.setOpcode(ARM::t2STR_PRE);
   7019     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
   7020     TmpInst.addOperand(Inst.getOperand(4)); // Rt
   7021     TmpInst.addOperand(Inst.getOperand(1)); // Rn
   7022     TmpInst.addOperand(MCOperand::CreateImm(-4));
   7023     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
   7024     TmpInst.addOperand(Inst.getOperand(3));
   7025     Inst = TmpInst;
   7026     return true;
   7027   }
   7028   case ARM::LDMIA_UPD:
   7029     // If this is a load of a single register via a 'pop', then we should use
   7030     // a post-indexed LDR instruction instead, per the ARM ARM.
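             // For illustration: an ARM-mode "pop {r3}" becomes the
             // equivalent of "ldr r3, [sp], #4" (LDR_POST_IMM).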
   7031     if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
   7032         Inst.getNumOperands() == 5) {
   7033       MCInst TmpInst;
   7034       TmpInst.setOpcode(ARM::LDR_POST_IMM);
   7035       TmpInst.addOperand(Inst.getOperand(4)); // Rt
   7036       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
   7037       TmpInst.addOperand(Inst.getOperand(1)); // Rn
   7038       TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
   7039       TmpInst.addOperand(MCOperand::CreateImm(4));
   7040       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
   7041       TmpInst.addOperand(Inst.getOperand(3));
   7042       Inst = TmpInst;
   7043       return true;
   7044     }
   7045     break;
   7046   case ARM::STMDB_UPD:
   7047     // If this is a store of a single register via a 'push', then we should use
   7048     // a pre-indexed STR instruction instead, per the ARM ARM.
   7049     if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
   7050         Inst.getNumOperands() == 5) {
   7051       MCInst TmpInst;
   7052       TmpInst.setOpcode(ARM::STR_PRE_IMM);
   7053       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
   7054       TmpInst.addOperand(Inst.getOperand(4)); // Rt
   7055       TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
   7056       TmpInst.addOperand(MCOperand::CreateImm(-4));
   7057       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
   7058       TmpInst.addOperand(Inst.getOperand(3));
   7059       Inst = TmpInst;
   7060     }
   7061     break;
   7062   case ARM::t2ADDri12:
   7063     // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
   7064     // mnemonic was used (not "addw"), encoding T3 is preferred.
   7065     if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
   7066         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
   7067       break;
   7068     Inst.setOpcode(ARM::t2ADDri);
   7069     Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
   7070     break;
   7071   case ARM::t2SUBri12:
   7072     // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
   7073     // mnemonic was used (not "subw"), encoding T3 is preferred.
   7074     if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
   7075         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
   7076       break;
   7077     Inst.setOpcode(ARM::t2SUBri);
   7078     Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
   7079     break;
   7080   case ARM::tADDi8:
   7081     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
   7082     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
   7083     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
   7084     // to encoding T1 if <Rd> is omitted."
   7085     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
   7086       Inst.setOpcode(ARM::tADDi3);
   7087       return true;
   7088     }
   7089     break;
   7090   case ARM::tSUBi8:
    7091     // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
   7092     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
   7093     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
   7094     // to encoding T1 if <Rd> is omitted."
   7095     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
   7096       Inst.setOpcode(ARM::tSUBi3);
   7097       return true;
   7098     }
   7099     break;
   7100   case ARM::t2ADDri:
   7101   case ARM::t2SUBri: {
   7102     // If the destination and first source operand are the same, and
   7103     // the flags are compatible with the current IT status, use encoding T2
   7104     // instead of T3. For compatibility with the system 'as'. Make sure the
   7105     // wide encoding wasn't explicit.
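             // For illustration: "adds r1, r1, #10" outside an IT block, or a
             // predicated "addeq r1, r1, #10" inside one, narrows to tADDi8
             // provided r1 is a low register, the immediate fits in 8 bits,
             // and no ".w" qualifier was written.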
   7106     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
   7107         !isARMLowRegister(Inst.getOperand(0).getReg()) ||
   7108         (unsigned)Inst.getOperand(2).getImm() > 255 ||
   7109         ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
   7110         (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
   7111         (static_cast<ARMOperand*>(Operands[3])->isToken() &&
   7112          static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
   7113       break;
   7114     MCInst TmpInst;
   7115     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
   7116                       ARM::tADDi8 : ARM::tSUBi8);
   7117     TmpInst.addOperand(Inst.getOperand(0));
   7118     TmpInst.addOperand(Inst.getOperand(5));
   7119     TmpInst.addOperand(Inst.getOperand(0));
   7120     TmpInst.addOperand(Inst.getOperand(2));
   7121     TmpInst.addOperand(Inst.getOperand(3));
   7122     TmpInst.addOperand(Inst.getOperand(4));
   7123     Inst = TmpInst;
   7124     return true;
   7125   }
   7126   case ARM::t2ADDrr: {
   7127     // If the destination and first source operand are the same, and
   7128     // there's no setting of the flags, use encoding T2 instead of T3.
   7129     // Note that this is only for ADD, not SUB. This mirrors the system
   7130     // 'as' behaviour. Make sure the wide encoding wasn't explicit.
   7131     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
   7132         Inst.getOperand(5).getReg() != 0 ||
   7133         (static_cast<ARMOperand*>(Operands[3])->isToken() &&
   7134          static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
   7135       break;
   7136     MCInst TmpInst;
   7137     TmpInst.setOpcode(ARM::tADDhirr);
   7138     TmpInst.addOperand(Inst.getOperand(0));
   7139     TmpInst.addOperand(Inst.getOperand(0));
   7140     TmpInst.addOperand(Inst.getOperand(2));
   7141     TmpInst.addOperand(Inst.getOperand(3));
   7142     TmpInst.addOperand(Inst.getOperand(4));
   7143     Inst = TmpInst;
   7144     return true;
   7145   }
   7146   case ARM::tADDrSP: {
   7147     // If the non-SP source operand and the destination operand are not the
   7148     // same, we need to use the 32-bit encoding if it's available.
   7149     if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
   7150       Inst.setOpcode(ARM::t2ADDrr);
   7151       Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
   7152       return true;
   7153     }
   7154     break;
   7155   }
   7156   case ARM::tB:
   7157     // A Thumb conditional branch outside of an IT block is a tBcc.
   7158     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
   7159       Inst.setOpcode(ARM::tBcc);
   7160       return true;
   7161     }
   7162     break;
   7163   case ARM::t2B:
   7164     // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
   7165     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
   7166       Inst.setOpcode(ARM::t2Bcc);
   7167       return true;
   7168     }
   7169     break;
   7170   case ARM::t2Bcc:
   7171     // If the conditional is AL or we're in an IT block, we really want t2B.
   7172     if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
   7173       Inst.setOpcode(ARM::t2B);
   7174       return true;
   7175     }
   7176     break;
   7177   case ARM::tBcc:
   7178     // If the conditional is AL, we really want tB.
   7179     if (Inst.getOperand(1).getImm() == ARMCC::AL) {
   7180       Inst.setOpcode(ARM::tB);
   7181       return true;
   7182     }
   7183     break;
   7184   case ARM::tLDMIA: {
   7185     // If the register list contains any high registers, or if the writeback
   7186     // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
   7187     // instead if we're in Thumb2. Otherwise, this should have generated
   7188     // an error in validateInstruction().
   7189     unsigned Rn = Inst.getOperand(0).getReg();
   7190     bool hasWritebackToken =
   7191       (static_cast<ARMOperand*>(Operands[3])->isToken() &&
   7192        static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
   7193     bool listContainsBase;
   7194     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
   7195         (!listContainsBase && !hasWritebackToken) ||
   7196         (listContainsBase && hasWritebackToken)) {
   7197       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
   7198       assert (isThumbTwo());
   7199       Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
   7200       // If we're switching to the updating version, we need to insert
   7201       // the writeback tied operand.
   7202       if (hasWritebackToken)
   7203         Inst.insert(Inst.begin(),
   7204                     MCOperand::CreateReg(Inst.getOperand(0).getReg()));
   7205       return true;
   7206     }
   7207     break;
   7208   }
   7209   case ARM::tSTMIA_UPD: {
   7210     // If the register list contains any high registers, we need to use
   7211     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
   7212     // should have generated an error in validateInstruction().
   7213     unsigned Rn = Inst.getOperand(0).getReg();
   7214     bool listContainsBase;
   7215     if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
   7216       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
   7217       assert (isThumbTwo());
   7218       Inst.setOpcode(ARM::t2STMIA_UPD);
   7219       return true;
   7220     }
   7221     break;
   7222   }
   7223   case ARM::tPOP: {
   7224     bool listContainsBase;
   7225     // If the register list contains any high registers, we need to use
   7226     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
   7227     // should have generated an error in validateInstruction().
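             // For illustration: "pop {r4, r8}" can't use the 16-bit tPOP
             // encoding because of r8, so it becomes a t2LDMIA_UPD with SP as
             // the written-back base; tPUSH below gets the same treatment via
             // t2STMDB_UPD.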
   7228     if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
   7229       return false;
   7230     assert (isThumbTwo());
   7231     Inst.setOpcode(ARM::t2LDMIA_UPD);
   7232     // Add the base register and writeback operands.
   7233     Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
   7234     Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
   7235     return true;
   7236   }
   7237   case ARM::tPUSH: {
   7238     bool listContainsBase;
   7239     if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
   7240       return false;
   7241     assert (isThumbTwo());
   7242     Inst.setOpcode(ARM::t2STMDB_UPD);
   7243     // Add the base register and writeback operands.
   7244     Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
   7245     Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
   7246     return true;
   7247   }
   7248   case ARM::t2MOVi: {
   7249     // If we can use the 16-bit encoding and the user didn't explicitly
   7250     // request the 32-bit variant, transform it here.
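             // For illustration: "movs r2, #5" outside an IT block, or a
             // predicated "moveq r2, #5" inside one, ends up as the 16-bit
             // tMOVi8 as long as the destination is a low register, the
             // immediate fits in 8 bits, and ".w" wasn't written.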
   7251     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
   7252         (unsigned)Inst.getOperand(1).getImm() <= 255 &&
   7253         ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
   7254          Inst.getOperand(4).getReg() == ARM::CPSR) ||
   7255         (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
   7256         (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
   7257          static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
   7258       // The operands aren't in the same order for tMOVi8...
   7259       MCInst TmpInst;
   7260       TmpInst.setOpcode(ARM::tMOVi8);
   7261       TmpInst.addOperand(Inst.getOperand(0));
   7262       TmpInst.addOperand(Inst.getOperand(4));
   7263       TmpInst.addOperand(Inst.getOperand(1));
   7264       TmpInst.addOperand(Inst.getOperand(2));
   7265       TmpInst.addOperand(Inst.getOperand(3));
   7266       Inst = TmpInst;
   7267       return true;
   7268     }
   7269     break;
   7270   }
   7271   case ARM::t2MOVr: {
   7272     // If we can use the 16-bit encoding and the user didn't explicitly
   7273     // request the 32-bit variant, transform it here.
   7274     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
   7275         isARMLowRegister(Inst.getOperand(1).getReg()) &&
   7276         Inst.getOperand(2).getImm() == ARMCC::AL &&
   7277         Inst.getOperand(4).getReg() == ARM::CPSR &&
   7278         (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
   7279          static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
   7280       // The operands aren't the same for tMOV[S]r... (no cc_out)
   7281       MCInst TmpInst;
   7282       TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
   7283       TmpInst.addOperand(Inst.getOperand(0));
   7284       TmpInst.addOperand(Inst.getOperand(1));
   7285       TmpInst.addOperand(Inst.getOperand(2));
   7286       TmpInst.addOperand(Inst.getOperand(3));
   7287       Inst = TmpInst;
   7288       return true;
   7289     }
   7290     break;
   7291   }
   7292   case ARM::t2SXTH:
   7293   case ARM::t2SXTB:
   7294   case ARM::t2UXTH:
   7295   case ARM::t2UXTB: {
   7296     // If we can use the 16-bit encoding and the user didn't explicitly
   7297     // request the 32-bit variant, transform it here.
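            // For example, "uxtb r0, r1" (low registers, rotation #0) narrows
            // to the 16-bit tUXTB; a non-zero rotation keeps the .w encoding.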
   7298     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
   7299         isARMLowRegister(Inst.getOperand(1).getReg()) &&
   7300         Inst.getOperand(2).getImm() == 0 &&
   7301         (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
   7302          static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
   7303       unsigned NewOpc;
   7304       switch (Inst.getOpcode()) {
   7305       default: llvm_unreachable("Illegal opcode!");
   7306       case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
   7307       case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
   7308       case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
   7309       case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
   7310       }
   7311       // The operands aren't the same for thumb1 (no rotate operand).
   7312       MCInst TmpInst;
   7313       TmpInst.setOpcode(NewOpc);
   7314       TmpInst.addOperand(Inst.getOperand(0));
   7315       TmpInst.addOperand(Inst.getOperand(1));
   7316       TmpInst.addOperand(Inst.getOperand(3));
   7317       TmpInst.addOperand(Inst.getOperand(4));
   7318       Inst = TmpInst;
   7319       return true;
   7320     }
   7321     break;
   7322   }
   7323   case ARM::MOVsi: {
   7324     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
   7325     // rrx shifts, and asr/lsr shifts of #32, are encoded with an amount of 0
   7326     if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
   7327       return false;
   7328     if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
   7329       // Shifting by zero is accepted as a vanilla 'MOVr'
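              // For example, "mov r0, r1, lsl #0" is emitted as plain "mov r0, r1".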
   7330       MCInst TmpInst;
   7331       TmpInst.setOpcode(ARM::MOVr);
   7332       TmpInst.addOperand(Inst.getOperand(0));
   7333       TmpInst.addOperand(Inst.getOperand(1));
   7334       TmpInst.addOperand(Inst.getOperand(3));
   7335       TmpInst.addOperand(Inst.getOperand(4));
   7336       TmpInst.addOperand(Inst.getOperand(5));
   7337       Inst = TmpInst;
   7338       return true;
   7339     }
   7340     return false;
   7341   }
   7342   case ARM::ANDrsi:
   7343   case ARM::ORRrsi:
   7344   case ARM::EORrsi:
   7345   case ARM::BICrsi:
   7346   case ARM::SUBrsi:
   7347   case ARM::ADDrsi: {
   7348     unsigned newOpc;
   7349     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
   7350     if (SOpc == ARM_AM::rrx) return false;
   7351     switch (Inst.getOpcode()) {
   7352     default: llvm_unreachable("unexpected opcode!");
   7353     case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
   7354     case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
   7355     case ARM::EORrsi: newOpc = ARM::EORrr; break;
   7356     case ARM::BICrsi: newOpc = ARM::BICrr; break;
   7357     case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
   7358     case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
   7359     }
   7360     // If the shift is by zero, use the non-shifted instruction definition.
   7361     // The exception is right shifts, where an encoded amount of 0 means #32.
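            // For example, "and r0, r1, r2, lsl #0" becomes ANDrr ("and r0, r1, r2"),
            // but "and r0, r1, r2, lsr #32" (offset encoded as 0) stays ANDrsi.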
   7362     if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
   7363         !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
   7364       MCInst TmpInst;
   7365       TmpInst.setOpcode(newOpc);
   7366       TmpInst.addOperand(Inst.getOperand(0));
   7367       TmpInst.addOperand(Inst.getOperand(1));
   7368       TmpInst.addOperand(Inst.getOperand(2));
   7369       TmpInst.addOperand(Inst.getOperand(4));
   7370       TmpInst.addOperand(Inst.getOperand(5));
   7371       TmpInst.addOperand(Inst.getOperand(6));
   7372       Inst = TmpInst;
   7373       return true;
   7374     }
   7375     return false;
   7376   }
   7377   case ARM::ITasm:
   7378   case ARM::t2IT: {
   7379     // In the encoding, a mask bit for any condition after the first means
   7380     // 't' when it equals the low bit of the condition code. The parser
   7381     // always uses 1 to mean 't', so XOR-toggle the mask bits if the low
   7382     // bit of the condition code is zero.
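            // For example, "itet eq": EQ's low bit is 0, so the parsed 1=='t'
            // mask bits are flipped here to match the architectural encoding,
            // while ITState below keeps the original parser-form mask.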
   7383     MCOperand &MO = Inst.getOperand(1);
   7384     unsigned Mask = MO.getImm();
   7385     unsigned OrigMask = Mask;
   7386     unsigned TZ = CountTrailingZeros_32(Mask);
   7387     if ((Inst.getOperand(0).getImm() & 1) == 0) {
   7388       assert(Mask && TZ <= 3 && "illegal IT mask value!");
   7389       for (unsigned i = 3; i != TZ; --i)
   7390         Mask ^= 1 << i;
   7391     }
   7392     MO.setImm(Mask);
   7393 
   7394     // Set up the IT block state according to the IT instruction we just
   7395     // matched.
   7396     assert(!inITBlock() && "nested IT blocks?!");
   7397     ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
   7398     ITState.Mask = OrigMask; // Use the original mask, not the updated one.
   7399     ITState.CurPosition = 0;
   7400     ITState.FirstCond = true;
   7401     break;
   7402   }
   7403   case ARM::t2LSLrr:
   7404   case ARM::t2LSRrr:
   7405   case ARM::t2ASRrr:
   7406   case ARM::t2SBCrr:
   7407   case ARM::t2RORrr:
   7408   case ARM::t2BICrr:
   7409   {
   7410     // Assemblers should use the narrow encodings of these instructions when permissible.
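            // For example, outside an IT block "lsls r0, r0, r1" narrows to the
            // 16-bit tLSLrr (which sets flags); inside an IT block only the
            // non-flag-setting "lsl r0, r0, r1" can narrow.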
   7411     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
   7412          isARMLowRegister(Inst.getOperand(2).getReg())) &&
   7413         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
   7414         ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
   7415          (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
   7416         (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
   7417          !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
   7418       unsigned NewOpc;
   7419       switch (Inst.getOpcode()) {
   7420         default: llvm_unreachable("unexpected opcode");
   7421         case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
   7422         case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
   7423         case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
   7424         case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
   7425         case ARM::t2RORrr: NewOpc = ARM::tROR; break;
   7426         case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
   7427       }
   7428       MCInst TmpInst;
   7429       TmpInst.setOpcode(NewOpc);
   7430       TmpInst.addOperand(Inst.getOperand(0));
   7431       TmpInst.addOperand(Inst.getOperand(5));
   7432       TmpInst.addOperand(Inst.getOperand(1));
   7433       TmpInst.addOperand(Inst.getOperand(2));
   7434       TmpInst.addOperand(Inst.getOperand(3));
   7435       TmpInst.addOperand(Inst.getOperand(4));
   7436       Inst = TmpInst;
   7437       return true;
   7438     }
   7439     return false;
   7440   }
   7441   case ARM::t2ANDrr:
   7442   case ARM::t2EORrr:
   7443   case ARM::t2ADCrr:
   7444   case ARM::t2ORRrr:
   7445   {
   7446     // Assemblers should use the narrow encodings of these instructions when permissible.
   7447     // These instructions are special in that they are commutable, so shorter encodings
   7448     // are available more often.
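            // For example, "ands r0, r1, r0" can still narrow to tAND by
            // swapping the sources so the destination matches the first source.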
   7449     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
   7450          isARMLowRegister(Inst.getOperand(2).getReg())) &&
   7451         (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
   7452          Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
   7453         ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
   7454          (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
   7455         (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
   7456          !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
   7457       unsigned NewOpc;
   7458       switch (Inst.getOpcode()) {
   7459         default: llvm_unreachable("unexpected opcode");
   7460         case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
   7461         case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
   7462         case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
   7463         case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
   7464       }
   7465       MCInst TmpInst;
   7466       TmpInst.setOpcode(NewOpc);
   7467       TmpInst.addOperand(Inst.getOperand(0));
   7468       TmpInst.addOperand(Inst.getOperand(5));
   7469       if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
   7470         TmpInst.addOperand(Inst.getOperand(1));
   7471         TmpInst.addOperand(Inst.getOperand(2));
   7472       } else {
   7473         TmpInst.addOperand(Inst.getOperand(2));
   7474         TmpInst.addOperand(Inst.getOperand(1));
   7475       }
   7476       TmpInst.addOperand(Inst.getOperand(3));
   7477       TmpInst.addOperand(Inst.getOperand(4));
   7478       Inst = TmpInst;
   7479       return true;
   7480     }
   7481     return false;
   7482   }
   7483   }
   7484   return false;
   7485 }
   7486 
   7487 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
   7488   // 16-bit thumb arithmetic instructions either require or preclude the 'S'
   7489   // suffix depending on whether they're in an IT block or not.
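          // For example, the 16-bit encoding of "add r0, r0, #1" sets flags, so
          // it matches "adds" outside an IT block and plain "add" only inside one.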
   7490   unsigned Opc = Inst.getOpcode();
   7491   const MCInstrDesc &MCID = getInstDesc(Opc);
   7492   if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
   7493     assert(MCID.hasOptionalDef() &&
   7494            "optionally flag setting instruction missing optional def operand");
   7495     assert(MCID.NumOperands == Inst.getNumOperands() &&
   7496            "operand count mismatch!");
   7497     // Find the optional-def operand (cc_out).
   7498     unsigned OpNo;
   7499     for (OpNo = 0;
   7500          OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
   7501          ++OpNo)
   7502       ;
   7503     // In Thumb1 the non-flag-setting form doesn't exist; reject it outright.
   7504     if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
   7505       return Match_MnemonicFail;
   7506     // If we're parsing Thumb2, which form is legal depends on whether we're
   7507     // in an IT block.
   7508     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
   7509         !inITBlock())
   7510       return Match_RequiresITBlock;
   7511     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
   7512         inITBlock())
   7513       return Match_RequiresNotITBlock;
   7514   }
   7515   // Some Thumb1 encodings that support high registers only allow both
   7516   // registers to be low (r0-r7) when assembling for Thumb2.
   7517   else if (Opc == ARM::tADDhirr && isThumbOne() &&
   7518            isARMLowRegister(Inst.getOperand(1).getReg()) &&
   7519            isARMLowRegister(Inst.getOperand(2).getReg()))
   7520     return Match_RequiresThumb2;
   7521   // Others only require ARMv6 or later.
   7522   else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
   7523            isARMLowRegister(Inst.getOperand(0).getReg()) &&
   7524            isARMLowRegister(Inst.getOperand(1).getReg()))
   7525     return Match_RequiresV6;
   7526   return Match_Success;
   7527 }
   7528 
   7529 static const char *getSubtargetFeatureName(unsigned Val);
   7530 bool ARMAsmParser::
   7531 MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
   7532                         SmallVectorImpl<MCParsedAsmOperand*> &Operands,
   7533                         MCStreamer &Out, unsigned &ErrorInfo,
   7534                         bool MatchingInlineAsm) {
   7535   MCInst Inst;
   7536   unsigned MatchResult;
   7537 
   7538   MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
   7539                                      MatchingInlineAsm);
   7540   switch (MatchResult) {
   7541   default: break;
   7542   case Match_Success:
   7543     // Context-sensitive operand constraints aren't handled by the matcher,
   7544     // so check them here.
   7545     if (validateInstruction(Inst, Operands)) {
   7546       // Still advance the IT block position; otherwise one bad condition
   7547       // causes nasty cascading errors.
   7548       forwardITPosition();
   7549       return true;
   7550     }
   7551 
   7552     // Some instructions need post-processing to, for example, tweak which
   7553     // encoding is selected. Loop on it while changes happen so the
   7554     // individual transformations can chain off each other. E.g.,
   7555     // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
   7556     while (processInstruction(Inst, Operands))
   7557       ;
   7558 
   7559     // Only move forward at the very end so that everything in validate
   7560     // and process gets a consistent answer about whether we're in an IT
   7561     // block.
   7562     forwardITPosition();
   7563 
   7564     // ITasm is an ARM-mode pseudo-instruction that just sets the IT block
   7565     // state and doesn't actually encode to anything.
   7566     if (Inst.getOpcode() == ARM::ITasm)
   7567       return false;
   7568 
   7569     Inst.setLoc(IDLoc);
   7570     Out.EmitInstruction(Inst);
   7571     return false;
   7572   case Match_MissingFeature: {
   7573     assert(ErrorInfo && "Unknown missing feature!");
   7574     // Special case the error message for the very common case where only
   7575     // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
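              // Each set bit in ErrorInfo names one missing feature; the names
              // come from the tablegen'd getSubtargetFeatureName() table below.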
   7576     std::string Msg = "instruction requires:";
   7577     unsigned Mask = 1;
   7578     for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
   7579       if (ErrorInfo & Mask) {
   7580         Msg += " ";
   7581         Msg += getSubtargetFeatureName(ErrorInfo & Mask);
   7582       }
   7583       Mask <<= 1;
   7584     }
   7585     return Error(IDLoc, Msg);
   7586   }
   7587   case Match_InvalidOperand: {
   7588     SMLoc ErrorLoc = IDLoc;
   7589     if (ErrorInfo != ~0U) {
   7590       if (ErrorInfo >= Operands.size())
   7591         return Error(IDLoc, "too few operands for instruction");
   7592 
   7593       ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
   7594       if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
   7595     }
   7596 
   7597     return Error(ErrorLoc, "invalid operand for instruction");
   7598   }
   7599   case Match_MnemonicFail:
   7600     return Error(IDLoc, "invalid instruction",
   7601                  ((ARMOperand*)Operands[0])->getLocRange());
   7602   case Match_RequiresNotITBlock:
   7603     return Error(IDLoc, "flag setting instruction only valid outside IT block");
   7604   case Match_RequiresITBlock:
   7605     return Error(IDLoc, "instruction only valid inside IT block");
   7606   case Match_RequiresV6:
   7607     return Error(IDLoc, "instruction variant requires ARMv6 or later");
   7608   case Match_RequiresThumb2:
   7609     return Error(IDLoc, "instruction variant requires Thumb2");
   7610   case Match_ImmRange0_15: {
   7611     SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
   7612     if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
   7613     return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
   7614   }
   7615   }
   7616 
   7617   llvm_unreachable("Implement any new match types added!");
   7618 }
   7619 
   7620 /// ParseDirective parses the ARM-specific directives.
   7621 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
   7622   StringRef IDVal = DirectiveID.getIdentifier();
   7623   if (IDVal == ".word")
   7624     return parseDirectiveWord(4, DirectiveID.getLoc());
   7625   else if (IDVal == ".thumb")
   7626     return parseDirectiveThumb(DirectiveID.getLoc());
   7627   else if (IDVal == ".arm")
   7628     return parseDirectiveARM(DirectiveID.getLoc());
   7629   else if (IDVal == ".thumb_func")
   7630     return parseDirectiveThumbFunc(DirectiveID.getLoc());
   7631   else if (IDVal == ".code")
   7632     return parseDirectiveCode(DirectiveID.getLoc());
   7633   else if (IDVal == ".syntax")
   7634     return parseDirectiveSyntax(DirectiveID.getLoc());
   7635   else if (IDVal == ".unreq")
   7636     return parseDirectiveUnreq(DirectiveID.getLoc());
   7637   else if (IDVal == ".arch")
   7638     return parseDirectiveArch(DirectiveID.getLoc());
   7639   else if (IDVal == ".eabi_attribute")
   7640     return parseDirectiveEabiAttr(DirectiveID.getLoc());
   7641   return true;
   7642 }
   7643 
   7644 /// parseDirectiveWord
   7645 ///  ::= .word [ expression (, expression)* ]
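        ///  e.g. ".word 0x12345678, label+4" emits each expression as a 4-byte value.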
   7646 bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
   7647   if (getLexer().isNot(AsmToken::EndOfStatement)) {
   7648     for (;;) {
   7649       const MCExpr *Value;
   7650       if (getParser().parseExpression(Value))
   7651         return true;
   7652 
   7653       getParser().getStreamer().EmitValue(Value, Size);
   7654 
   7655       if (getLexer().is(AsmToken::EndOfStatement))
   7656         break;
   7657 
   7658       // FIXME: Improve diagnostic.
   7659       if (getLexer().isNot(AsmToken::Comma))
   7660         return Error(L, "unexpected token in directive");
   7661       Parser.Lex();
   7662     }
   7663   }
   7664 
   7665   Parser.Lex();
   7666   return false;
   7667 }
   7668 
   7669 /// parseDirectiveThumb
   7670 ///  ::= .thumb
   7671 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
   7672   if (getLexer().isNot(AsmToken::EndOfStatement))
   7673     return Error(L, "unexpected token in directive");
   7674   Parser.Lex();
   7675 
   7676   if (!isThumb())
   7677     SwitchMode();
   7678   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
   7679   return false;
   7680 }
   7681 
   7682 /// parseDirectiveARM
   7683 ///  ::= .arm
   7684 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
   7685   if (getLexer().isNot(AsmToken::EndOfStatement))
   7686     return Error(L, "unexpected token in directive");
   7687   Parser.Lex();
   7688 
   7689   if (isThumb())
   7690     SwitchMode();
   7691   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
   7692   return false;
   7693 }
   7694 
   7695 /// parseDirectiveThumbFunc
   7696 ///  ::= .thumb_func [ symbol_name ]
   7697 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
   7698   const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
   7699   bool isMachO = MAI.hasSubsectionsViaSymbols();
   7700   StringRef Name;
   7701   bool needFuncName = true;
   7702 
   7703   // Darwin asm can (optionally) name the function after the .thumb_func
   7704   // directive; ELF doesn't.
   7705   if (isMachO) {
   7706     const AsmToken &Tok = Parser.getTok();
   7707     if (Tok.isNot(AsmToken::EndOfStatement)) {
   7708       if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
   7709         return Error(L, "unexpected token in .thumb_func directive");
   7710       Name = Tok.getIdentifier();
   7711       Parser.Lex(); // Consume the identifier token.
   7712       needFuncName = false;
   7713     }
   7714   }
   7715 
   7716   if (getLexer().isNot(AsmToken::EndOfStatement))
   7717     return Error(L, "unexpected token in directive");
   7718 
   7719   // Eat the end of statement and any blank lines that follow.
   7720   while (getLexer().is(AsmToken::EndOfStatement))
   7721     Parser.Lex();
   7722 
   7723   // FIXME: assuming function name will be the line following .thumb_func
   7724   // We really should be checking the next symbol definition even if there's
   7725   // stuff in between.
   7726   if (needFuncName) {
   7727     Name = Parser.getTok().getIdentifier();
   7728   }
   7729 
   7730   // Mark symbol as a thumb symbol.
   7731   MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
   7732   getParser().getStreamer().EmitThumbFunc(Func);
   7733   return false;
   7734 }
   7735 
   7736 /// parseDirectiveSyntax
   7737 ///  ::= .syntax unified | divided
   7738 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
   7739   const AsmToken &Tok = Parser.getTok();
   7740   if (Tok.isNot(AsmToken::Identifier))
   7741     return Error(L, "unexpected token in .syntax directive");
   7742   StringRef Mode = Tok.getString();
   7743   if (Mode == "unified" || Mode == "UNIFIED")
   7744     Parser.Lex();
   7745   else if (Mode == "divided" || Mode == "DIVIDED")
   7746     return Error(L, "'.syntax divided' arm assembly not supported");
   7747   else
   7748     return Error(L, "unrecognized syntax mode in .syntax directive");
   7749 
   7750   if (getLexer().isNot(AsmToken::EndOfStatement))
   7751     return Error(Parser.getTok().getLoc(), "unexpected token in directive");
   7752   Parser.Lex();
   7753 
   7754   // TODO tell the MC streamer the mode
   7755   // getParser().getStreamer().Emit???();
   7756   return false;
   7757 }
   7758 
   7759 /// parseDirectiveCode
   7760 ///  ::= .code 16 | 32
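        ///  ".code 16" switches the assembler to Thumb; ".code 32" switches to ARM.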
   7761 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
   7762   const AsmToken &Tok = Parser.getTok();
   7763   if (Tok.isNot(AsmToken::Integer))
   7764     return Error(L, "unexpected token in .code directive");
   7765   int64_t Val = Parser.getTok().getIntVal();
   7766   if (Val == 16)
   7767     Parser.Lex();
   7768   else if (Val == 32)
   7769     Parser.Lex();
   7770   else
   7771     return Error(L, "invalid operand to .code directive");
   7772 
   7773   if (getLexer().isNot(AsmToken::EndOfStatement))
   7774     return Error(Parser.getTok().getLoc(), "unexpected token in directive");
   7775   Parser.Lex();
   7776 
   7777   if (Val == 16) {
   7778     if (!isThumb())
   7779       SwitchMode();
   7780     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
   7781   } else {
   7782     if (isThumb())
   7783       SwitchMode();
   7784     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
   7785   }
   7786 
   7787   return false;
   7788 }
   7789 
   7790 /// parseDirectiveReq
   7791 ///  ::= name .req registername
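        ///  e.g. "fp .req r11" lets "fp" be written wherever "r11" is accepted.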
   7792 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
   7793   Parser.Lex(); // Eat the '.req' token.
   7794   unsigned Reg;
   7795   SMLoc SRegLoc, ERegLoc;
   7796   if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
   7797     Parser.eatToEndOfStatement();
   7798     return Error(SRegLoc, "register name expected");
   7799   }
   7800 
   7801   // Shouldn't be anything else.
   7802   if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
   7803     Parser.eatToEndOfStatement();
   7804     return Error(Parser.getTok().getLoc(),
   7805                  "unexpected input in .req directive.");
   7806   }
   7807 
   7808   Parser.Lex(); // Consume the EndOfStatement
   7809 
   7810   if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
   7811     return Error(SRegLoc, "redefinition of '" + Name +
   7812                           "' does not match original.");
   7813 
   7814   return false;
   7815 }
   7816 
   7817 /// parseDirectiveUnreq
   7818 ///  ::= .unreq registername
   7819 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
   7820   if (Parser.getTok().isNot(AsmToken::Identifier)) {
   7821     Parser.eatToEndOfStatement();
   7822     return Error(L, "unexpected input in .unreq directive.");
   7823   }
   7824   RegisterReqs.erase(Parser.getTok().getIdentifier());
   7825   Parser.Lex(); // Eat the identifier.
   7826   return false;
   7827 }
   7828 
   7829 /// parseDirectiveArch
   7830 ///  ::= .arch token
   7831 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
   7832   return true;
   7833 }
   7834 
   7835 /// parseDirectiveEabiAttr
   7836 ///  ::= .eabi_attribute int, int
   7837 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
   7838   return true;
   7839 }
   7840 
   7841 /// Force static initialization.
   7842 extern "C" void LLVMInitializeARMAsmParser() {
   7843   RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
   7844   RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
   7845 }
   7846 
   7847 #define GET_REGISTER_MATCHER
   7848 #define GET_SUBTARGET_FEATURE_NAME
   7849 #define GET_MATCHER_IMPLEMENTATION
   7850 #include "ARMGenAsmMatcher.inc"
   7851 
   7852 // Define this matcher function after the auto-generated include so we
   7853 // have the match class enum definitions.
   7854 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
   7855                                                   unsigned Kind) {
   7856   ARMOperand *Op = static_cast<ARMOperand*>(AsmOp);
   7857   // If the kind is a token for a literal immediate, check if our asm
   7858   // operand matches. This is for InstAliases which have a fixed-value
   7859   // immediate in the syntax.
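          // For example, MCK__35_0 is the match class tablegen generates for a
          // literal "#0" token ('#' is ASCII 35); accept any immediate
          // expression that folds to the constant 0.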
   7860   if (Kind == MCK__35_0 && Op->isImm()) {
   7861     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
   7862     if (!CE)
   7863       return Match_InvalidOperand;
   7864     if (CE->getValue() == 0)
   7865       return Match_Success;
   7866   }
   7867   return Match_InvalidOperand;
   7868 }
   7869