//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "aarch64-isel"
#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

//===--------------------------------------------------------------------===//
/// AArch64 specific code to select AArch64 machine instructions for
/// SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {
  AArch64TargetMachine &TM;

  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      Subtarget(&TM.getSubtarget<AArch64Subtarget>()) {
  }

  virtual const char *getPassName() const {
    return "AArch64 Instruction Selection";
  }

  // Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

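  // Match an unsigned 12-bit, size-scaled immediate offset. For example,
  // with MemSize == 8 the largest byte offset accepted is 0xfff * 8 (32760),
  // which is encoded as the scaled value 0xfff.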
  template<unsigned MemSize>
  bool SelectOffsetUImm12(SDValue N, SDValue &UImm12) {
    const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
    if (!CN || CN->getZExtValue() % MemSize != 0
        || CN->getZExtValue() / MemSize > 0xfff)
      return false;

    UImm12 = CurDAG->getTargetConstant(CN->getZExtValue() / MemSize, MVT::i64);
    return true;
  }

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  /// Used for pre-lowered address-reference nodes, so we already know
  /// the fields match. This operand's job is simply to add an
  /// appropriate shift operand to the MOVZ/MOVK instruction.
  template<unsigned LogShift>
  bool SelectMOVWAddressRef(SDValue N, SDValue &Imm, SDValue &Shift) {
    Imm = N;
    Shift = CurDAG->getTargetConstant(LogShift, MVT::i32);
    return true;
  }

  bool SelectFPZeroOperand(SDValue N, SDValue &Dummy);

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                unsigned RegWidth);

  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    char ConstraintCode,
                                    std::vector<SDValue> &OutOps);

  bool SelectLogicalImm(SDValue N, SDValue &Imm);

  template<unsigned RegWidth>
  bool SelectTSTBOperand(SDValue N, SDValue &FixedPos) {
    return SelectTSTBOperand(N, FixedPos, RegWidth);
  }

  bool SelectTSTBOperand(SDValue N, SDValue &FixedPos, unsigned RegWidth);

  SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32,
                       unsigned Op64);

  /// Put the given constant into a pool and return a DAG which will give its
  /// address.
  SDValue getConstantPoolItemAddress(SDLoc DL, const Constant *CV);

  SDNode *TrySelectToMoveImm(SDNode *N);
  SDNode *LowerToFPLitPool(SDNode *Node);
  SDNode *SelectToLitPool(SDNode *N);

  SDNode *Select(SDNode *Node);
private:
};
}

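// Select the scale operand of a fixed-point conversion: the multiplier must
// be an exact power of two, 2^fbits, with fbits in range for the destination
// register. For example, (fp_to_sint (fmul x, 16.0)) on a w-register gives
// FBits == 4, so the operand selected is 64 - 4 == 60.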
bool
AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                              unsigned RegWidth) {
  const ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
  if (!CN) return false;

  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an
  // x-register.
  //
  // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
  // want THIS_NODE to be 2^fbits. This is much easier to deal with using
  // integers.
  bool IsExact;

  // fbits is between 1 and 64 in the worst-case, which means the fmul
  // could have 2^64 as an actual operand. Need 65 bits of precision.
  APSInt IntVal(65, true);
  CN->getValueAPF().convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);

  // N.b. isPowerOf2 also checks for > 0.
  if (!IsExact || !IntVal.isPowerOf2()) return false;
  unsigned FBits = IntVal.logBase2();

  // Checks above should have guaranteed that we haven't lost information in
  // finding FBits, but it must still be in range.
  if (FBits == 0 || FBits > RegWidth) return false;

  FixedPos = CurDAG->getTargetConstant(64 - FBits, MVT::i32);
  return true;
}

bool
AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
                                                  char ConstraintCode,
                                                  std::vector<SDValue> &OutOps) {
  switch (ConstraintCode) {
  default: llvm_unreachable("Unrecognised AArch64 memory constraint");
  case 'm':
    // FIXME: more freedom is actually permitted for 'm'. We can go
    // hunting for a base and an offset if we want. Of course, since
    // we don't really know how the operand is going to be used we're
    // probably restricted to the load/store pair's simm7 as an offset
    // range anyway.
  case 'Q':
    OutOps.push_back(Op);
  }

  return false;
}

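// Match a floating-point immediate that is exactly +0.0 (not -0.0), for
// patterns that have a built-in zero form (FCMP against #0.0, for instance).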
bool
AArch64DAGToDAGISel::SelectFPZeroOperand(SDValue N, SDValue &Dummy) {
  ConstantFPSDNode *Imm = dyn_cast<ConstantFPSDNode>(N);
  if (!Imm || !Imm->getValueAPF().isPosZero())
    return false;

  // Doesn't actually carry any information, but keeps TableGen quiet.
  Dummy = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

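// Match a constant representable as an AArch64 logical immediate: a rotated,
// replicated run of set bits. For example, 0x00ff00ff00ff00ff qualifies
// (eight set bits replicated at a 16-bit element size), whereas 0x1234 does
// not and must be materialised some other way.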
bool AArch64DAGToDAGISel::SelectLogicalImm(SDValue N, SDValue &Imm) {
  uint32_t Bits;
  uint32_t RegWidth = N.getValueType().getSizeInBits();

  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) return false;

  if (!A64Imms::isLogicalImm(RegWidth, CN->getZExtValue(), Bits))
    return false;

  Imm = CurDAG->getTargetConstant(Bits, MVT::i32);
  return true;
}

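// Try to materialise the constant with a single instruction: MOVZ (one
// 16-bit chunk, the rest zero), MOVN (the bitwise NOT of such a value), or
// an ORR of a logical immediate into the zero register. For example,
// 0x0000000012340000 needs one MOVZ, 0xffffffffffff1234 one MOVN, and
// 0x5555555555555555 one ORR from XZR.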
SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) {
  SDNode *ResNode;
  SDLoc dl(Node);
  EVT DestType = Node->getValueType(0);
  unsigned DestWidth = DestType.getSizeInBits();

  unsigned MOVOpcode;
  EVT MOVType;
  int UImm16, Shift;
  uint32_t LogicalBits;

  uint64_t BitPat = cast<ConstantSDNode>(Node)->getZExtValue();
  if (A64Imms::isMOVZImm(DestWidth, BitPat, UImm16, Shift)) {
    MOVType = DestType;
    MOVOpcode = DestWidth == 64 ? AArch64::MOVZxii : AArch64::MOVZwii;
  } else if (A64Imms::isMOVNImm(DestWidth, BitPat, UImm16, Shift)) {
    MOVType = DestType;
    MOVOpcode = DestWidth == 64 ? AArch64::MOVNxii : AArch64::MOVNwii;
  } else if (DestWidth == 64 && A64Imms::isMOVNImm(32, BitPat, UImm16, Shift)) {
    // To get something like 0x0000_0000_ffff_1234 into a 64-bit register we
    // can use a 32-bit instruction: "movn w0, 0xedcb".
    MOVType = MVT::i32;
    MOVOpcode = AArch64::MOVNwii;
  } else if (A64Imms::isLogicalImm(DestWidth, BitPat, LogicalBits)) {
    MOVOpcode = DestWidth == 64 ? AArch64::ORRxxi : AArch64::ORRwwi;
    uint16_t ZR = DestWidth == 64 ? AArch64::XZR : AArch64::WZR;

    return CurDAG->getMachineNode(MOVOpcode, dl, DestType,
                              CurDAG->getRegister(ZR, DestType),
                              CurDAG->getTargetConstant(LogicalBits, MVT::i32));
  } else {
    // Can't handle it in one instruction. There's scope for permitting two (or
    // more) instructions, but that'll need more thought.
    return NULL;
  }

  ResNode = CurDAG->getMachineNode(MOVOpcode, dl, MOVType,
                                   CurDAG->getTargetConstant(UImm16, MVT::i32),
                                   CurDAG->getTargetConstant(Shift, MVT::i32));

  if (MOVType != DestType) {
    ResNode = CurDAG->getMachineNode(TargetOpcode::SUBREG_TO_REG, dl,
                          MVT::i64, MVT::i32, MVT::Other,
                          CurDAG->getTargetConstant(0, MVT::i64),
                          SDValue(ResNode, 0),
                          CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32));
  }

  return ResNode;
}

SDValue
AArch64DAGToDAGISel::getConstantPoolItemAddress(SDLoc DL,
                                                const Constant *CV) {
  EVT PtrVT = getTargetLowering()->getPointerTy();

  switch (getTargetLowering()->getTargetMachine().getCodeModel()) {
  case CodeModel::Small: {
    unsigned Alignment =
      getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType());
    return CurDAG->getNode(
        AArch64ISD::WrapperSmall, DL, PtrVT,
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_NO_FLAG),
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_LO12),
        CurDAG->getConstant(Alignment, MVT::i32));
  }
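  // In the large memory model the pool address is materialised as a full
  // 64-bit absolute value, 16 bits at a time: the MOVZ sets bits [63:48] and
  // the three following MOVKs fill in [47:32], [31:16] and [15:0].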
  case CodeModel::Large: {
    SDNode *LitAddr;
    LitAddr = CurDAG->getMachineNode(
        AArch64::MOVZxii, DL, PtrVT,
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G3),
        CurDAG->getTargetConstant(3, MVT::i32));
    LitAddr = CurDAG->getMachineNode(
        AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC),
        CurDAG->getTargetConstant(2, MVT::i32));
    LitAddr = CurDAG->getMachineNode(
        AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC),
        CurDAG->getTargetConstant(1, MVT::i32));
    LitAddr = CurDAG->getMachineNode(
        AArch64::MOVKxii, DL, PtrVT, SDValue(LitAddr, 0),
        CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC),
        CurDAG->getTargetConstant(0, MVT::i32));
    return SDValue(LitAddr, 0);
  }
  default:
    llvm_unreachable("Only small and large code models supported now");
  }
}

SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
  SDLoc DL(Node);
  uint64_t UnsignedVal = cast<ConstantSDNode>(Node)->getZExtValue();
  int64_t SignedVal = cast<ConstantSDNode>(Node)->getSExtValue();
  EVT DestType = Node->getValueType(0);

  // Since we may end up loading a 64-bit constant from a 32-bit entry, the
  // constant in the pool may have a different type to the eventual node.
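  // For example, 0x00000000deadbeef is stored as a 32-bit entry and
  // zero-extended when loaded, 0xffffffff80000000 as a 32-bit entry that is
  // sign-extended, and only a constant outside both ranges needs a full
  // 64-bit pool entry.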
  ISD::LoadExtType Extension;
  EVT MemType;

  assert((DestType == MVT::i64 || DestType == MVT::i32)
         && "Only expect integer constants at the moment");

  if (DestType == MVT::i32) {
    Extension = ISD::NON_EXTLOAD;
    MemType = MVT::i32;
  } else if (UnsignedVal <= UINT32_MAX) {
    Extension = ISD::ZEXTLOAD;
    MemType = MVT::i32;
  } else if (SignedVal >= INT32_MIN && SignedVal <= INT32_MAX) {
    Extension = ISD::SEXTLOAD;
    MemType = MVT::i32;
  } else {
    Extension = ISD::NON_EXTLOAD;
    MemType = MVT::i64;
  }

  Constant *CV = ConstantInt::get(Type::getIntNTy(*CurDAG->getContext(),
                                                  MemType.getSizeInBits()),
                                  UnsignedVal);
  SDValue PoolAddr = getConstantPoolItemAddress(DL, CV);
  unsigned Alignment =
    getTargetLowering()->getDataLayout()->getABITypeAlignment(CV->getType());

  return CurDAG->getExtLoad(Extension, DL, DestType, CurDAG->getEntryNode(),
                            PoolAddr,
                            MachinePointerInfo::getConstantPool(), MemType,
                            /* isVolatile = */ false,
                            /* isNonTemporal = */ false,
                            Alignment).getNode();
}

SDNode *AArch64DAGToDAGISel::LowerToFPLitPool(SDNode *Node) {
  SDLoc DL(Node);
  const ConstantFP *FV = cast<ConstantFPSDNode>(Node)->getConstantFPValue();
  EVT DestType = Node->getValueType(0);

  unsigned Alignment =
    getTargetLowering()->getDataLayout()->getABITypeAlignment(FV->getType());
  SDValue PoolAddr = getConstantPoolItemAddress(DL, FV);

  return CurDAG->getLoad(DestType, DL, CurDAG->getEntryNode(), PoolAddr,
                         MachinePointerInfo::getConstantPool(),
                         /* isVolatile = */ false,
                         /* isNonTemporal = */ false,
                         /* isInvariant = */ true,
                         Alignment).getNode();
}

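// Match an operand for a single-bit test such as TBZ/TBNZ: the constant must
// be a power of two whose set bit lies within the register. For example, a
// mask of 0x80 selects bit 7.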
bool
AArch64DAGToDAGISel::SelectTSTBOperand(SDValue N, SDValue &FixedPos,
                                       unsigned RegWidth) {
  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) return false;

  uint64_t Val = CN->getZExtValue();

  if (!isPowerOf2_64(Val)) return false;

  unsigned TestedBit = Log2_64(Val);
  // Checks above should have guaranteed that we haven't lost information in
  // finding TestedBit, but it must still be in range.
  if (TestedBit >= RegWidth) return false;

  FixedPos = CurDAG->getTargetConstant(TestedBit, MVT::i64);
  return true;
}

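// Select an atomic operation to the pseudo-instruction of the matching
// width. The pseudo's operands are the node's non-chain operands, followed
// by the AtomicOrdering as an immediate, with the chain moved to the end.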
SDNode *AArch64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
                                          unsigned Op16, unsigned Op32,
                                          unsigned Op64) {
  // Mostly direct translation to the given operations, except that we preserve
  // the AtomicOrdering for use later on.
  AtomicSDNode *AN = cast<AtomicSDNode>(Node);
  EVT VT = AN->getMemoryVT();

  unsigned Op;
  if (VT == MVT::i8)
    Op = Op8;
  else if (VT == MVT::i16)
    Op = Op16;
  else if (VT == MVT::i32)
    Op = Op32;
  else if (VT == MVT::i64)
    Op = Op64;
  else
    llvm_unreachable("Unexpected atomic operation");

  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 1; i < AN->getNumOperands(); ++i)
    Ops.push_back(AN->getOperand(i));

  Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
  Ops.push_back(AN->getOperand(0)); // Chain moves to the end

  return CurDAG->SelectNodeTo(Node, Op,
                              AN->getValueType(0), MVT::Other,
                              &Ops[0], Ops.size());
}

SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
  // Dump information about the Node being selected
  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << "\n");

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
    return NULL;
  }

  switch (Node->getOpcode()) {
  case ISD::ATOMIC_LOAD_ADD:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_ADD_I8,
                        AArch64::ATOMIC_LOAD_ADD_I16,
                        AArch64::ATOMIC_LOAD_ADD_I32,
                        AArch64::ATOMIC_LOAD_ADD_I64);
  case ISD::ATOMIC_LOAD_SUB:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_SUB_I8,
                        AArch64::ATOMIC_LOAD_SUB_I16,
                        AArch64::ATOMIC_LOAD_SUB_I32,
                        AArch64::ATOMIC_LOAD_SUB_I64);
  case ISD::ATOMIC_LOAD_AND:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_AND_I8,
                        AArch64::ATOMIC_LOAD_AND_I16,
                        AArch64::ATOMIC_LOAD_AND_I32,
                        AArch64::ATOMIC_LOAD_AND_I64);
  case ISD::ATOMIC_LOAD_OR:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_OR_I8,
                        AArch64::ATOMIC_LOAD_OR_I16,
                        AArch64::ATOMIC_LOAD_OR_I32,
                        AArch64::ATOMIC_LOAD_OR_I64);
  case ISD::ATOMIC_LOAD_XOR:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_XOR_I8,
                        AArch64::ATOMIC_LOAD_XOR_I16,
                        AArch64::ATOMIC_LOAD_XOR_I32,
                        AArch64::ATOMIC_LOAD_XOR_I64);
  case ISD::ATOMIC_LOAD_NAND:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_NAND_I8,
                        AArch64::ATOMIC_LOAD_NAND_I16,
                        AArch64::ATOMIC_LOAD_NAND_I32,
                        AArch64::ATOMIC_LOAD_NAND_I64);
  case ISD::ATOMIC_LOAD_MIN:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_MIN_I8,
                        AArch64::ATOMIC_LOAD_MIN_I16,
                        AArch64::ATOMIC_LOAD_MIN_I32,
                        AArch64::ATOMIC_LOAD_MIN_I64);
  case ISD::ATOMIC_LOAD_MAX:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_MAX_I8,
                        AArch64::ATOMIC_LOAD_MAX_I16,
                        AArch64::ATOMIC_LOAD_MAX_I32,
                        AArch64::ATOMIC_LOAD_MAX_I64);
  case ISD::ATOMIC_LOAD_UMIN:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_UMIN_I8,
                        AArch64::ATOMIC_LOAD_UMIN_I16,
                        AArch64::ATOMIC_LOAD_UMIN_I32,
                        AArch64::ATOMIC_LOAD_UMIN_I64);
  case ISD::ATOMIC_LOAD_UMAX:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_UMAX_I8,
                        AArch64::ATOMIC_LOAD_UMAX_I16,
                        AArch64::ATOMIC_LOAD_UMAX_I32,
                        AArch64::ATOMIC_LOAD_UMAX_I64);
  case ISD::ATOMIC_SWAP:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_SWAP_I8,
                        AArch64::ATOMIC_SWAP_I16,
                        AArch64::ATOMIC_SWAP_I32,
                        AArch64::ATOMIC_SWAP_I64);
  case ISD::ATOMIC_CMP_SWAP:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_CMP_SWAP_I8,
                        AArch64::ATOMIC_CMP_SWAP_I16,
                        AArch64::ATOMIC_CMP_SWAP_I32,
                        AArch64::ATOMIC_CMP_SWAP_I64);
  case ISD::FrameIndex: {
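    // A frame index becomes "ADD Xd, Xn|SP, #0"; the real base register and
    // offset are filled in later, when frame indices are eliminated.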
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    EVT PtrTy = getTargetLowering()->getPointerTy();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, PtrTy);
    return CurDAG->SelectNodeTo(Node, AArch64::ADDxxi_lsl0_s, PtrTy,
                                TFI, CurDAG->getTargetConstant(0, PtrTy));
  }
  case ISD::ConstantPool: {
    // Constant pools are fine, just create a Target entry.
    ConstantPoolSDNode *CN = cast<ConstantPoolSDNode>(Node);
    const Constant *C = CN->getConstVal();
    SDValue CP = CurDAG->getTargetConstantPool(C, CN->getValueType(0));

    ReplaceUses(SDValue(Node, 0), CP);
    return NULL;
  }
  case ISD::Constant: {
    SDNode *ResNode = NULL;
    if (cast<ConstantSDNode>(Node)->getZExtValue() == 0) {
      // XZR and WZR are probably even better than an actual move: most of the
      // time they can be folded into another instruction with *no* cost.

      EVT Ty = Node->getValueType(0);
      assert((Ty == MVT::i32 || Ty == MVT::i64) && "unexpected type");
      uint16_t Register = Ty == MVT::i32 ? AArch64::WZR : AArch64::XZR;
      ResNode = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                       SDLoc(Node),
                                       Register, Ty).getNode();
    }

    // Next best option is a move-immediate; see if we can do that.
    if (!ResNode) {
      ResNode = TrySelectToMoveImm(Node);
    }

    if (ResNode)
      return ResNode;

    // If even that fails we fall back to a lit-pool entry at the moment. Future
    // tuning may change this to a sequence of MOVZ/MOVN/MOVK instructions.
    ResNode = SelectToLitPool(Node);
    assert(ResNode && "We need *some* way to materialise a constant");

    // We want to continue selection at this point since the litpool access we
    // just generated used generic nodes for simplicity.
    ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
    Node = ResNode;
    break;
  }
  case ISD::ConstantFP: {
    if (A64Imms::isFPImm(cast<ConstantFPSDNode>(Node)->getValueAPF())) {
      // FMOV will take care of it from TableGen.
      break;
    }

    SDNode *ResNode = LowerToFPLitPool(Node);
    ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));

    // We want to continue selection at this point since the litpool access we
    // just generated used generic nodes for simplicity.
    Node = ResNode;
    break;
  }
  default:
    break; // Let generic code handle it
  }

  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << "\n");

  return ResNode;
}

/// This pass converts a legalized DAG into an AArch64-specific DAG, ready for
/// instruction scheduling.
FunctionPass *llvm::createAArch64ISelDAG(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}