//===-- AMDGPUISelLowering.h - AMDGPU Lowering Interface --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Interface definition of the TargetLowering class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#ifndef AMDGPUISELLOWERING_H
#define AMDGPUISELLOWERING_H

#include "llvm/Target/TargetLowering.h"

namespace llvm {

class AMDGPUMachineFunction;
class AMDGPUSubtarget;
class MachineRegisterInfo;

class AMDGPUTargetLowering : public TargetLowering {
protected:
  const AMDGPUSubtarget *Subtarget;

private:
  SDValue LowerConstantInitializer(const Constant* Init, const GlobalValue *GV,
                                   const SDValue &InitPtr,
                                   SDValue Chain,
                                   SelectionDAG &DAG) const;
  SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  /// \brief Lower vector stores by merging the vector elements into an integer
  /// of the same bitwidth.
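  ///
  /// For example (an illustrative sketch, not code from this file), a store
  /// of a <4 x i8> value can be rewritten as a single i32 store of the same
  /// 32 bits:
  /// \code
  ///   // 'Store' is assumed to be the StoreSDNode of a v4i8 value and 'DL'
  ///   // its debug location; both names are placeholders.
  ///   SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32,
  ///                              Store->getValue());
  ///   SDValue NewStore = DAG.getStore(Store->getChain(), DL, Cast,
  ///                                   Store->getBasePtr(),
  ///                                   Store->getMemOperand());
  /// \endcode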
  SDValue MergeVectorStore(const SDValue &Op, SelectionDAG &DAG) const;

  SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSDIV24(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSDIV32(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSDIV64(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSREM32(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSREM64(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

  SDValue ExpandSIGN_EXTEND_INREG(SDValue Op,
                                  unsigned BitsDiff,
                                  SelectionDAG &DAG) const;
  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

  SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;

protected:
  static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);
  static EVT getEquivalentLoadRegType(LLVMContext &Context, EVT VT);

  /// \brief Helper function that adds Reg to the LiveIn list of the DAG's
  /// MachineFunction.
  ///
  /// \returns a RegisterSDNode representing Reg.
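  ///
  /// A typical call site looks like the following sketch (the register class
  /// and the InputReg value are illustrative assumptions, not code from this
  /// file):
  /// \code
  ///   // Mark an incoming scalar register live-in and get an SDValue for it.
  ///   SDValue Arg = CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
  ///                                      InputReg, MVT::i32);
  /// \endcode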
  virtual SDValue CreateLiveInRegister(SelectionDAG &DAG,
                                       const TargetRegisterClass *RC,
                                       unsigned Reg, EVT VT) const;
  SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
                             SelectionDAG &DAG) const;
  /// \brief Split a vector load into multiple scalar loads.
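  ///
  /// Conceptually (an illustrative sketch; the local names are assumptions,
  /// not code from this file), each element is loaded from BasePtr plus its
  /// byte offset and the results are recombined into a vector:
  /// \code
  ///   SmallVector<SDValue, 8> Loads;
  ///   for (unsigned i = 0; i < NumElts; ++i) {
  ///     SDValue ElemPtr = DAG.getNode(ISD::ADD, DL, PtrVT, BasePtr,
  ///                                   DAG.getConstant(i * EltSize, PtrVT));
  ///     Loads.push_back(DAG.getLoad(EltVT, DL, Chain, ElemPtr,
  ///                                 MachinePointerInfo(), false, false,
  ///                                 false, Align));
  ///   }
  ///   SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, DL, VecVT, Loads);
  /// \endcode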
  SDValue SplitVectorLoad(const SDValue &Op, SelectionDAG &DAG) const;
  /// \brief Split a vector store into multiple scalar stores.
  /// \returns The resulting chain.
  SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  bool isHWTrueValue(SDValue Op) const;
  bool isHWFalseValue(SDValue Op) const;

  /// The SelectionDAGBuilder will automatically promote function arguments
  /// with illegal types.  However, this does not work for the AMDGPU targets
  /// since the function arguments are stored in memory as these illegal types.
  /// In order to handle this properly we need to get the original sizes of the
  /// types from the LLVM IR Function and fix up the ISD::InputArg values before
  /// passing them to AnalyzeFormalArguments().
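  ///
  /// For example (an illustrative sketch; the exact fields touched are an
  /// assumption, not code from this file): an i8 kernel argument is promoted
  /// to i32 by the SelectionDAGBuilder, but it is stored in argument memory
  /// as an i8, so its InputArg is rewritten to describe the IR type before
  /// the offsets are computed:
  /// \code
  ///   ISD::InputArg Arg = Ins[i];
  ///   Arg.VT = MVT::i8; // original IR type rather than the promoted i32
  ///   OrigIns.push_back(Arg);
  /// \endcode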
  void getOriginalFunctionArgs(SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const;
  void AnalyzeFormalArguments(CCState &State,
                              const SmallVectorImpl<ISD::InputArg> &Ins) const;

public:
  AMDGPUTargetLowering(TargetMachine &TM);

  bool isFAbsFree(EVT VT) const override;
  bool isFNegFree(EVT VT) const override;
  bool isTruncateFree(EVT Src, EVT Dest) const override;
  bool isTruncateFree(Type *Src, Type *Dest) const override;

  bool isZExtFree(Type *Src, Type *Dest) const override;
  bool isZExtFree(EVT Src, EVT Dest) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;

  MVT getVectorIdxTy() const override;
  bool isSelectSupported(SelectSupportKind) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override;

  bool isLoadBitCastBeneficial(EVT, EVT) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      SDLoc DL, SelectionDAG &DAG) const override;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  void ReplaceNodeResults(SDNode *N,
                          SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue LowerIntrinsicIABS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerIntrinsicLRP(SDValue Op, SelectionDAG &DAG) const;
  SDValue CombineMinMax(SDNode *N, SelectionDAG &DAG) const;
  const char* getTargetNodeName(unsigned Opcode) const override;

  virtual SDNode *PostISelFolding(MachineSDNode *N,
                                  SelectionDAG &DAG) const {
    return N;
  }

  /// \brief Determine which bits of \p Op are known to be either zero or one
  /// and return them in the \p KnownZero and \p KnownOne bitsets.
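  ///
  /// For example, the result of an AMDGPUISD::BFE_U32 with a constant width
  /// of 8 is zero-extended from 8 bits, so its upper 24 bits are known to be
  /// zero (a worked illustration; the set of opcodes actually handled is
  /// defined by the implementation).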
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     APInt &KnownZero,
                                     APInt &KnownOne,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  virtual unsigned ComputeNumSignBitsForTargetNode(
    SDValue Op,
    const SelectionDAG &DAG,
    unsigned Depth = 0) const override;
};

namespace AMDGPUISD {

enum {
  // AMDIL ISD Opcodes
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  CALL,        // Function call based on a single integer
  UMUL,        // 32-bit unsigned multiplication
  RET_FLAG,
  BRANCH_COND,
  // End AMDIL ISD Opcodes
  DWORDADDR,
  FRACT,
  CLAMP,

  // SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to 100 pi.
  // Denormals handled on some parts.
  COS_HW,
  SIN_HW,
  FMAX,
  SMAX,
  UMAX,
  FMIN,
  SMIN,
  UMIN,
  URECIP,
  DIV_SCALE,
  DIV_FMAS,
  DIV_FIXUP,
  TRIG_PREOP, // 1 ULP max error for f64

  // RCP, RSQ - For f32, 1 ULP max error, no denormal handling.
  //            For f64, max error 2^29 ULP, handles denormals.
  RCP,
  RSQ,
  RSQ_LEGACY,
  RSQ_CLAMPED,
  DOT4,
  BFE_U32, // Extract a range of bits with zero extension to 32 bits.
  BFE_I32, // Extract a range of bits with sign extension to 32 bits.
  BFI, // (src0 & src1) | (~src0 & src2). See the worked example below.
  BFM, // Insert a range of bits into a 32-bit word.
  BREV, // Reverse bits.
  MUL_U24,
  MUL_I24,
  MAD_U24,
  MAD_I24,
  TEXTURE_FETCH,
  EXPORT,
  CONST_ADDRESS,
  REGISTER_LOAD,
  REGISTER_STORE,
  LOAD_INPUT,
  SAMPLE,
  SAMPLEB,
  SAMPLED,
  SAMPLEL,

  // These cvt_f32_ubyte* nodes need to remain consecutive and in order.
  CVT_F32_UBYTE0,
  CVT_F32_UBYTE1,
  CVT_F32_UBYTE2,
  CVT_F32_UBYTE3,
  /// This node is for VLIW targets and it is used to represent a vector
  /// that is stored in consecutive registers with the same channel.
  /// For example:
  ///   |X  |Y|Z|W|
  /// T0|v.x| | | |
  /// T1|v.y| | | |
  /// T2|v.z| | | |
  /// T3|v.w| | | |
  BUILD_VERTICAL_VECTOR,
  FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
  STORE_MSKOR,
  LOAD_CONSTANT,
  TBUFFER_STORE_FORMAT,
  LAST_AMDGPU_ISD_NUMBER
};
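
// A worked example for BFI (illustrative arithmetic, not code from this
// file): with src0 = 0x0000FFFF, src1 = 0x12345678 and src2 = 0x9ABCDEF0,
//   (src0 &  src1) = 0x00005678
//   (~src0 & src2) = 0x9ABC0000
// so the result is 0x9ABC5678: src0 selects, bit by bit, whether the output
// comes from src1 (bit set) or src2 (bit clear).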

} // End namespace AMDGPUISD

} // End namespace llvm

#endif // AMDGPUISELLOWERING_H