Home | History | Annotate | Download | only in rdma
      1 /****************************************************************************
      2  ****************************************************************************
      3  ***
      4  ***   This header was automatically generated from a Linux kernel header
      5  ***   of the same name, to make information necessary for userspace to
      6  ***   call into the kernel available to libc.  It contains only constants,
      7  ***   structures, and macros generated from the original header, and thus,
      8  ***   contains no copyrightable information.
      9  ***
     10  ***   To edit the content of this header, modify the corresponding
     11  ***   source file (e.g. under external/kernel-headers/original/) then
     12  ***   run bionic/libc/kernel/tools/update_all.py
     13  ***
     14  ***   Any manual change here will be lost the next time this script will
     15  ***   be run. You've been warned!
     16  ***
     17  ****************************************************************************
     18  ****************************************************************************/
     19 #ifndef MLX5_ABI_USER_H
     20 #define MLX5_ABI_USER_H
     21 #include <linux/types.h>
     22 #include <linux/if_ether.h>
     23 #include <rdma/ib_user_ioctl_verbs.h>
         /*
          * MLX5_QP_FLAG_*: QP creation bit flags -- presumably consumed via
          * mlx5_ib_create_qp.flags / mlx5_ib_create_qp_rss.flags below
          * (NOTE(review): confirm against the mlx5_ib driver).
          */
      24 enum {
      25   MLX5_QP_FLAG_SIGNATURE = 1 << 0,
      26   MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
      27   MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
      28   MLX5_QP_FLAG_BFREG_INDEX = 1 << 3, /* pairs with mlx5_ib_create_qp.bfreg_index */
      29   MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
      30   MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
      31   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6,
      32   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
      33   MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8,
      34   MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9,
      35 };
         /* SRQ creation flag -- see mlx5_ib_create_srq.flags. */
      36 enum {
      37   MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
      38 };
         /* WQ creation flag -- see mlx5_ib_create_wq.flags. */
      39 enum {
      40   MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
      41 };
         /* Userspace/kernel ABI version negotiated at context allocation. */
      42 #define MLX5_IB_UVERBS_ABI_VERSION 1
         /* Request payload for allocating a user context (original/v1 form). */
      43 struct mlx5_ib_alloc_ucontext_req {
      44   __u32 total_num_bfregs;       /* blue-flame registers requested in total */
      45   __u32 num_low_latency_bfregs; /* subset reserved for low-latency use */
      46 };
         /* Library capability bits advertised via mlx5_ib_alloc_ucontext_req_v2.lib_caps. */
      47 enum mlx5_lib_caps {
      48   MLX5_LIB_CAP_4K_UAR = (__u64) 1 << 0, /* cast keeps the constant 64-bit wide */
      49 };
         /* Flag bits for mlx5_ib_alloc_ucontext_req_v2.flags. */
      50 enum mlx5_ib_alloc_uctx_v2_flags {
      51   MLX5_IB_ALLOC_UCTX_DEVX = 1 << 0,
      52 };
         /*
          * v2 user-context allocation request. Extends the v1 request with
          * flags/capabilities; reserved fields are explicit padding and must
          * stay to preserve the ABI layout.
          */
      53 struct mlx5_ib_alloc_ucontext_req_v2 {
      54   __u32 total_num_bfregs;
      55   __u32 num_low_latency_bfregs;
      56   __u32 flags;     /* mlx5_ib_alloc_uctx_v2_flags bits */
      57   __u32 comp_mask;
      58   __u8 max_cqe_version;
      59   __u8 reserved0;  /* padding */
      60   __u16 reserved1; /* padding */
      61   __u32 reserved2; /* padding */
      62   __aligned_u64 lib_caps; /* mlx5_lib_caps bits; explicitly 8-byte aligned */
      63 };
         /* Bits for mlx5_ib_alloc_ucontext_resp.comp_mask (which optional fields are valid). */
      64 enum mlx5_ib_alloc_ucontext_resp_mask {
      65   MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
      66   MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY = 1UL << 1,
      67 };
         /* Bits for mlx5_ib_alloc_ucontext_resp.cmds_supp_uhw. */
      68 enum mlx5_user_cmds_supp_uhw {
      69   MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
      70   MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1,
      71 };
         /* Values for mlx5_ib_alloc_ucontext_resp.eth_min_inline (sequential, not bit flags). */
      72 enum mlx5_user_inline_mode {
      73   MLX5_USER_INLINE_MODE_NA,
      74   MLX5_USER_INLINE_MODE_NONE,
      75   MLX5_USER_INLINE_MODE_L2,
      76   MLX5_USER_INLINE_MODE_IP,
      77   MLX5_USER_INLINE_MODE_TCP_UDP,
      78 };
         /* Bits for mlx5_ib_alloc_ucontext_resp.flow_action_flags (ESP/AES-GCM offload). */
      79 enum {
      80   MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
      81   MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
      82   MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
      83   MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
      84   MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
      85 };
         /*
          * Kernel response to user-context allocation: device limits and
          * per-context resources. Field order/width is ABI -- do not change.
          */
      86 struct mlx5_ib_alloc_ucontext_resp {
      87   __u32 qp_tab_size;
      88   __u32 bf_reg_size;
      89   __u32 tot_bfregs;
      90   __u32 cache_line_size;
      91   __u16 max_sq_desc_sz;
      92   __u16 max_rq_desc_sz;
      93   __u32 max_send_wqebb;
      94   __u32 max_recv_wr;
      95   __u32 max_srq_recv_wr;
      96   __u16 num_ports;
      97   __u16 flow_action_flags;  /* anonymous ESP/AES-GCM flag enum above */
      98   __u32 comp_mask;          /* mlx5_ib_alloc_ucontext_resp_mask bits */
      99   __u32 response_length;    /* bytes of this response actually filled in */
     100   __u8 cqe_version;
     101   __u8 cmds_supp_uhw;       /* mlx5_user_cmds_supp_uhw bits */
     102   __u8 eth_min_inline;      /* mlx5_user_inline_mode value */
     103   __u8 clock_info_versions;
     104   __aligned_u64 hca_core_clock_offset; /* valid iff ..._MASK_CORE_CLOCK_OFFSET set */
     105   __u32 log_uar_size;
     106   __u32 num_uars_per_page;
     107   __u32 num_dyn_bfregs;
     108   __u32 dump_fill_mkey;     /* valid iff ..._MASK_DUMP_FILL_MKEY set */
     109 };
         /* Response to PD allocation: the protection-domain number. */
     110 struct mlx5_ib_alloc_pd_resp {
     111   __u32 pdn;
     112 };
         /* TSO capabilities reported in mlx5_ib_query_device_resp. */
     113 struct mlx5_ib_tso_caps {
     114   __u32 max_tso;        /* NOTE(review): presumably max TSO payload; confirm units */
     115   __u32 supported_qpts; /* bitmap of QP types supporting TSO */
     116 };
         /* RSS capabilities reported in mlx5_ib_query_device_resp. */
     117 struct mlx5_ib_rss_caps {
     118   __aligned_u64 rx_hash_fields_mask; /* mlx5_rx_hash_fields bits */
     119   __u8 rx_hash_function;             /* mlx5_rx_hash_function_flags bits */
     120   __u8 reserved[7];                  /* padding to 8-byte multiple */
     121 };
         /* Bits for mlx5_ib_cqe_comp_caps.supported_format. */
     122 enum mlx5_ib_cqe_comp_res_format {
     123   MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0,
     124   MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1,
     125   MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
     126 };
         /* CQE compression capabilities. */
     127 struct mlx5_ib_cqe_comp_caps {
     128   __u32 max_num;
     129   __u32 supported_format; /* mlx5_ib_cqe_comp_res_format bits */
     130 };
         /* Bits for mlx5_packet_pacing_caps.cap_flags. */
     131 enum mlx5_ib_packet_pacing_cap_flags {
     132   MLX5_IB_PP_SUPPORT_BURST = 1 << 0,
     133 };
         /* Packet-pacing (rate-limit) capabilities. */
     134 struct mlx5_packet_pacing_caps {
     135   __u32 qp_rate_limit_min;
     136   __u32 qp_rate_limit_max;
     137   __u32 supported_qpts;  /* bitmap of QP types supporting pacing */
     138   __u8 cap_flags;        /* mlx5_ib_packet_pacing_cap_flags bits */
     139   __u8 reserved[3];      /* padding */
     140 };
         /* Multi-packet WQE support levels (mlx5_ib_query_device_resp.mlx5_ib_support_multi_pkt_send_wqes). */
     141 enum mlx5_ib_mpw_caps {
     142   MPW_RESERVED = 1 << 0,
     143   MLX5_IB_ALLOW_MPW = 1 << 1,
     144   MLX5_IB_SUPPORT_EMPW = 1 << 2,
     145 };
         /* Bits for mlx5_ib_sw_parsing_caps.sw_parsing_offloads. */
     146 enum mlx5_ib_sw_parsing_offloads {
     147   MLX5_IB_SW_PARSING = 1 << 0,
     148   MLX5_IB_SW_PARSING_CSUM = 1 << 1,
     149   MLX5_IB_SW_PARSING_LSO = 1 << 2,
     150 };
         /* Software-parsing offload capabilities. */
     151 struct mlx5_ib_sw_parsing_caps {
     152   __u32 sw_parsing_offloads;
     153   __u32 supported_qpts;
     154 };
         /* Striding-RQ capability ranges (log2 values per the field names). */
     155 struct mlx5_ib_striding_rq_caps {
     156   __u32 min_single_stride_log_num_of_bytes;
     157   __u32 max_single_stride_log_num_of_bytes;
     158   __u32 min_single_wqe_log_num_of_strides;
     159   __u32 max_single_wqe_log_num_of_strides;
     160   __u32 supported_qpts;
     161   __u32 reserved; /* padding */
     162 };
         /* Bits for mlx5_ib_query_device_resp.flags. */
     163 enum mlx5_ib_query_dev_resp_flags {
     164   MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
     165   MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1,
     166   MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
     167 };
         /* Bits for mlx5_ib_query_device_resp.tunnel_offloads_caps. */
     168 enum mlx5_ib_tunnel_offloads {
     169   MLX5_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0,
     170   MLX5_IB_TUNNELED_OFFLOADS_GRE = 1 << 1,
     171   MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2,
     172   MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3,
     173   MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4,
     174 };
         /*
          * Vendor-specific portion of the query-device response, aggregating
          * the capability structs defined above.
          */
     175 struct mlx5_ib_query_device_resp {
     176   __u32 comp_mask;
     177   __u32 response_length; /* bytes actually filled in by the kernel */
     178   struct mlx5_ib_tso_caps tso_caps;
     179   struct mlx5_ib_rss_caps rss_caps;
     180   struct mlx5_ib_cqe_comp_caps cqe_comp_caps;
     181   struct mlx5_packet_pacing_caps packet_pacing_caps;
     182   __u32 mlx5_ib_support_multi_pkt_send_wqes; /* mlx5_ib_mpw_caps bits */
     183   __u32 flags; /* mlx5_ib_query_dev_resp_flags bits */
     184   struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
     185   struct mlx5_ib_striding_rq_caps striding_rq_caps;
     186   __u32 tunnel_offloads_caps; /* mlx5_ib_tunnel_offloads bits */
     187   __u32 reserved; /* padding */
     188 };
         /* Bits for mlx5_ib_create_cq.flags. */
     189 enum mlx5_ib_create_cq_flags {
     190   MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0,
     191 };
         /* CQ creation request: userspace-provided buffer and doorbell addresses. */
     192 struct mlx5_ib_create_cq {
     193   __aligned_u64 buf_addr; /* user VA of the CQ buffer */
     194   __aligned_u64 db_addr;  /* user VA of the doorbell record */
     195   __u32 cqe_size;
     196   __u8 cqe_comp_en;
     197   __u8 cqe_comp_res_format; /* mlx5_ib_cqe_comp_res_format value */
     198   __u16 flags;              /* mlx5_ib_create_cq_flags bits */
     199 };
         /* CQ creation response: the CQ number. */
     200 struct mlx5_ib_create_cq_resp {
     201   __u32 cqn;
     202   __u32 reserved; /* padding */
     203 };
         /* CQ resize request. */
     204 struct mlx5_ib_resize_cq {
     205   __aligned_u64 buf_addr;
     206   __u16 cqe_size;
     207   __u16 reserved0; /* padding */
     208   __u32 reserved1; /* padding */
     209 };
         /* SRQ creation request. */
     210 struct mlx5_ib_create_srq {
     211   __aligned_u64 buf_addr;
     212   __aligned_u64 db_addr;
     213   __u32 flags;     /* MLX5_SRQ_FLAG_* bits */
     214   __u32 reserved0; /* padding */
     215   __u32 uidx;
     216   __u32 reserved1; /* padding */
     217 };
         /* SRQ creation response: the SRQ number. */
     218 struct mlx5_ib_create_srq_resp {
     219   __u32 srqn;
     220   __u32 reserved; /* padding */
     221 };
         /* QP creation request. */
     222 struct mlx5_ib_create_qp {
     223   __aligned_u64 buf_addr; /* user VA of the QP buffer */
     224   __aligned_u64 db_addr;  /* user VA of the doorbell record */
     225   __u32 sq_wqe_count;
     226   __u32 rq_wqe_count;
     227   __u32 rq_wqe_shift;
     228   __u32 flags;       /* MLX5_QP_FLAG_* bits */
     229   __u32 uidx;
     230   __u32 bfreg_index; /* meaningful with MLX5_QP_FLAG_BFREG_INDEX */
     231   union {
     232     __aligned_u64 sq_buf_addr;
     233     __aligned_u64 access_key;
     234   };
     235 };
         /* Bits for rx_hash_function fields. */
     236 enum mlx5_rx_hash_function_flags {
     237   MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
     238 };
         /* Bits for rx_hash_fields_mask fields (which packet fields feed the RX hash). */
     239 enum mlx5_rx_hash_fields {
     240   MLX5_RX_HASH_SRC_IPV4 = 1 << 0,
     241   MLX5_RX_HASH_DST_IPV4 = 1 << 1,
     242   MLX5_RX_HASH_SRC_IPV6 = 1 << 2,
     243   MLX5_RX_HASH_DST_IPV6 = 1 << 3,
     244   MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4,
     245   MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
     246   MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
     247   MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
     248   MLX5_RX_HASH_IPSEC_SPI = 1 << 8,
     249   MLX5_RX_HASH_INNER = (1UL << 31), /* 1UL avoids UB of shifting int by 31 into sign bit */
     250 };
         /* RSS QP creation request. */
     251 struct mlx5_ib_create_qp_rss {
     252   __aligned_u64 rx_hash_fields_mask; /* mlx5_rx_hash_fields bits */
     253   __u8 rx_hash_function;             /* mlx5_rx_hash_function_flags bits */
     254   __u8 rx_key_len;                   /* valid bytes in rx_hash_key */
     255   __u8 reserved[6];                  /* padding */
     256   __u8 rx_hash_key[128];
     257   __u32 comp_mask;
     258   __u32 flags; /* MLX5_QP_FLAG_* bits */
     259 };
         /* Bits for mlx5_ib_create_qp_resp.comp_mask (which resp fields are valid). */
     260 enum mlx5_ib_create_qp_resp_mask {
     261   MLX5_IB_CREATE_QP_RESP_MASK_TIRN = 1UL << 0,
     262   MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1,
     263   MLX5_IB_CREATE_QP_RESP_MASK_RQN = 1UL << 2,
     264   MLX5_IB_CREATE_QP_RESP_MASK_SQN = 1UL << 3,
     265 };
         /* QP creation response. */
     266 struct mlx5_ib_create_qp_resp {
     267   __u32 bfreg_index;
     268   __u32 reserved;  /* padding */
     269   __u32 comp_mask; /* mlx5_ib_create_qp_resp_mask bits */
     270   __u32 tirn;
     271   __u32 tisn;
     272   __u32 rqn;
     273   __u32 sqn;
     274   __u32 reserved1; /* padding */
     275 };
         /* Memory-window allocation request. */
     276 struct mlx5_ib_alloc_mw {
     277   __u32 comp_mask;
     278   __u8 num_klms;
     279   __u8 reserved1;  /* padding */
     280   __u16 reserved2; /* padding */
     281 };
         /* Bits for mlx5_ib_create_wq.comp_mask. */
     282 enum mlx5_ib_create_wq_mask {
     283   MLX5_IB_CREATE_WQ_STRIDING_RQ = (1 << 0),
     284 };
         /* WQ creation request. */
     285 struct mlx5_ib_create_wq {
     286   __aligned_u64 buf_addr;
     287   __aligned_u64 db_addr;
     288   __u32 rq_wqe_count;
     289   __u32 rq_wqe_shift;
     290   __u32 user_index;
     291   __u32 flags;     /* MLX5_WQ_FLAG_* bits */
     292   __u32 comp_mask; /* mlx5_ib_create_wq_mask bits */
     293   __u32 single_stride_log_num_of_bytes;  /* striding-RQ params, see caps above */
     294   __u32 single_wqe_log_num_of_strides;
     295   __u32 two_byte_shift_en;
     296 };
         /* AH creation response: resolved destination MAC. */
     297 struct mlx5_ib_create_ah_resp {
     298   __u32 response_length;
     299   __u8 dmac[ETH_ALEN]; /* ETH_ALEN from <linux/if_ether.h> */
     300   __u8 reserved[6];    /* padding */
     301 };
         /* Burst shaping parameters, embedded in mlx5_ib_modify_qp. */
     302 struct mlx5_ib_burst_info {
     303   __u32 max_burst_sz;
     304   __u16 typical_pkt_sz;
     305   __u16 reserved; /* padding */
     306 };
         /* QP modify request (vendor part). */
     307 struct mlx5_ib_modify_qp {
     308   __u32 comp_mask;
     309   struct mlx5_ib_burst_info burst_info;
     310   __u32 reserved; /* padding */
     311 };
         /* QP modify response. */
     312 struct mlx5_ib_modify_qp_resp {
     313   __u32 response_length;
     314   __u32 dctn; /* NOTE(review): presumably the DCT number for DCT QPs; confirm */
     315 };
         /* WQ creation response. */
     316 struct mlx5_ib_create_wq_resp {
     317   __u32 response_length;
     318   __u32 reserved; /* padding */
     319 };
         /* RWQ indirection-table creation response. */
     320 struct mlx5_ib_create_rwq_ind_tbl_resp {
     321   __u32 response_length;
     322   __u32 reserved; /* padding */
     323 };
         /* WQ modify request. */
     324 struct mlx5_ib_modify_wq {
     325   __u32 comp_mask;
     326   __u32 reserved; /* padding */
     327 };
         /*
          * Clock-info page layout shared with userspace (mapped via
          * MLX5_IB_MMAP_CLOCK_INFO). `sign` is checked against
          * MLX5_IB_CLOCK_INFO_KERNEL_UPDATING to detect in-progress updates.
          */
     328 struct mlx5_ib_clock_info {
     329   __u32 sign;
     330   __u32 resv; /* padding */
     331   __aligned_u64 nsec;
     332   __aligned_u64 cycles;
     333   __aligned_u64 frac;
     334   __u32 mult;
     335   __u32 shift;
     336   __aligned_u64 mask;
     337   __aligned_u64 overflow_period;
     338 };
         /* mmap offset commands (note: value 4 is intentionally skipped). */
     339 enum mlx5_ib_mmap_cmd {
     340   MLX5_IB_MMAP_REGULAR_PAGE = 0,
     341   MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
     342   MLX5_IB_MMAP_WC_PAGE = 2,
     343   MLX5_IB_MMAP_NC_PAGE = 3,
     344   MLX5_IB_MMAP_CORE_CLOCK = 5,
     345   MLX5_IB_MMAP_ALLOC_WC = 6,
     346   MLX5_IB_MMAP_CLOCK_INFO = 7,
     347   MLX5_IB_MMAP_DEVICE_MEM = 8,
     348 };
         /* Value of mlx5_ib_clock_info.sign while the kernel is updating the page. */
     349 enum {
     350   MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
     351 };
         /* Clock-info page layout version. */
     352 enum {
     353   MLX5_IB_CLOCK_INFO_V1 = 0,
     354 };
         /* One flow-counter descriptor. */
     355 struct mlx5_ib_flow_counters_desc {
     356   __u32 description;
     357   __u32 index;
     358 };
         /* Array of flow-counter descriptors passed from userspace. */
     359 struct mlx5_ib_flow_counters_data {
         /* RDMA_UAPI_PTR (from <rdma/ib_user_ioctl_verbs.h>) embeds a user pointer ABI-safely */
     360   RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data);
     361   __u32 ncounters; /* entries at counters_data */
     362   __u32 reserved;  /* padding */
     363 };
         /* Flow creation request; trailing flexible array sized by ncounters_data. */
     364 struct mlx5_ib_create_flow {
     365   __u32 ncounters_data;
     366   __u32 reserved; /* padding */
     367   struct mlx5_ib_flow_counters_data data[];
     368 };
    369 #endif
    370