1 /****************************************************************************
2  ****************************************************************************
3  ***
4  ***   This header was automatically generated from a Linux kernel header
5  ***   of the same name, to make information necessary for userspace to
6  ***   call into the kernel available to libc.  It contains only constants,
7  ***   structures, and macros generated from the original header, and thus,
8  ***   contains no copyrightable information.
9  ***
10  ***   To edit the content of this header, modify the corresponding
11  ***   source file (e.g. under external/kernel-headers/original/) then
12  ***   run bionic/libc/kernel/tools/update_all.py
13  ***
14  ***   Any manual change here will be lost the next time this script will
15  ***   be run. You've been warned!
16  ***
17  ****************************************************************************
18  ****************************************************************************/
19 #ifndef MLX5_ABI_USER_H
20 #define MLX5_ABI_USER_H
21 #include <linux/types.h>
22 #include <linux/if_ether.h>
23 #include <rdma/ib_user_ioctl_verbs.h>
/*
 * mlx5 uverbs ABI: flag and structure definitions shared between the
 * kernel mlx5_ib driver and userspace RDMA providers.  Every value and
 * struct layout in this header is kernel ABI — never renumber, reorder
 * or resize anything here.
 */
/* Creation flags carried in mlx5_ib_create_qp.flags. */
enum {
  MLX5_QP_FLAG_SIGNATURE = 1 << 0,
  MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
  MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
  MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
  MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
  MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
  MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6,
  MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
  MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8,
  MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9,
  MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10,
};
/* Creation flags carried in mlx5_ib_create_srq.flags. */
enum {
  MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
};
/* Creation flags carried in mlx5_ib_create_wq.flags. */
enum {
  MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
};
/* Version of the mlx5 uverbs ABI described by this header. */
#define MLX5_IB_UVERBS_ABI_VERSION 1
/*
 * alloc_ucontext request, v1: how many bfregs (doorbell / blue-flame
 * registers — presumed expansion of "bfreg", confirm against driver docs)
 * the caller wants, and how many of those should be low-latency.
 */
struct mlx5_ib_alloc_ucontext_req {
  __u32 total_num_bfregs;
  __u32 num_low_latency_bfregs;
};
/* 64-bit capability bits carried in mlx5_ib_alloc_ucontext_req_v2.lib_caps. */
enum mlx5_lib_caps {
  MLX5_LIB_CAP_4K_UAR = (__u64) 1 << 0,
  MLX5_LIB_CAP_DYN_UAR = (__u64) 1 << 1,
};
/* Flag bits carried in mlx5_ib_alloc_ucontext_req_v2.flags. */
enum mlx5_ib_alloc_uctx_v2_flags {
  MLX5_IB_ALLOC_UCTX_DEVX = 1 << 0,
};
/*
 * alloc_ucontext request, v2: extends the v1 request with flags,
 * library capabilities and an extensible comp_mask.  The reserved*
 * fields pad the layout (conventionally zero in uverbs requests —
 * NOTE(review): confirm the kernel rejects nonzero values).
 */
struct mlx5_ib_alloc_ucontext_req_v2 {
  __u32 total_num_bfregs;
  __u32 num_low_latency_bfregs;
  __u32 flags; /* enum mlx5_ib_alloc_uctx_v2_flags */
  __u32 comp_mask;
  __u8 max_cqe_version; /* highest CQE format version userspace supports */
  __u8 reserved0;
  __u16 reserved1;
  __u32 reserved2;
  __aligned_u64 lib_caps; /* enum mlx5_lib_caps */
};
/*
 * Bits set in mlx5_ib_alloc_ucontext_resp.comp_mask to tell userspace
 * which optional response fields the kernel filled in.
 */
enum mlx5_ib_alloc_ucontext_resp_mask {
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY = 1UL << 1,
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE = 1UL << 2,
};
/*
 * Commands that accept driver-private (uhw) data, reported in
 * mlx5_ib_alloc_ucontext_resp.cmds_supp_uhw.
 */
enum mlx5_user_cmds_supp_uhw {
  MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
  MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1,
};
/*
 * Minimum inline header mode for Ethernet sends, reported in
 * mlx5_ib_alloc_ucontext_resp.eth_min_inline (NA = not reported).
 */
enum mlx5_user_inline_mode {
  MLX5_USER_INLINE_MODE_NA,
  MLX5_USER_INLINE_MODE_NONE,
  MLX5_USER_INLINE_MODE_L2,
  MLX5_USER_INLINE_MODE_IP,
  MLX5_USER_INLINE_MODE_TCP_UDP,
};
/*
 * ESP/AES-GCM flow-action capability bits, reported in
 * mlx5_ib_alloc_ucontext_resp.flow_action_flags.
 */
enum {
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
};
/*
 * alloc_ucontext response: device limits and per-context resources the
 * kernel reports back to userspace.
 */
struct mlx5_ib_alloc_ucontext_resp {
  __u32 qp_tab_size;
  __u32 bf_reg_size;
  __u32 tot_bfregs;
  __u32 cache_line_size;
  __u16 max_sq_desc_sz;
  __u16 max_rq_desc_sz;
  __u32 max_send_wqebb;
  __u32 max_recv_wr;
  __u32 max_srq_recv_wr;
  __u16 num_ports;
  __u16 flow_action_flags; /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_* */
  __u32 comp_mask; /* enum mlx5_ib_alloc_ucontext_resp_mask */
  __u32 response_length; /* bytes of this response the kernel filled in — uverbs convention, confirm */
  __u8 cqe_version;
  __u8 cmds_supp_uhw; /* enum mlx5_user_cmds_supp_uhw */
  __u8 eth_min_inline; /* enum mlx5_user_inline_mode */
  __u8 clock_info_versions;
  __aligned_u64 hca_core_clock_offset; /* valid when ..._MASK_CORE_CLOCK_OFFSET is set */
  __u32 log_uar_size;
  __u32 num_uars_per_page;
  __u32 num_dyn_bfregs;
  __u32 dump_fill_mkey; /* valid when ..._MASK_DUMP_FILL_MKEY is set */
};
/* alloc_pd response: the hardware protection-domain number. */
struct mlx5_ib_alloc_pd_resp {
  __u32 pdn;
};
/*
 * TSO capabilities (part of mlx5_ib_query_device_resp).
 * supported_qpts is a bitmap of QP types — presumably 1 << IB_QPT_*,
 * confirm against the kernel driver.
 */
struct mlx5_ib_tso_caps {
  __u32 max_tso;
  __u32 supported_qpts;
};
/* RSS capabilities (part of mlx5_ib_query_device_resp). */
struct mlx5_ib_rss_caps {
  __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
  __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
  __u8 reserved[7]; /* pads the struct to an 8-byte multiple */
};
/* Supported CQE compression result formats (bitmap). */
enum mlx5_ib_cqe_comp_res_format {
  MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0,
  MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1,
  MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
};
/* CQE compression capabilities (part of mlx5_ib_query_device_resp). */
struct mlx5_ib_cqe_comp_caps {
  __u32 max_num;
  __u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
};
/* Packet pacing capability flags for mlx5_packet_pacing_caps.cap_flags. */
enum mlx5_ib_packet_pacing_cap_flags {
  MLX5_IB_PP_SUPPORT_BURST = 1 << 0,
};
/* Packet pacing (rate limiting) capabilities. */
struct mlx5_packet_pacing_caps {
  __u32 qp_rate_limit_min;
  __u32 qp_rate_limit_max;
  __u32 supported_qpts; /* bitmap of QP types supporting rate limiting */
  __u8 cap_flags; /* enum mlx5_ib_packet_pacing_cap_flags */
  __u8 reserved[3]; /* pads to a 4-byte boundary */
};
/*
 * Multi-packet WQE capabilities, reported via
 * mlx5_ib_query_device_resp.mlx5_ib_support_multi_pkt_send_wqes.
 */
enum mlx5_ib_mpw_caps {
  MPW_RESERVED = 1 << 0, /* legacy/reserved bit kept for ABI compatibility */
  MLX5_IB_ALLOW_MPW = 1 << 1,
  MLX5_IB_SUPPORT_EMPW = 1 << 2,
};
/* Software parsing offload bits for mlx5_ib_sw_parsing_caps. */
enum mlx5_ib_sw_parsing_offloads {
  MLX5_IB_SW_PARSING = 1 << 0,
  MLX5_IB_SW_PARSING_CSUM = 1 << 1,
  MLX5_IB_SW_PARSING_LSO = 1 << 2,
};
/* Software parsing capabilities (part of mlx5_ib_query_device_resp). */
struct mlx5_ib_sw_parsing_caps {
  __u32 sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */
  __u32 supported_qpts;
};
/* Striding receive queue limits (log2 values; part of query_device_resp). */
struct mlx5_ib_striding_rq_caps {
  __u32 min_single_stride_log_num_of_bytes;
  __u32 max_single_stride_log_num_of_bytes;
  __u32 min_single_wqe_log_num_of_strides;
  __u32 max_single_wqe_log_num_of_strides;
  __u32 supported_qpts;
  __u32 reserved; /* pads to an 8-byte multiple */
};
/* Bits for mlx5_ib_query_device_resp.flags. */
enum mlx5_ib_query_dev_resp_flags {
  MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
  MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1,
  MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
  MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
};
/* Bits for mlx5_ib_query_device_resp.tunnel_offloads_caps. */
enum mlx5_ib_tunnel_offloads {
  MLX5_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0,
  MLX5_IB_TUNNELED_OFFLOADS_GRE = 1 << 1,
  MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2,
  MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3,
  MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4,
};
/*
 * Driver-private part of the query_device response, aggregating the
 * capability structures declared above.
 */
struct mlx5_ib_query_device_resp {
  __u32 comp_mask;
  __u32 response_length; /* bytes of this response the kernel filled in — uverbs convention, confirm */
  struct mlx5_ib_tso_caps tso_caps;
  struct mlx5_ib_rss_caps rss_caps;
  struct mlx5_ib_cqe_comp_caps cqe_comp_caps;
  struct mlx5_packet_pacing_caps packet_pacing_caps;
  __u32 mlx5_ib_support_multi_pkt_send_wqes; /* enum mlx5_ib_mpw_caps */
  __u32 flags; /* enum mlx5_ib_query_dev_resp_flags */
  struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
  struct mlx5_ib_striding_rq_caps striding_rq_caps;
  __u32 tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */
  __u32 reserved; /* pads to an 8-byte multiple */
};
/* Bits for mlx5_ib_create_cq.flags. */
enum mlx5_ib_create_cq_flags {
  MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0,
  MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX = 1 << 1,
};
/* create_cq request: userspace-allocated CQ buffer and doorbell. */
struct mlx5_ib_create_cq {
  __aligned_u64 buf_addr; /* userspace virtual address of the CQ buffer */
  __aligned_u64 db_addr; /* userspace virtual address of the doorbell record */
  __u32 cqe_size;
  __u8 cqe_comp_en;
  __u8 cqe_comp_res_format; /* enum mlx5_ib_cqe_comp_res_format */
  __u16 flags; /* enum mlx5_ib_create_cq_flags */
  __u16 uar_page_index; /* used with MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX */
  __u16 reserved0;
  __u32 reserved1;
};
/* create_cq response: hardware CQ number. */
struct mlx5_ib_create_cq_resp {
  __u32 cqn;
  __u32 reserved; /* pads to an 8-byte multiple */
};
/* resize_cq request: replacement buffer for an existing CQ. */
struct mlx5_ib_resize_cq {
  __aligned_u64 buf_addr;
  __u16 cqe_size;
  __u16 reserved0;
  __u32 reserved1;
};
/* create_srq request: userspace-allocated SRQ buffer and doorbell. */
struct mlx5_ib_create_srq {
  __aligned_u64 buf_addr; /* userspace virtual address of the SRQ buffer */
  __aligned_u64 db_addr; /* userspace virtual address of the doorbell record */
  __u32 flags; /* MLX5_SRQ_FLAG_* */
  __u32 reserved0;
  __u32 uidx; /* user index — presumably for DEVX/steering correlation, confirm */
  __u32 reserved1;
};
/* create_srq response: hardware SRQ number. */
struct mlx5_ib_create_srq_resp {
  __u32 srqn;
  __u32 reserved; /* pads to an 8-byte multiple */
};
/*
 * create_qp request: userspace-allocated work queues, sizing, and
 * per-QP flags.
 */
struct mlx5_ib_create_qp {
  __aligned_u64 buf_addr; /* userspace virtual address of the WQ buffer */
  __aligned_u64 db_addr; /* userspace virtual address of the doorbell record */
  __u32 sq_wqe_count;
  __u32 rq_wqe_count;
  __u32 rq_wqe_shift; /* log2 of the RQ WQE stride */
  __u32 flags; /* MLX5_QP_FLAG_* */
  __u32 uidx; /* user index — presumably for DEVX/steering correlation, confirm */
  __u32 bfreg_index; /* used with MLX5_QP_FLAG_BFREG_INDEX */
  union {
    __aligned_u64 sq_buf_addr; /* separate SQ buffer (when SQ and RQ are split) */
    __aligned_u64 access_key; /* NOTE(review): presumably for MLX5_QP_FLAG_TYPE_DCT — confirm */
  };
  __u32 ece_options;
  __u32 reserved; /* pads to an 8-byte multiple */
};
/* RX hash functions for mlx5_ib_create_qp_rss.rx_hash_function. */
enum mlx5_rx_hash_function_flags {
  MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
};
/*
 * Packet fields that can feed the RX hash
 * (mlx5_ib_create_qp_rss.rx_hash_fields_mask).  MLX5_RX_HASH_INNER
 * selects the inner headers of tunneled packets.
 */
enum mlx5_rx_hash_fields {
  MLX5_RX_HASH_SRC_IPV4 = 1 << 0,
  MLX5_RX_HASH_DST_IPV4 = 1 << 1,
  MLX5_RX_HASH_SRC_IPV6 = 1 << 2,
  MLX5_RX_HASH_DST_IPV6 = 1 << 3,
  MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4,
  MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
  MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
  MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
  MLX5_RX_HASH_IPSEC_SPI = 1 << 8,
  MLX5_RX_HASH_INNER = (1UL << 31),
};
/* create_qp request variant for RSS (hash-steered) QPs. */
struct mlx5_ib_create_qp_rss {
  __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
  __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
  __u8 rx_key_len; /* valid bytes in rx_hash_key */
  __u8 reserved[6];
  __u8 rx_hash_key[128];
  __u32 comp_mask;
  __u32 flags; /* MLX5_QP_FLAG_* */
};
/*
 * Bits set in mlx5_ib_create_qp_resp.comp_mask to tell userspace which
 * optional response fields are valid.
 */
enum mlx5_ib_create_qp_resp_mask {
  MLX5_IB_CREATE_QP_RESP_MASK_TIRN = 1UL << 0,
  MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1,
  MLX5_IB_CREATE_QP_RESP_MASK_RQN = 1UL << 2,
  MLX5_IB_CREATE_QP_RESP_MASK_SQN = 1UL << 3,
  MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR = 1UL << 4,
};
/* create_qp response: allocated resource numbers. */
struct mlx5_ib_create_qp_resp {
  __u32 bfreg_index;
  __u32 ece_options;
  __u32 comp_mask; /* enum mlx5_ib_create_qp_resp_mask */
  __u32 tirn; /* valid when ..._MASK_TIRN is set */
  __u32 tisn; /* valid when ..._MASK_TISN is set */
  __u32 rqn; /* valid when ..._MASK_RQN is set */
  __u32 sqn; /* valid when ..._MASK_SQN is set */
  __u32 reserved1; /* pads tir_icm_addr to an 8-byte offset */
  __u64 tir_icm_addr; /* valid when ..._MASK_TIR_ICM_ADDR is set */
};
/* alloc_mw request: memory-window parameters. */
struct mlx5_ib_alloc_mw {
  __u32 comp_mask;
  __u8 num_klms;
  __u8 reserved1;
  __u16 reserved2;
};
/* Bits for mlx5_ib_create_wq.comp_mask. */
enum mlx5_ib_create_wq_mask {
  MLX5_IB_CREATE_WQ_STRIDING_RQ = (1 << 0),
};
/* create_wq request: userspace-allocated work queue and its geometry. */
struct mlx5_ib_create_wq {
  __aligned_u64 buf_addr; /* userspace virtual address of the WQ buffer */
  __aligned_u64 db_addr; /* userspace virtual address of the doorbell record */
  __u32 rq_wqe_count;
  __u32 rq_wqe_shift; /* log2 of the RQ WQE stride */
  __u32 user_index;
  __u32 flags; /* MLX5_WQ_FLAG_* */
  __u32 comp_mask; /* enum mlx5_ib_create_wq_mask */
  __u32 single_stride_log_num_of_bytes; /* striding-RQ fields; bounds in mlx5_ib_striding_rq_caps */
  __u32 single_wqe_log_num_of_strides;
  __u32 two_byte_shift_en;
};
/* create_ah response: resolved destination MAC for the address handle. */
struct mlx5_ib_create_ah_resp {
  __u32 response_length; /* bytes of this response the kernel filled in — uverbs convention, confirm */
  __u8 dmac[ETH_ALEN];
  __u8 reserved[6]; /* pads to an 8-byte multiple */
};
/* Burst shaping parameters for rate-limited QPs (see MLX5_IB_PP_SUPPORT_BURST). */
struct mlx5_ib_burst_info {
  __u32 max_burst_sz;
  __u16 typical_pkt_sz;
  __u16 reserved;
};
/* modify_qp request: driver-private additions to the common modify_qp. */
struct mlx5_ib_modify_qp {
  __u32 comp_mask;
  struct mlx5_ib_burst_info burst_info;
  __u32 ece_options;
};
/* modify_qp response. dctn is the DCT number (DCT QPs — confirm when set). */
struct mlx5_ib_modify_qp_resp {
  __u32 response_length;
  __u32 dctn;
  __u32 ece_options;
  __u32 reserved; /* pads to an 8-byte multiple */
};
/* create_wq response (extensible; currently carries only its own length). */
struct mlx5_ib_create_wq_resp {
  __u32 response_length;
  __u32 reserved;
};
/* create_rwq_ind_tbl (receive WQ indirection table) response. */
struct mlx5_ib_create_rwq_ind_tbl_resp {
  __u32 response_length;
  __u32 reserved;
};
/* modify_wq request (extensible; no driver-private fields yet). */
struct mlx5_ib_modify_wq {
  __u32 comp_mask;
  __u32 reserved;
};
/*
 * Free-running HCA clock parameters, mapped read-only into userspace via
 * MLX5_IB_MMAP_CLOCK_INFO so time can be computed without a syscall.
 * NOTE(review): "sign" appears to act as an update/sequence counter
 * guarded by MLX5_IB_CLOCK_INFO_KERNEL_UPDATING — confirm the read
 * protocol against the kernel/rdma-core implementation.
 */
struct mlx5_ib_clock_info {
  __u32 sign;
  __u32 resv;
  __aligned_u64 nsec;
  __aligned_u64 cycles;
  __aligned_u64 frac;
  __u32 mult;
  __u32 shift;
  __aligned_u64 mask;
  __aligned_u64 overflow_period;
};
/*
 * Command values encoded in the mmap offset to select what a map of the
 * device file returns.  Value 4 is intentionally unused — do not reuse.
 */
enum mlx5_ib_mmap_cmd {
  MLX5_IB_MMAP_REGULAR_PAGE = 0,
  MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
  MLX5_IB_MMAP_WC_PAGE = 2,
  MLX5_IB_MMAP_NC_PAGE = 3,
  MLX5_IB_MMAP_CORE_CLOCK = 5,
  MLX5_IB_MMAP_ALLOC_WC = 6,
  MLX5_IB_MMAP_CLOCK_INFO = 7,
  MLX5_IB_MMAP_DEVICE_MEM = 8,
};
/* Flag in mlx5_ib_clock_info.sign while the kernel is mid-update. */
enum {
  MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
};
/* Version of the mlx5_ib_clock_info layout. */
enum {
  MLX5_IB_CLOCK_INFO_V1 = 0,
};
/* One flow-counter binding: description tag and hardware counter index. */
struct mlx5_ib_flow_counters_desc {
  __u32 description;
  __u32 index;
};
/* Userspace-provided array of flow-counter descriptors. */
struct mlx5_ib_flow_counters_data {
  RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data); /* 64-bit-safe user pointer */
  __u32 ncounters; /* elements in counters_data */
  __u32 reserved;
};
/* create_flow request: trailing flexible array of counter-data blocks. */
struct mlx5_ib_create_flow {
  __u32 ncounters_data; /* elements in data[] */
  __u32 reserved;
  struct mlx5_ib_flow_counters_data data[];
};
383 #endif
384