/* xref: /freebsd/sys/dev/mlx5/device.h (revision 0b3105a37d7adcadcb720112fed4dc4e8040be99) */
1 /*-
2  * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #ifndef MLX5_DEVICE_H
29 #define MLX5_DEVICE_H
30 
31 #include <linux/types.h>
32 #include <rdma/ib_verbs.h>
33 #include <dev/mlx5/mlx5_ifc.h>
34 
35 #define FW_INIT_TIMEOUT_MILI 2000
36 #define FW_INIT_WAIT_MS 2
37 
38 #if defined(__LITTLE_ENDIAN)
39 #define MLX5_SET_HOST_ENDIANNESS	0
40 #elif defined(__BIG_ENDIAN)
41 #define MLX5_SET_HOST_ENDIANNESS	0x80
42 #else
43 #error Host endianness not defined
44 #endif
45 
46 /* helper macros */
47 #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
48 #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
49 #define __mlx5_bit_off(typ, fld) __offsetof(struct mlx5_ifc_##typ##_bits, fld)
50 #define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
51 #define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
52 #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
53 #define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
54 #define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
55 #define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
56 
57 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
58 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
59 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
60 #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
61 #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
62 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
63 #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
64 
65 /* insert a value to a struct */
66 #define MLX5_SET(typ, p, fld, v) do { \
67 	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
68 	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
69 	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
70 	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
71 		     (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
72 		     << __mlx5_dw_bit_off(typ, fld))); \
73 } while (0)
74 
75 #define MLX5_SET_TO_ONES(typ, p, fld) do { \
76 	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
77 	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
78 	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
79 	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
80 		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
81 		     << __mlx5_dw_bit_off(typ, fld))); \
82 } while (0)
83 
84 #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
85 __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
86 __mlx5_mask(typ, fld))
87 
88 #define MLX5_GET_PR(typ, p, fld) ({ \
89 	u32 ___t = MLX5_GET(typ, p, fld); \
90 	pr_debug(#fld " = 0x%x\n", ___t); \
91 	___t; \
92 })
93 
94 #define MLX5_SET64(typ, p, fld, v) do { \
95 	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
96 	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
97 	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
98 } while (0)
99 
100 #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
101 
102 enum {
103 	MLX5_MAX_COMMANDS		= 32,
104 	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
105 	MLX5_PCI_CMD_XPORT		= 7,
106 	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
107 	MLX5_MAX_PSVS			= 4,
108 };
109 
110 enum {
111 	MLX5_EXTENDED_UD_AV		= 0x80000000,
112 };
113 
114 enum {
115 	MLX5_STAT_RATE_OFFSET	= 5,
116 };
117 
118 enum {
119 	MLX5_INLINE_SEG = 0x80000000,
120 };
121 
122 enum {
123 	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
124 };
125 
126 enum {
127 	MLX5_MIN_PKEY_TABLE_SIZE = 128,
128 	MLX5_MAX_LOG_PKEY_TABLE  = 5,
129 };
130 
131 enum {
132 	MLX5_PERM_LOCAL_READ	= 1 << 2,
133 	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
134 	MLX5_PERM_REMOTE_READ	= 1 << 4,
135 	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
136 	MLX5_PERM_ATOMIC	= 1 << 6,
137 	MLX5_PERM_UMR_EN	= 1 << 7,
138 };
139 
140 enum {
141 	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
142 	MLX5_PCIE_CTRL_RELAXED_ORDERING	= 1 << 2,
143 	MLX5_PCIE_CTRL_NO_SNOOP		= 1 << 3,
144 	MLX5_PCIE_CTRL_TLP_PROCE_EN	= 1 << 6,
145 	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
146 };
147 
148 enum {
149 	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
150 	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
151 	MLX5_MKEY_BSF_EN	= 1 << 30,
152 	MLX5_MKEY_LEN64		= 1 << 31,
153 };
154 
155 enum {
156 	MLX5_EN_RD	= (u64)1,
157 	MLX5_EN_WR	= (u64)2
158 };
159 
160 enum {
161 	MLX5_BF_REGS_PER_PAGE		= 4,
162 	MLX5_MAX_UAR_PAGES		= 1 << 8,
163 	MLX5_NON_FP_BF_REGS_PER_PAGE	= 2,
164 	MLX5_MAX_UUARS	= MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
165 };
166 
167 enum {
168 	MLX5_MKEY_MASK_LEN		= 1ull << 0,
169 	MLX5_MKEY_MASK_PAGE_SIZE	= 1ull << 1,
170 	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
171 	MLX5_MKEY_MASK_PD		= 1ull << 7,
172 	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
173 	MLX5_MKEY_MASK_EN_SIGERR	= 1ull << 9,
174 	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
175 	MLX5_MKEY_MASK_KEY		= 1ull << 13,
176 	MLX5_MKEY_MASK_QPN		= 1ull << 14,
177 	MLX5_MKEY_MASK_LR		= 1ull << 17,
178 	MLX5_MKEY_MASK_LW		= 1ull << 18,
179 	MLX5_MKEY_MASK_RR		= 1ull << 19,
180 	MLX5_MKEY_MASK_RW		= 1ull << 20,
181 	MLX5_MKEY_MASK_A		= 1ull << 21,
182 	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
183 	MLX5_MKEY_MASK_FREE		= 1ull << 29,
184 };
185 
186 enum {
187 	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
188 	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
189 	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
190 	MLX5_PORT_CHANGE_SUBTYPE_LID		= 6,
191 	MLX5_PORT_CHANGE_SUBTYPE_PKEY		= 7,
192 	MLX5_PORT_CHANGE_SUBTYPE_GUID		= 8,
193 	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG	= 9,
194 };
195 
196 enum {
197 	MLX5_DEV_CAP_FLAG_XRC		= 1LL <<  3,
198 	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
199 	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
200 	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
201 	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
202 	MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD	= 1LL << 21,
203 	MLX5_DEV_CAP_FLAG_BLOCK_MCAST	= 1LL << 23,
204 	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
205 	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
206 	MLX5_DEV_CAP_FLAG_ROCE          = 1LL << 34,
207 	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
208 	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
209 	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
210 };
211 
212 enum {
213 	MLX5_ROCE_VERSION_1		= 0,
214 	MLX5_ROCE_VERSION_1_5		= 1,
215 	MLX5_ROCE_VERSION_2		= 2,
216 };
217 
218 enum {
219 	MLX5_ROCE_VERSION_1_CAP		= 1 << MLX5_ROCE_VERSION_1,
220 	MLX5_ROCE_VERSION_1_5_CAP	= 1 << MLX5_ROCE_VERSION_1_5,
221 	MLX5_ROCE_VERSION_2_CAP		= 1 << MLX5_ROCE_VERSION_2,
222 };
223 
224 enum {
225 	MLX5_ROCE_L3_TYPE_IPV4		= 0,
226 	MLX5_ROCE_L3_TYPE_IPV6		= 1,
227 };
228 
229 enum {
230 	MLX5_ROCE_L3_TYPE_IPV4_CAP	= 1 << 1,
231 	MLX5_ROCE_L3_TYPE_IPV6_CAP	= 1 << 2,
232 };
233 
234 enum {
235 	MLX5_OPCODE_NOP			= 0x00,
236 	MLX5_OPCODE_SEND_INVAL		= 0x01,
237 	MLX5_OPCODE_RDMA_WRITE		= 0x08,
238 	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
239 	MLX5_OPCODE_SEND		= 0x0a,
240 	MLX5_OPCODE_SEND_IMM		= 0x0b,
241 	MLX5_OPCODE_LSO			= 0x0e,
242 	MLX5_OPCODE_RDMA_READ		= 0x10,
243 	MLX5_OPCODE_ATOMIC_CS		= 0x11,
244 	MLX5_OPCODE_ATOMIC_FA		= 0x12,
245 	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
246 	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
247 	MLX5_OPCODE_BIND_MW		= 0x18,
248 	MLX5_OPCODE_CONFIG_CMD		= 0x1f,
249 
250 	MLX5_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
251 	MLX5_RECV_OPCODE_SEND		= 0x01,
252 	MLX5_RECV_OPCODE_SEND_IMM	= 0x02,
253 	MLX5_RECV_OPCODE_SEND_INVAL	= 0x03,
254 
255 	MLX5_CQE_OPCODE_ERROR		= 0x1e,
256 	MLX5_CQE_OPCODE_RESIZE		= 0x16,
257 
258 	MLX5_OPCODE_SET_PSV		= 0x20,
259 	MLX5_OPCODE_GET_PSV		= 0x21,
260 	MLX5_OPCODE_CHECK_PSV		= 0x22,
261 	MLX5_OPCODE_RGET_PSV		= 0x26,
262 	MLX5_OPCODE_RCHECK_PSV		= 0x27,
263 
264 	MLX5_OPCODE_UMR			= 0x25,
265 
266 };
267 
268 enum {
269 	MLX5_SET_PORT_RESET_QKEY	= 0,
270 	MLX5_SET_PORT_GUID0		= 16,
271 	MLX5_SET_PORT_NODE_GUID		= 17,
272 	MLX5_SET_PORT_SYS_GUID		= 18,
273 	MLX5_SET_PORT_GID_TABLE		= 19,
274 	MLX5_SET_PORT_PKEY_TABLE	= 20,
275 };
276 
277 enum {
278 	MLX5_MAX_PAGE_SHIFT		= 31
279 };
280 
281 enum {
282 	MLX5_ADAPTER_PAGE_SHIFT		= 12,
283 	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
284 };
285 
286 enum {
287 	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
288 };
289 
290 struct mlx5_inbox_hdr {
291 	__be16		opcode;
292 	u8		rsvd[4];
293 	__be16		opmod;
294 };
295 
296 struct mlx5_outbox_hdr {
297 	u8		status;
298 	u8		rsvd[3];
299 	__be32		syndrome;
300 };
301 
302 struct mlx5_cmd_layout {
303 	u8		type;
304 	u8		rsvd0[3];
305 	__be32		inlen;
306 	__be64		in_ptr;
307 	__be32		in[4];
308 	__be32		out[4];
309 	__be64		out_ptr;
310 	__be32		outlen;
311 	u8		token;
312 	u8		sig;
313 	u8		rsvd1;
314 	u8		status_own;
315 };
316 
317 
318 struct mlx5_health_buffer {
319 	__be32		assert_var[5];
320 	__be32		rsvd0[3];
321 	__be32		assert_exit_ptr;
322 	__be32		assert_callra;
323 	__be32		rsvd1[2];
324 	__be32		fw_ver;
325 	__be32		hw_id;
326 	__be32		rsvd2;
327 	u8		irisc_index;
328 	u8		synd;
329 	__be16		ext_sync;
330 };
331 
332 struct mlx5_init_seg {
333 	__be32			fw_rev;
334 	__be32			cmdif_rev_fw_sub;
335 	__be32			rsvd0[2];
336 	__be32			cmdq_addr_h;
337 	__be32			cmdq_addr_l_sz;
338 	__be32			cmd_dbell;
339 	__be32			rsvd1[120];
340 	__be32			initializing;
341 	struct mlx5_health_buffer  health;
342 	__be32			rsvd2[884];
343 	__be32			health_counter;
344 	__be32			rsvd3[1019];
345 	__be64			ieee1588_clk;
346 	__be32			ieee1588_clk_type;
347 	__be32			clr_intx;
348 };
349 
350 struct mlx5_eqe_comp {
351 	__be32	reserved[6];
352 	__be32	cqn;
353 };
354 
355 struct mlx5_eqe_qp_srq {
356 	__be32	reserved[6];
357 	__be32	qp_srq_n;
358 };
359 
360 struct mlx5_eqe_cq_err {
361 	__be32	cqn;
362 	u8	reserved1[7];
363 	u8	syndrome;
364 };
365 
366 struct mlx5_eqe_port_state {
367 	u8	reserved0[8];
368 	u8	port;
369 };
370 
371 struct mlx5_eqe_gpio {
372 	__be32	reserved0[2];
373 	__be64	gpio_event;
374 };
375 
376 struct mlx5_eqe_congestion {
377 	u8	type;
378 	u8	rsvd0;
379 	u8	congestion_level;
380 };
381 
382 struct mlx5_eqe_stall_vl {
383 	u8	rsvd0[3];
384 	u8	port_vl;
385 };
386 
387 struct mlx5_eqe_cmd {
388 	__be32	vector;
389 	__be32	rsvd[6];
390 };
391 
392 struct mlx5_eqe_page_req {
393 	u8		rsvd0[2];
394 	__be16		func_id;
395 	__be32		num_pages;
396 	__be32		rsvd1[5];
397 };
398 
399 struct mlx5_eqe_vport_change {
400 	u8		rsvd0[2];
401 	__be16		vport_num;
402 	__be32		rsvd1[6];
403 };
404 
405 
406 #define PORT_MODULE_EVENT_MODULE_STATUS_MASK  0xF
407 #define PORT_MODULE_EVENT_ERROR_TYPE_MASK     0xF
408 
409 enum {
410 	MLX5_MODULE_STATUS_PLUGGED    = 0x1,
411 	MLX5_MODULE_STATUS_UNPLUGGED  = 0x2,
412 	MLX5_MODULE_STATUS_ERROR      = 0x3,
413 };
414 
415 enum {
416 	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED                 = 0x0,
417 	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE  = 0x1,
418 	MLX5_MODULE_EVENT_ERROR_BUS_STUCK                             = 0x2,
419 	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT               = 0x3,
420 	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST              = 0x4,
421 	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER                    = 0x5,
422 	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE                      = 0x6,
423 };
424 
425 struct mlx5_eqe_port_module_event {
426 	u8        rsvd0;
427 	u8        module;
428 	u8        rsvd1;
429 	u8        module_status;
430 	u8        rsvd2[2];
431 	u8        error_type;
432 };
433 
434 union ev_data {
435 	__be32				raw[7];
436 	struct mlx5_eqe_cmd		cmd;
437 	struct mlx5_eqe_comp		comp;
438 	struct mlx5_eqe_qp_srq		qp_srq;
439 	struct mlx5_eqe_cq_err		cq_err;
440 	struct mlx5_eqe_port_state	port;
441 	struct mlx5_eqe_gpio		gpio;
442 	struct mlx5_eqe_congestion	cong;
443 	struct mlx5_eqe_stall_vl	stall_vl;
444 	struct mlx5_eqe_page_req	req_pages;
445 	struct mlx5_eqe_port_module_event port_module_event;
446 	struct mlx5_eqe_vport_change	vport_change;
447 } __packed;
448 
449 struct mlx5_eqe {
450 	u8		rsvd0;
451 	u8		type;
452 	u8		rsvd1;
453 	u8		sub_type;
454 	__be32		rsvd2[7];
455 	union ev_data	data;
456 	__be16		rsvd3;
457 	u8		signature;
458 	u8		owner;
459 } __packed;
460 
461 struct mlx5_cmd_prot_block {
462 	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
463 	u8		rsvd0[48];
464 	__be64		next;
465 	__be32		block_num;
466 	u8		rsvd1;
467 	u8		token;
468 	u8		ctrl_sig;
469 	u8		sig;
470 };
471 
472 enum {
473 	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
474 };
475 
476 struct mlx5_err_cqe {
477 	u8	rsvd0[32];
478 	__be32	srqn;
479 	u8	rsvd1[18];
480 	u8	vendor_err_synd;
481 	u8	syndrome;
482 	__be32	s_wqe_opcode_qpn;
483 	__be16	wqe_counter;
484 	u8	signature;
485 	u8	op_own;
486 };
487 
/*
 * 64-byte completion queue entry as laid out by the hardware.
 * Multi-byte fields are big-endian (__be*).  Field order and sizes
 * mirror the device wire format — do not reorder, resize, or pad.
 */
488 struct mlx5_cqe64 {
489 	u8		tunneled_etc;
490 	u8		rsvd0[3];
491 	u8		lro_tcppsh_abort_dupack;
492 	u8		lro_min_ttl;
493 	__be16		lro_tcp_win;
494 	__be32		lro_ack_seq_num;
495 	__be32		rss_hash_result;
496 	u8		rss_hash_type;
497 	u8		ml_path;
498 	u8		rsvd20[2];
499 	__be16		check_sum;
500 	__be16		slid;
501 	__be32		flags_rqpn;
502 	u8		hds_ip_ext;
503 	u8		l4_hdr_type_etc;
504 	__be16		vlan_info;
505 	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
506 	__be32		imm_inval_pkey;
507 	u8		rsvd40[4];
508 	__be32		byte_cnt;
509 	__be64		timestamp;
510 	__be32		sop_drop_qpn;
511 	__be16		wqe_counter;
512 	u8		signature;
513 	u8		op_own;
514 };
515 
516 static inline bool get_cqe_lro_timestamp_valid(struct mlx5_cqe64 *cqe)
517 {
518 	return (cqe->lro_tcppsh_abort_dupack >> 7) & 1;
519 }
520 
521 static inline bool get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
522 {
523 	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
524 }
525 
526 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
527 {
528 	return (cqe->l4_hdr_type_etc >> 4) & 0x7;
529 }
530 
531 static inline u16 get_cqe_vlan(struct mlx5_cqe64 *cqe)
532 {
533 	return be16_to_cpu(cqe->vlan_info) & 0xfff;
534 }
535 
536 static inline void get_cqe_smac(struct mlx5_cqe64 *cqe, u8 *smac)
537 {
538 	memcpy(smac, &cqe->rss_hash_type , 4);
539 	memcpy(smac + 4, &cqe->slid , 2);
540 }
541 
542 static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
543 {
544 	return cqe->l4_hdr_type_etc & 0x1;
545 }
546 
547 static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
548 {
549 	return cqe->tunneled_etc & 0x1;
550 }
551 
552 enum {
553 	CQE_L4_HDR_TYPE_NONE			= 0x0,
554 	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
555 	CQE_L4_HDR_TYPE_UDP			= 0x2,
556 	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
557 	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
558 };
559 
560 enum {
561 	/* source L3 hash types */
562 	CQE_RSS_SRC_HTYPE_IP	= 0x3 << 0,
563 	CQE_RSS_SRC_HTYPE_IPV4	= 0x1 << 0,
564 	CQE_RSS_SRC_HTYPE_IPV6	= 0x2 << 0,
565 
566 	/* destination L3 hash types */
567 	CQE_RSS_DST_HTYPE_IP	= 0x3 << 2,
568 	CQE_RSS_DST_HTYPE_IPV4	= 0x1 << 2,
569 	CQE_RSS_DST_HTYPE_IPV6	= 0x2 << 2,
570 
571 	/* source L4 hash types */
572 	CQE_RSS_SRC_HTYPE_L4	= 0x3 << 4,
573 	CQE_RSS_SRC_HTYPE_TCP	= 0x1 << 4,
574 	CQE_RSS_SRC_HTYPE_UDP	= 0x2 << 4,
575 	CQE_RSS_SRC_HTYPE_IPSEC	= 0x3 << 4,
576 
577 	/* destination L4 hash types */
578 	CQE_RSS_DST_HTYPE_L4	= 0x3 << 6,
579 	CQE_RSS_DST_HTYPE_TCP	= 0x1 << 6,
580 	CQE_RSS_DST_HTYPE_UDP	= 0x2 << 6,
581 	CQE_RSS_DST_HTYPE_IPSEC	= 0x3 << 6,
582 };
583 
584 enum {
585 	CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
586 	CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
587 	CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
588 };
589 
590 enum {
591 	CQE_L2_OK	= 1 << 0,
592 	CQE_L3_OK	= 1 << 1,
593 	CQE_L4_OK	= 1 << 2,
594 };
595 
596 struct mlx5_sig_err_cqe {
597 	u8		rsvd0[16];
598 	__be32		expected_trans_sig;
599 	__be32		actual_trans_sig;
600 	__be32		expected_reftag;
601 	__be32		actual_reftag;
602 	__be16		syndrome;
603 	u8		rsvd22[2];
604 	__be32		mkey;
605 	__be64		err_offset;
606 	u8		rsvd30[8];
607 	__be32		qpn;
608 	u8		rsvd38[2];
609 	u8		signature;
610 	u8		op_own;
611 };
612 
613 struct mlx5_wqe_srq_next_seg {
614 	u8			rsvd0[2];
615 	__be16			next_wqe_index;
616 	u8			signature;
617 	u8			rsvd1[11];
618 };
619 
620 union mlx5_ext_cqe {
621 	struct ib_grh	grh;
622 	u8		inl[64];
623 };
624 
625 struct mlx5_cqe128 {
626 	union mlx5_ext_cqe	inl_grh;
627 	struct mlx5_cqe64	cqe64;
628 };
629 
630 struct mlx5_srq_ctx {
631 	u8			state_log_sz;
632 	u8			rsvd0[3];
633 	__be32			flags_xrcd;
634 	__be32			pgoff_cqn;
635 	u8			rsvd1[4];
636 	u8			log_pg_sz;
637 	u8			rsvd2[7];
638 	__be32			pd;
639 	__be16			lwm;
640 	__be16			wqe_cnt;
641 	u8			rsvd3[8];
642 	__be64			db_record;
643 };
644 
645 struct mlx5_create_srq_mbox_in {
646 	struct mlx5_inbox_hdr	hdr;
647 	__be32			input_srqn;
648 	u8			rsvd0[4];
649 	struct mlx5_srq_ctx	ctx;
650 	u8			rsvd1[208];
651 	__be64			pas[0];
652 };
653 
654 struct mlx5_create_srq_mbox_out {
655 	struct mlx5_outbox_hdr	hdr;
656 	__be32			srqn;
657 	u8			rsvd[4];
658 };
659 
660 struct mlx5_destroy_srq_mbox_in {
661 	struct mlx5_inbox_hdr	hdr;
662 	__be32			srqn;
663 	u8			rsvd[4];
664 };
665 
666 struct mlx5_destroy_srq_mbox_out {
667 	struct mlx5_outbox_hdr	hdr;
668 	u8			rsvd[8];
669 };
670 
671 struct mlx5_query_srq_mbox_in {
672 	struct mlx5_inbox_hdr	hdr;
673 	__be32			srqn;
674 	u8			rsvd0[4];
675 };
676 
677 struct mlx5_query_srq_mbox_out {
678 	struct mlx5_outbox_hdr	hdr;
679 	u8			rsvd0[8];
680 	struct mlx5_srq_ctx	ctx;
681 	u8			rsvd1[32];
682 	__be64			pas[0];
683 };
684 
685 struct mlx5_arm_srq_mbox_in {
686 	struct mlx5_inbox_hdr	hdr;
687 	__be32			srqn;
688 	__be16			rsvd;
689 	__be16			lwm;
690 };
691 
692 struct mlx5_arm_srq_mbox_out {
693 	struct mlx5_outbox_hdr	hdr;
694 	u8			rsvd[8];
695 };
696 
697 struct mlx5_cq_context {
698 	u8			status;
699 	u8			cqe_sz_flags;
700 	u8			st;
701 	u8			rsvd3;
702 	u8			rsvd4[6];
703 	__be16			page_offset;
704 	__be32			log_sz_usr_page;
705 	__be16			cq_period;
706 	__be16			cq_max_count;
707 	__be16			rsvd20;
708 	__be16			c_eqn;
709 	u8			log_pg_sz;
710 	u8			rsvd25[7];
711 	__be32			last_notified_index;
712 	__be32			solicit_producer_index;
713 	__be32			consumer_counter;
714 	__be32			producer_counter;
715 	u8			rsvd48[8];
716 	__be64			db_record_addr;
717 };
718 
719 struct mlx5_create_cq_mbox_in {
720 	struct mlx5_inbox_hdr	hdr;
721 	__be32			input_cqn;
722 	u8			rsvdx[4];
723 	struct mlx5_cq_context	ctx;
724 	u8			rsvd6[192];
725 	__be64			pas[0];
726 };
727 
728 struct mlx5_create_cq_mbox_out {
729 	struct mlx5_outbox_hdr	hdr;
730 	__be32			cqn;
731 	u8			rsvd0[4];
732 };
733 
734 struct mlx5_destroy_cq_mbox_in {
735 	struct mlx5_inbox_hdr	hdr;
736 	__be32			cqn;
737 	u8			rsvd0[4];
738 };
739 
740 struct mlx5_destroy_cq_mbox_out {
741 	struct mlx5_outbox_hdr	hdr;
742 	u8			rsvd0[8];
743 };
744 
745 struct mlx5_query_cq_mbox_in {
746 	struct mlx5_inbox_hdr	hdr;
747 	__be32			cqn;
748 	u8			rsvd0[4];
749 };
750 
751 struct mlx5_query_cq_mbox_out {
752 	struct mlx5_outbox_hdr	hdr;
753 	u8			rsvd0[8];
754 	struct mlx5_cq_context	ctx;
755 	u8			rsvd6[16];
756 	__be64			pas[0];
757 };
758 
759 struct mlx5_modify_cq_mbox_in {
760 	struct mlx5_inbox_hdr	hdr;
761 	__be32			cqn;
762 	__be32			field_select;
763 	struct mlx5_cq_context	ctx;
764 	u8			rsvd[192];
765 	__be64			pas[0];
766 };
767 
768 struct mlx5_modify_cq_mbox_out {
769 	struct mlx5_outbox_hdr	hdr;
770 	u8			rsvd[8];
771 };
772 
773 struct mlx5_eq_context {
774 	u8			status;
775 	u8			ec_oi;
776 	u8			st;
777 	u8			rsvd2[7];
778 	__be16			page_pffset;
779 	__be32			log_sz_usr_page;
780 	u8			rsvd3[7];
781 	u8			intr;
782 	u8			log_page_size;
783 	u8			rsvd4[15];
784 	__be32			consumer_counter;
785 	__be32			produser_counter;
786 	u8			rsvd5[16];
787 };
788 
789 struct mlx5_create_eq_mbox_in {
790 	struct mlx5_inbox_hdr	hdr;
791 	u8			rsvd0[3];
792 	u8			input_eqn;
793 	u8			rsvd1[4];
794 	struct mlx5_eq_context	ctx;
795 	u8			rsvd2[8];
796 	__be64			events_mask;
797 	u8			rsvd3[176];
798 	__be64			pas[0];
799 };
800 
801 struct mlx5_create_eq_mbox_out {
802 	struct mlx5_outbox_hdr	hdr;
803 	u8			rsvd0[3];
804 	u8			eq_number;
805 	u8			rsvd1[4];
806 };
807 
808 struct mlx5_map_eq_mbox_in {
809 	struct mlx5_inbox_hdr	hdr;
810 	__be64			mask;
811 	u8			mu;
812 	u8			rsvd0[2];
813 	u8			eqn;
814 	u8			rsvd1[24];
815 };
816 
817 struct mlx5_map_eq_mbox_out {
818 	struct mlx5_outbox_hdr	hdr;
819 	u8			rsvd[8];
820 };
821 
822 struct mlx5_query_eq_mbox_in {
823 	struct mlx5_inbox_hdr	hdr;
824 	u8			rsvd0[3];
825 	u8			eqn;
826 	u8			rsvd1[4];
827 };
828 
829 struct mlx5_query_eq_mbox_out {
830 	struct mlx5_outbox_hdr	hdr;
831 	u8			rsvd[8];
832 	struct mlx5_eq_context	ctx;
833 };
834 
835 struct mlx5_mkey_seg {
836 	/* This is a two bit field occupying bits 31-30.
837 	 * bit 31 is always 0,
838 	 * bit 30 is zero for regular MRs and 1 (e.g. free) for UMRs that do not have translation
839 	 */
840 	u8		status;
841 	u8		pcie_control;
842 	u8		flags;
843 	u8		version;
844 	__be32		qpn_mkey7_0;
845 	u8		rsvd1[4];
846 	__be32		flags_pd;
847 	__be64		start_addr;
848 	__be64		len;
849 	__be32		bsfs_octo_size;
850 	u8		rsvd2[16];
851 	__be32		xlt_oct_size;
852 	u8		rsvd3[3];
853 	u8		log2_page_size;
854 	u8		rsvd4[4];
855 };
856 
857 struct mlx5_query_special_ctxs_mbox_in {
858 	struct mlx5_inbox_hdr	hdr;
859 	u8			rsvd[8];
860 };
861 
862 struct mlx5_query_special_ctxs_mbox_out {
863 	struct mlx5_outbox_hdr	hdr;
864 	__be32			dump_fill_mkey;
865 	__be32			reserved_lkey;
866 };
867 
868 struct mlx5_create_mkey_mbox_in {
869 	struct mlx5_inbox_hdr	hdr;
870 	__be32			input_mkey_index;
871 	u8			rsvd0[4];
872 	struct mlx5_mkey_seg	seg;
873 	u8			rsvd1[16];
874 	__be32			xlat_oct_act_size;
875 	__be32			rsvd2;
876 	u8			rsvd3[168];
877 	__be64			pas[0];
878 };
879 
880 struct mlx5_create_mkey_mbox_out {
881 	struct mlx5_outbox_hdr	hdr;
882 	__be32			mkey;
883 	u8			rsvd[4];
884 };
885 
886 struct mlx5_query_mkey_mbox_in {
887 	struct mlx5_inbox_hdr	hdr;
888 	__be32			mkey;
889 };
890 
891 struct mlx5_query_mkey_mbox_out {
892 	struct mlx5_outbox_hdr	hdr;
893 	__be64			pas[0];
894 };
895 
896 struct mlx5_modify_mkey_mbox_in {
897 	struct mlx5_inbox_hdr	hdr;
898 	__be32			mkey;
899 	__be64			pas[0];
900 };
901 
902 struct mlx5_modify_mkey_mbox_out {
903 	struct mlx5_outbox_hdr	hdr;
904 	u8			rsvd[8];
905 };
906 
907 struct mlx5_dump_mkey_mbox_in {
908 	struct mlx5_inbox_hdr	hdr;
909 };
910 
911 struct mlx5_dump_mkey_mbox_out {
912 	struct mlx5_outbox_hdr	hdr;
913 	__be32			mkey;
914 };
915 
916 struct mlx5_mad_ifc_mbox_in {
917 	struct mlx5_inbox_hdr	hdr;
918 	__be16			remote_lid;
919 	u8			rsvd0;
920 	u8			port;
921 	u8			rsvd1[4];
922 	u8			data[256];
923 };
924 
925 struct mlx5_mad_ifc_mbox_out {
926 	struct mlx5_outbox_hdr	hdr;
927 	u8			rsvd[8];
928 	u8			data[256];
929 };
930 
931 struct mlx5_access_reg_mbox_in {
932 	struct mlx5_inbox_hdr		hdr;
933 	u8				rsvd0[2];
934 	__be16				register_id;
935 	__be32				arg;
936 	__be32				data[0];
937 };
938 
939 struct mlx5_access_reg_mbox_out {
940 	struct mlx5_outbox_hdr		hdr;
941 	u8				rsvd[8];
942 	__be32				data[0];
943 };
944 
945 #define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
946 
947 enum {
948 	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 <<  0
949 };
950 
951 struct mlx5_allocate_psv_in {
952 	struct mlx5_inbox_hdr   hdr;
953 	__be32			npsv_pd;
954 	__be32			rsvd_psv0;
955 };
956 
957 struct mlx5_allocate_psv_out {
958 	struct mlx5_outbox_hdr  hdr;
959 	u8			rsvd[8];
960 	__be32			psv_idx[4];
961 };
962 
963 struct mlx5_destroy_psv_in {
964 	struct mlx5_inbox_hdr	hdr;
965 	__be32                  psv_number;
966 	u8                      rsvd[4];
967 };
968 
969 struct mlx5_destroy_psv_out {
970 	struct mlx5_outbox_hdr  hdr;
971 	u8                      rsvd[8];
972 };
973 
974 #define MLX5_CMD_OP_MAX 0x939
975 
976 enum {
977 	VPORT_STATE_DOWN		= 0x0,
978 	VPORT_STATE_UP			= 0x1,
979 };
980 
981 enum {
982 	MLX5_L3_PROT_TYPE_IPV4		= 0,
983 	MLX5_L3_PROT_TYPE_IPV6		= 1,
984 };
985 
986 enum {
987 	MLX5_L4_PROT_TYPE_TCP		= 0,
988 	MLX5_L4_PROT_TYPE_UDP		= 1,
989 };
990 
991 enum {
992 	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
993 	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
994 	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
995 	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
996 	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
997 };
998 
999 enum {
1000 	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
1001 	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
1002 	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
1003 
1004 };
1005 
1006 enum {
1007 	MLX5_FLOW_TABLE_TYPE_NIC_RCV	 = 0,
1008 	MLX5_FLOW_TABLE_TYPE_EGRESS_ACL  = 2,
1009 	MLX5_FLOW_TABLE_TYPE_INGRESS_ACL = 3,
1010 	MLX5_FLOW_TABLE_TYPE_ESWITCH	 = 4,
1011 };
1012 
1013 enum {
1014 	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE	      = 0,
1015 	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_IF_NO_VLAN = 1,
1016 	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_OVERWRITE  = 2
1017 };
1018 
1019 enum {
1020 	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_STRIP  = 1 << 0,
1021 	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP  = 1 << 1,
1022 	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_INSERT = 1 << 2,
1023 	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT = 1 << 3
1024 };
1025 
1026 enum {
1027 	MLX5_UC_ADDR_CHANGE = (1 << 0),
1028 	MLX5_MC_ADDR_CHANGE = (1 << 1),
1029 	MLX5_VLAN_CHANGE    = (1 << 2),
1030 	MLX5_PROMISC_CHANGE = (1 << 3),
1031 	MLX5_MTU_CHANGE     = (1 << 4),
1032 };
1033 
1034 enum mlx5_list_type {
1035 	MLX5_NIC_VPORT_LIST_TYPE_UC   = 0x0,
1036 	MLX5_NIC_VPORT_LIST_TYPE_MC   = 0x1,
1037 	MLX5_NIC_VPORT_LIST_TYPE_VLAN = 0x2,
1038 };
1039 
1040 enum {
1041 	MLX5_ESW_VPORT_ADMIN_STATE_DOWN  = 0x0,
1042 	MLX5_ESW_VPORT_ADMIN_STATE_UP    = 0x1,
1043 	MLX5_ESW_VPORT_ADMIN_STATE_AUTO  = 0x2,
1044 };
1045 
1046 /* MLX5 DEV CAPs */
1047 
1048 /* TODO: EAT.ME */
1049 enum mlx5_cap_mode {
1050 	HCA_CAP_OPMOD_GET_MAX	= 0,
1051 	HCA_CAP_OPMOD_GET_CUR	= 1,
1052 };
1053 
1054 enum mlx5_cap_type {
1055 	MLX5_CAP_GENERAL = 0,
1056 	MLX5_CAP_ETHERNET_OFFLOADS,
1057 	MLX5_CAP_ODP,
1058 	MLX5_CAP_ATOMIC,
1059 	MLX5_CAP_ROCE,
1060 	MLX5_CAP_IPOIB_OFFLOADS,
1061 	MLX5_CAP_EOIB_OFFLOADS,
1062 	MLX5_CAP_FLOW_TABLE,
1063 	MLX5_CAP_ESWITCH_FLOW_TABLE,
1064 	MLX5_CAP_ESWITCH,
1065 	/* NUM OF CAP Types */
1066 	MLX5_CAP_NUM
1067 };
1068 
1069 /* GET Dev Caps macros */
1070 #define MLX5_CAP_GEN(mdev, cap) \
1071 	MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
1072 
1073 #define MLX5_CAP_GEN_MAX(mdev, cap) \
1074 	MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
1075 
1076 #define MLX5_CAP_ETH(mdev, cap) \
1077 	MLX5_GET(per_protocol_networking_offload_caps,\
1078 		 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1079 
1080 #define MLX5_CAP_ETH_MAX(mdev, cap) \
1081 	MLX5_GET(per_protocol_networking_offload_caps,\
1082 		 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1083 
1084 #define MLX5_CAP_ROCE(mdev, cap) \
1085 	MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
1086 
1087 #define MLX5_CAP_ROCE_MAX(mdev, cap) \
1088 	MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
1089 
1090 #define MLX5_CAP_ATOMIC(mdev, cap) \
1091 	MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
1092 
1093 #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
1094 	MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
1095 
1096 #define MLX5_CAP_FLOWTABLE(mdev, cap) \
1097 	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
1098 
1099 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
1100 	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
1101 
1102 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
1103 	MLX5_GET(flow_table_eswitch_cap, \
1104 		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1105 
1106 #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
1107 	MLX5_GET(flow_table_eswitch_cap, \
1108 		 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1109 
1110 #define MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(mdev, cap) \
1111 	MLX5_CAP_ESW_FLOWTABLE(dev, \
1112 			       flow_table_properties_esw_acl_egress.cap)
1113 
1114 #define MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL_MAX(mdev, cap) \
1115 	MLX5_CAP_ESW_FLOWTABLE_MAX(dev, \
1116 				   flow_table_properties_esw_acl_egress.cap)
1117 
1118 #define MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(mdev, cap) \
1119 	MLX5_CAP_ESW_FLOWTABLE(dev, \
1120 			       flow_table_properties_esw_acl_ingress.cap)
1121 
1122 #define MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL_MAX(mdev, cap) \
1123 	MLX5_CAP_ESW_FLOWTABLE_MAX(dev, \
1124 				   flow_table_properties_esw_acl_ingress.cap)
1125 
1126 #define MLX5_CAP_ESW(mdev, cap) \
1127 	MLX5_GET(e_switch_cap, \
1128 		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
1129 
1130 #define MLX5_CAP_ESW_MAX(mdev, cap) \
1131 	MLX5_GET(e_switch_cap, \
1132 		 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
1133 
1134 #define MLX5_CAP_ODP(mdev, cap)\
1135 	MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
1136 
1137 #define MLX5_CAP_ODP_MAX(mdev, cap)\
1138 	MLX5_GET(odp_cap, mdev->hca_caps_max[MLX5_CAP_ODP], cap)
1139 
1140 enum {
1141 	MLX5_CMD_STAT_OK			= 0x0,
1142 	MLX5_CMD_STAT_INT_ERR			= 0x1,
1143 	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
1144 	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
1145 	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
1146 	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
1147 	MLX5_CMD_STAT_RES_BUSY			= 0x6,
1148 	MLX5_CMD_STAT_LIM_ERR			= 0x8,
1149 	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
1150 	MLX5_CMD_STAT_IX_ERR			= 0xa,
1151 	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
1152 	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
1153 	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
1154 	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
1155 	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
1156 	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
1157 };
1158 
1159 enum {
1160 	MLX5_IEEE_802_3_COUNTERS_GROUP	      = 0x0,
1161 	MLX5_RFC_2863_COUNTERS_GROUP	      = 0x1,
1162 	MLX5_RFC_2819_COUNTERS_GROUP	      = 0x2,
1163 	MLX5_RFC_3635_COUNTERS_GROUP	      = 0x3,
1164 	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
1165 	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
1166 	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
1167 	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
1168 };
1169 
1170 enum {
1171 	MLX5_CAP_PORT_TYPE_IB  = 0x0,
1172 	MLX5_CAP_PORT_TYPE_ETH = 0x1,
1173 };
1174 
1175 enum {
1176 	MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_L2           = 0x0,
1177 	MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_VPORT_CONFIG = 0x1,
1178 	MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_NOT_REQUIRED = 0x2
1179 };
1180 
1181 enum {
1182 	MLX5_QUERY_VPORT_STATE_OUT_STATE_FOLLOW = 0x2,
1183 };
1184 
1185 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1186 {
1187 	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
1188 		return 0;
1189 	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
1190 }
1191 
1192 struct mlx5_ifc_mcia_reg_bits {
1193 	u8         l[0x1];
1194 	u8         reserved_0[0x7];
1195 	u8         module[0x8];
1196 	u8         reserved_1[0x8];
1197 	u8         status[0x8];
1198 
1199 	u8         i2c_device_address[0x8];
1200 	u8         page_number[0x8];
1201 	u8         device_address[0x10];
1202 
1203 	u8         reserved_2[0x10];
1204 	u8         size[0x10];
1205 
1206 	u8         reserved_3[0x20];
1207 
1208 	u8         dword_0[0x20];
1209 	u8         dword_1[0x20];
1210 	u8         dword_2[0x20];
1211 	u8         dword_3[0x20];
1212 	u8         dword_4[0x20];
1213 	u8         dword_5[0x20];
1214 	u8         dword_6[0x20];
1215 	u8         dword_7[0x20];
1216 	u8         dword_8[0x20];
1217 	u8         dword_9[0x20];
1218 	u8         dword_10[0x20];
1219 	u8         dword_11[0x20];
1220 };
1221 
1222 #define MLX5_CMD_OP_QUERY_EEPROM 0x93c
1223 
1224 struct mlx5_mini_cqe8 {
1225 	union {
1226 		u32 rx_hash_result;
1227 		u32 checksum;
1228 		struct {
1229 			u16 wqe_counter;
1230 			u8  s_wqe_opcode;
1231 			u8  reserved;
1232 		} s_wqe_info;
1233 	};
1234 	u32 byte_cnt;
1235 };
1236 
1237 enum {
1238 	MLX5_NO_INLINE_DATA,
1239 	MLX5_INLINE_DATA32_SEG,
1240 	MLX5_INLINE_DATA64_SEG,
1241 	MLX5_COMPRESSED,
1242 };
1243 
1244 enum mlx5_exp_cqe_zip_recv_type {
1245 	MLX5_CQE_FORMAT_HASH,
1246 	MLX5_CQE_FORMAT_CSUM,
1247 };
1248 
1249 #define MLX5E_CQE_FORMAT_MASK 0xc
1250 static inline int mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe)
1251 {
1252 	return (cqe->op_own & MLX5E_CQE_FORMAT_MASK) >> 2;
1253 }
1254 
1255 #endif /* MLX5_DEVICE_H */
1256