xref: /linux/drivers/infiniband/hw/mlx5/mlx5_ib.h (revision ea04ef19ebdcd22e8a21054a19c2c8fefae011ce)
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>

#include "srq.h"
#include "qp.h"
#include "macsec.h"

#define mlx5_ib_dbg(_dev, format, arg...)                                      \
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)                                      \
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)                                     \
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
		 __LINE__, current->pid, ##arg)

#define mlx5_ib_log(lvl, _dev, format, arg...)                                 \
	dev_printk(lvl, &(_dev)->ib_dev.dev,  "%s:%d:(pid %d): " format,       \
		   __func__, __LINE__, current->pid, ##arg)

#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

static __always_inline unsigned long
__mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
			       unsigned int pgsz_shift)
{
	unsigned int largest_pg_shift =
		min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift,
		      BITS_PER_LONG - 1);

	/*
	 * Despite the command allowing it, the device does not support page
	 * sizes smaller than 4k.
	 */
	pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift);
	return GENMASK(largest_pg_shift, pgsz_shift);
}
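
/*
 * Worked example (illustrative numbers, assuming a 64-bit build): with
 * log_pgsz_bits = 5 and pgsz_shift = 12, largest_pg_shift is
 * min((1 << 5) - 1 + 12, 63) = 43, so the result is GENMASK(43, 12),
 * i.e. every page size from 4K up to 8TB.
 */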

/*
 * For mkc users, instead of a page_offset the command has a start_iova which
 * specifies both the page_offset and the on-the-wire IOVA
 */
#define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova)    \
	ib_umem_find_best_pgsz(umem,                                           \
			       __mlx5_log_page_size_to_bitmap(                 \
				       __mlx5_bit_sz(typ, log_pgsz_fld),       \
				       pgsz_shift),                            \
			       iova)
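
/*
 * Usage sketch (a minimal example; error handling elided): an MR
 * registration path can derive the best mkey page size like so:
 *
 *	unsigned long page_size;
 *
 *	page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
 *					     0, iova);
 *	if (!page_size)
 *		return ERR_PTR(-EINVAL);
 */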

static __always_inline unsigned long
__mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
			      unsigned int offset_shift)
{
	unsigned int largest_offset_shift =
		min_t(unsigned long, page_offset_bits - 1 + offset_shift,
		      BITS_PER_LONG - 1);

	return GENMASK(largest_offset_shift, offset_shift);
}

/*
 * QP/CQ/WQ/etc type commands take a page offset that satisfies:
 *   page_offset_quantized * (page_size/scale) = page_offset
 * Which restricts allowed page sizes to ones that satisfy the above.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
	struct ib_umem *umem, unsigned long pgsz_bitmap,
	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
	unsigned int *page_offset_quantized);
#define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld,           \
					    pgsz_shift, page_offset_fld,       \
					    scale, page_offset_quantized)      \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld),                           \
		GENMASK(31, order_base_2(scale)), scale,                       \
		page_offset_quantized)

#define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld,        \
					       pgsz_shift, page_offset_fld,    \
					       scale, page_offset_quantized)   \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld), 0, scale,                 \
		page_offset_quantized)
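
/*
 * Usage sketch (a minimal example; the struct type and scale depend on the
 * resource being created): a CQ creation path can pick a page size whose
 * quantized offset fits the cqc field:
 *
 *	page_size = mlx5_umem_find_best_quantized_pgoff(
 *		umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
 *		page_offset, 64, &page_offset_quantized);
 *	if (!page_size)
 *		return -EINVAL;
 */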

enum {
	MLX5_IB_MMAP_OFFSET_START = 9,
	MLX5_IB_MMAP_OFFSET_END = 255,
};

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};
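
/*
 * Example decode (following the shift/mask above): the mmap command is
 * (vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK. Command
 * values in [MLX5_IB_MMAP_OFFSET_START, MLX5_IB_MMAP_OFFSET_END] are
 * expected to be resolved through rdma_user_mmap_entry lookups.
 */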

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG         = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
	MLX5_TM_MAX_SGE			= 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
	MLX5_IB_INVALID_BFREG		= BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES = 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN	= 6,
	MLX5_MEMIC_BASE_SIZE	= 1 << MLX5_MEMIC_BASE_ALIGN,
};

enum mlx5_ib_mmap_type {
	MLX5_IB_MMAP_TYPE_MEMIC = 1,
	MLX5_IB_MMAP_TYPE_VAR = 2,
	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
	MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
};

struct mlx5_bfreg_info {
	u32 *sys_pages;
	int num_low_latency_bfregs;
	unsigned int *count;

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex lock;
	u32 ver;
	u8 lib_uar_4k : 1;
	u8 lib_uar_dyn : 1;
	u32 num_sys_pages;
	u32 num_static_sys_pages;
	u32 total_num_bfregs;
	u32 num_dyn_bfregs;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;

	u64			lib_caps;
	u16			devx_uid;
	/* For RoCE LAG TX affinity */
	atomic_t		tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
	u16			uid;
};

enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1
#define MLX5_IB_NUM_FDB_FTS		MLX5_BY_PASS_NUM_REGULAR_PRIOS

struct mlx5_ib_anchor {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg_goto_table;
	struct mlx5_flow_group *fg_drop;
	struct mlx5_flow_handle *rule_goto_table;
	struct mlx5_flow_handle *rule_drop;
	unsigned int rule_goto_table_ref;
};

struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	struct mlx5_ib_anchor		anchor;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
	struct ib_counters		*ibcounters;
	struct mlx5_ib_dev		*dev;
	struct mlx5_ib_flow_matcher	*flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	int			mask_len;
	enum mlx5_ib_flow_type	flow_type;
	enum mlx5_flow_namespace_type ns_type;
	u16			priority;
	struct mlx5_core_dev	*mdev;
	atomic_t		usecnt;
	u8			match_criteria_enable;
};

struct mlx5_ib_steering_anchor {
	struct mlx5_ib_flow_prio *ft_prio;
	struct mlx5_ib_dev *dev;
	atomic_t usecnt;
};

struct mlx5_ib_pp {
	u16 index;
	struct mlx5_core_dev *mdev;
};

enum mlx5_ib_optional_counter_type {
	MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
	MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
	MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,

	MLX5_IB_OPCOUNTER_MAX,
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_ib_flow_prio	fdb[MLX5_IB_NUM_FDB_FTS];
	struct mlx5_ib_flow_prio	rdma_rx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	rdma_tx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	opfcs[MLX5_IB_OPCOUNTER_MAX];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when adding/deleting flow rules; only a single add/removal
	 * of a flow steering rule can be in progress at a time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)
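
/*
 * Illustrative combinations (a sketch, not an exhaustive list): an initial
 * population of an mkey's translation table might pass
 * MLX5_IB_UPD_XLT_ENABLE | MLX5_IB_UPD_XLT_ADDR, while ODP invalidation
 * paths zap entries with MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC.
 */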

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1	IB_QP_CREATE_RESERVED_START
#define MLX5_IB_QP_CREATE_WC_TEST	(IB_QP_CREATE_RESERVED_START << 1)

struct wr_list {
	u16	opcode;
	u16	next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING	= 1 << 1,
};

struct mlx5_ib_wq {
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void			*cur_edge;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
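
/*
 * In byte terms (derived from the log values above): a stride can span
 * 2^6 = 64B up to 2^13 = 8KB, and a single WQE can carry 2^9 = 512
 * strides (2^3 = 8 with the extended cap) up to 2^16 = 65536 strides.
 */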

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			rq_num_pas;
	u32			log_rq_stride;
	u32			log_rq_size;
	u32			rq_page_offset;
	u32			log_page_size;
	u32			log_num_strides;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	size_t			buf_size;
	unsigned int		page_shift;
	struct mlx5_db		db;
	u32			user_index;
	u32			wqe_count;
	u32			wqe_shift;
	int			wq_sig;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32			rqtn;
	u16			uid;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u32			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
	u32			flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer  ubuffer;
	struct mlx5_db		*doorbell;
	struct mlx5_flow_handle	*flow_rule;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int			buf_size;
	unsigned long		offset;
	struct mlx5_sq_bfreg   *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct    mdct;
	u32                     *in;
};

struct mlx5_ib_gsi_qp {
	struct ib_qp *rx_qp;
	u32 port_num;
	struct ib_qp_cap cap;
	struct ib_cq *cq;
	struct mlx5_ib_gsi_wr *outstanding_wrs;
	u32 outstanding_pi, outstanding_ci;
	int num_qps;
	/* Protects access to the tx_qps. Post send operations synchronize
	 * with tx_qp creation in setup_qp(). Also protects the
	 * outstanding_wrs array and indices.
	 */
	spinlock_t lock;
	struct ib_qp **tx_qps;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
		struct mlx5_ib_gsi_qp gsi;
	};
	struct mlx5_frag_buf	buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			next_fence;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	/* cached variant of create_flags from struct ib_qp_init_attr */
	u32			flags;
	u32			port;
	u8			state;
	int			max_inline_data;
	struct mlx5_bf	        bf;
	u8			has_rq:1;
	u8			is_rss:1;

	/* only for user space QPs; for kernel QPs
	 * we have it from the bf object
	 */
	int			bfregn;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct mlx5_rate_limit	rl;
	u32                     underlay_qpn;
	u32			flags_en;
	/*
	 * IB/core doesn't store low-level QP types, so
	 * store both MLX and IBTA types in the field below.
	 */
	enum ib_qp_type		type;
	/* A flag to indicate that a new counter is configured
	 * but has not yet taken effect
	 */
	u32                     counter_pending;
	u16			gsi_lag_port;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf    frag_buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
	MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_frag_buf	buf;
	struct mlx5_db		db;
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming an SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u8 mmap_flag;
	u64 address;
	u32 page_idx;
};

enum mlx5_mkey_type {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
	MLX5_MKEY_INDIRECT_DEVX,
};

struct mlx5r_cache_rb_key {
	u8 ats:1;
	unsigned int access_mode;
	unsigned int access_flags;
	unsigned int ndescs;
};

struct mlx5_ib_mkey {
	u32 key;
	enum mlx5_mkey_type type;
	unsigned int ndescs;
	struct wait_queue_head wait;
	refcount_t usecount;
	/* A cacheable user mkey must hold either an rb_key or a cache_ent. */
	struct mlx5r_cache_rb_key rb_key;
	struct mlx5_cache_ent *cache_ent;
	u8 cacheable : 1;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					 IB_ACCESS_REMOTE_WRITE  |\
					 IB_ACCESS_REMOTE_READ   |\
					 IB_ACCESS_REMOTE_ATOMIC |\
					 IB_ZERO_BASED)

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					  IB_ACCESS_REMOTE_WRITE  |\
					  IB_ACCESS_REMOTE_READ   |\
					  IB_ZERO_BASED)

#define mlx5_update_odp_stats(mr, counter_name, value)		\
	atomic64_add(value, &((mr)->odp_stats.counter_name))
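
/*
 * Example (assuming a counter named "faults" in ib_odp_counters): a page
 * fault handler can account the pages it resolved with
 * mlx5_update_odp_stats(mr, faults, npages).
 */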

struct mlx5_ib_mr {
	struct ib_mr ibmr;
	struct mlx5_ib_mkey mmkey;

	struct ib_umem *umem;

	union {
		/* Used only by kernel MRs (umem == NULL) */
		struct {
			void *descs;
			void *descs_alloc;
			dma_addr_t desc_map;
			int max_descs;
			int desc_size;
			int access_mode;

			/* For Kernel IB_MR_TYPE_INTEGRITY */
			struct mlx5_core_sig_ctx *sig;
			struct mlx5_ib_mr *pi_mr;
			struct mlx5_ib_mr *klm_mr;
			struct mlx5_ib_mr *mtt_mr;
			u64 data_iova;
			u64 pi_iova;
			int meta_ndescs;
			int meta_length;
			int data_length;
		};

		/* Used only by User MRs (umem != NULL) */
		struct {
			unsigned int page_shift;
			/* Current access_flags */
			int access_flags;

			/* For User ODP */
			struct mlx5_ib_mr *parent;
			struct xarray implicit_children;
			union {
				struct work_struct work;
			} odp_destroy;
			struct ib_odp_counters odp_stats;
			bool is_odp_implicit;
		};
	};
};

static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_odp;
}

static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_dmabuf;
}

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_ib_mkey	mmkey;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

enum {
	MLX5_UMR_STATE_UNINIT,
	MLX5_UMR_STATE_ACTIVE,
	MLX5_UMR_STATE_RECOVER,
	MLX5_UMR_STATE_ERR,
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* Protects against UMR QP overflow
	 */
	struct semaphore	sem;
	/* Protects against using UMR while it is not active
	 */
	struct mutex lock;
	unsigned int state;
};

#define NUM_MKEYS_PER_PAGE \
	((PAGE_SIZE - sizeof(struct list_head)) / sizeof(u32))

struct mlx5_mkeys_page {
	u32 mkeys[NUM_MKEYS_PER_PAGE];
	struct list_head list;
};
static_assert(sizeof(struct mlx5_mkeys_page) == PAGE_SIZE);
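
/*
 * Sanity numbers (assuming a 4K PAGE_SIZE and a 16-byte list_head): each
 * page holds (4096 - 16) / 4 = 1020 mkeys, and the static_assert above
 * guarantees the struct tiles a page exactly: 1020 * 4 + 16 = 4096.
 */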

struct mlx5_mkeys_queue {
	struct list_head pages_list;
	u32 num_pages;
	unsigned long ci;
	spinlock_t lock; /* sync list ops */
};

struct mlx5_cache_ent {
	struct mlx5_mkeys_queue	mkeys_queue;
	u32			pending;

	char                    name[4];

	struct rb_node		node;
	struct mlx5r_cache_rb_key rb_key;

	u8 is_tmp:1;
	u8 disabled:1;
	u8 fill_to_high_water:1;

	/*
	 * - limit is the low water mark for stored mkeys and 2 * limit is
	 *   the high water mark.
	 */
	u32 in_use;
	u32 limit;

	/* Statistics */
	u32                     miss;

	struct mlx5_ib_dev     *dev;
	struct delayed_work	dwork;
};

struct mlx5r_async_create_mkey {
	union {
		u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
		u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	};
	struct mlx5_async_work cb_work;
	struct mlx5_cache_ent *ent;
	u32 mkey;
};

struct mlx5_mkey_cache {
	struct workqueue_struct *wq;
	struct rb_root		rb_root;
	struct mutex		rb_lock;
	struct dentry		*fs_root;
	unsigned long		last_add;
	struct delayed_work	remove_ent_dwork;
};

struct mlx5_ib_port_resources {
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	u32 xrcdn0;
	u32 xrcdn1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
};

#define MAX_OPFC_RULES 2

struct mlx5_ib_op_fc {
	struct mlx5_fc *fc;
	struct mlx5_flow_handle *rule[MAX_OPFC_RULES];
};

struct mlx5_ib_counters {
	struct rdma_stat_desc *descs;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u32 num_op_counters;
	u16 set_id;
	struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
};

int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
			 struct mlx5_ib_op_fc *opfc,
			 enum mlx5_ib_optional_counter_type type);

void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_op_fc *opfc,
			     enum mlx5_ib_optional_counter_type type);

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	struct netdev_net_notifier nn;
	struct notifier_block	mdev_nb;
	struct net_device	*tracking_netdev;
	atomic_t		tx_port_affinity;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev	*dev;
	u32			native_port_num;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep		*rep;
#ifdef CONFIG_MLX5_MACSEC
	struct mlx5_reserved_gids *reserved_gids;
#endif
};

struct mlx5_ib_dbg_param {
	int			offset;
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
	u32			port_num;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_MAX_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID,
	MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry			*root;
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev     *dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	u32			timeout;
	bool			activate;
	atomic_t		events_cnt;
	atomic_t		rqs_cnt;
	struct dentry		*dir_debugfs;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FS,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_QP,
	MLX5_IB_STAGE_SRQ,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_RESTRACK,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};
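
/*
 * Usage sketch (the stage callbacks here are hypothetical names): a profile
 * is built as a designated-initializer table and walked in stage order by
 * __mlx5_ib_add():
 *
 *	static const struct mlx5_ib_profile example_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *			     example_stage_init,
 *			     example_stage_cleanup),
 *		STAGE_CREATE(MLX5_IB_STAGE_CAPS,
 *			     example_caps_init,
 *			     NULL),
 *	};
 */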

struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
	bool is_master;
	bool unaffiliate;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action		ib_action;
	union {
		struct {
			u64			    ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			u32 sub_type;
			union {
				struct mlx5_modify_hdr *modify_hdr;
				struct mlx5_pkt_reformat *pkt_reformat;
			};
		} flow_action_raw;
	};
};

struct mlx5_dm {
	struct mlx5_core_dev *dev;
	/* This lock is used to protect the access to the shared
	 * allocation map when concurrent requests by different
	 * processes are handled.
	 */
	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
	u64 *out;
	u32 flags;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action);
struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex		mutex;
	u32			user_td;
	int			qps;
	bool			enabled;
};

struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;
	struct mlx5_ib_dev *dev;
	struct mlx5_eq *core;
	struct work_struct work;
	spinlock_t lock; /* Pagefaults spinlock */
	struct workqueue_struct *wq;
	mempool_t *pool;
};

struct mlx5_devx_event_table {
	struct mlx5_nb devx_nb;
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;
};

struct mlx5_var_table {
	/* serialize updating the bitmap */
	struct mutex bitmap_lock;
	unsigned long *bitmap;
	u64 hw_start_addr;
	u32 stride_size;
	u64 num_var_hw_entries;
};

struct mlx5_port_caps {
	bool has_smi;
	u8 ext_port_cap;
};

struct mlx5_special_mkeys {
	u32 dump_fill_mkey;
	__be32 null_mkey;
	__be32 terminate_scatter_list_mkey;
};

struct mlx5_macsec {
	struct mutex lock; /* Protects mlx5_macsec internal contexts */
	struct list_head macsec_devices_list;
	struct notifier_block blocking_events_nb;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct notifier_block		mdev_events;
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	u8				ib_active:1;
	u8				is_rep:1;
	u8				lag_active:1;
	u8				wc_support:1;
	u8				fill_delay;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;

	atomic_t			mkey_var;
	struct mlx5_mkey_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
	struct ib_odp_caps	odp_caps;
	u64			odp_max_size;
	struct mutex		odp_eq_mutex;
	struct mlx5_ib_pf_eq	odp_pf_eq;

	struct xarray		odp_mkeys;

	struct mlx5_ib_flow_db	*flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg	bfreg;
	struct mlx5_sq_bfreg	wc_bfreg;
	struct mlx5_sq_bfreg	fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	const struct mlx5_ib_profile	*profile;

	struct mlx5_ib_lb_state		lb;
	u8			umr_fence;
	struct list_head	ib_dev_list;
	u64			sys_image_guid;
	struct mlx5_dm		dm;
	u16			devx_whitelist_uid;
	struct mlx5_srq_table   srq_table;
	struct mlx5_qp_table    qp_table;
	struct mlx5_async_ctx   async_ctx;
	struct mlx5_devx_event_table devx_event_table;
	struct mlx5_var_table var_table;

	struct xarray sig_mrs;
	struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
	u16 pkey_table_len;
	u8 lag_ports;
	struct mlx5_special_mkeys mkeys;

#ifdef CONFIG_MLX5_MACSEC
	struct mlx5_macsec macsec;
#endif
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr)
{
	return to_mdev(mr->ibmr.device);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
		struct mlx5_user_mmap_entry, rdma_entry);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
			 size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
					 u64 length, u64 virt_addr,
					 int fd, int access_flags,
					 struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs);
int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
				    u64 length, u64 virt_addr, int access_flags,
				    struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg,
					 u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props);
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
			  u64 access_flags);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_cache_ent *
mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
			      struct mlx5r_cache_rb_key rb_key,
			      bool persistent_entry);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       int access_flags, int access_mode,
				       int ndescs);

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_pf_eq *eq)
{
	return 0;
}
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{
	return 0;
}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					 struct mlx5_ib_mr *mr, int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
int __mlx5_ib_add(struct mlx5_ib_dev *dev,
		  const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u32 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u32 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u32 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
				   const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);

/* GSI QP helper functions */
int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
		       struct ib_qp_init_attr *attr);
int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u32 ib_port_num,
						   u32 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u32 port_num);

extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
extern const struct uapi_definition mlx5_ib_std_types_defs[];

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
}

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value for unsupported CQ create flags;
	 * otherwise returns zero.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

extern void *xlt_emergency_page;

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
				       struct mlx5_ib_mkey *mmkey)
{
	refcount_set(&mmkey->usecount, 1);

	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
			       mmkey, GFP_KERNEL));
}

/* deref an mkey that can participate in ODP flow */
static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	if (refcount_dec_and_test(&mmkey->usecount))
		wake_up(&mmkey->wait);
}

/* deref an mkey that can participate in ODP flow and wait for release */
static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	mlx5r_deref_odp_mkey(mmkey);
	wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
}
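
/*
 * Typical teardown pairing (a sketch of the expected usage): an mkey stored
 * with mlx5r_store_odp_mkey() is first removed from dev->odp_mkeys with
 * xa_erase(), after which mlx5r_deref_wait_odp_mkey() drops the initial
 * reference and blocks until all concurrent ODP users have let go.
 */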

int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);

static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
	/*
	 * If the driver is in hash mode and the port_select_flow_table_bypass
	 * cap is supported, the driver no longer needs to assign port
	 * affinity by default; a user who wants a specific port affinity can
	 * set it explicitly through a dedicated API.
	 */
	if (dev->lag_active &&
	    mlx5_lag_mode_is_hash(dev->mdev) &&
	    MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
		return 0;

	if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
		return 0;

	return dev->lag_active ||
		(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
		 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
}

static inline bool rt_supported(int ts_cap)
{
	return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
	       ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
}

/*
 * PCI Peer to Peer is a trainwreck. If no switch is present then things
 * sometimes work, depending on the pci_distance_p2p logic for excluding broken
 * root complexes. However if a switch is present in the path, then things get
 * really ugly depending on how the switch is set up. This table assumes that
 * the root complex is strict and is validating that all req/reps are matched
 * perfectly - so any scenario where it sees only half the transaction is a
 * failure.
 *
 * CR/RR/DT  ATS RO P2P
 * 00X       X   X  OK
 * 010       X   X  fails (request is routed to root but root never sees comp)
 * 011       0   X  fails (request is routed to root but root never sees comp)
 * 011       1   X  OK
 * 10X       X   1  OK
 * 101       X   0  fails (completion is routed to root but root didn't see req)
 * 110       X   0  SLOW
 * 111       0   0  SLOW
 * 111       1   0  fails (completion is routed to root but root didn't see req)
 * 111       1   1  OK
 *
 * Unfortunately we cannot reliably know if a switch is present or what the
 * CR/RR/DT ACS settings are, as in a VM that is all hidden. Assume that
 * CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows.
 *
 * For now assume if the umem is a dma_buf then it is P2P.
 */
static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
				       struct ib_umem *umem, int access_flags)
{
	if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf)
		return false;
	return access_flags & IB_ACCESS_RELAXED_ORDERING;
}

int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
		  unsigned int index, const union ib_gid *gid,
		  const struct ib_gid_attr *attr);
#endif /* MLX5_IB_H */