/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>

#include "srq.h"
#include "qp.h"
#include "macsec.h"

#define mlx5_ib_dbg(_dev, format, arg...)                                      \
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)                                      \
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)                                     \
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
		 __LINE__, current->pid, ##arg)

#define mlx5_ib_log(lvl, _dev, format, arg...)                                 \
	dev_printk(lvl, &(_dev)->ib_dev.dev,  "%s:%d:(pid %d): " format,       \
		   __func__, __LINE__, current->pid, ##arg)

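/*
 * Example (illustrative only, not part of this header's API surface): the
 * logging helpers above automatically prefix messages with the function
 * name, line number and current PID, so a caller only supplies the format
 * string. A hypothetical user could look like:
 *
 *	static void report_create_status(struct mlx5_ib_dev *dev, int err)
 *	{
 *		if (err)
 *			mlx5_ib_warn(dev, "resource create failed, err %d\n", err);
 *		else
 *			mlx5_ib_dbg(dev, "resource created\n");
 *	}
 */
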
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

static __always_inline unsigned long
__mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
			       unsigned int pgsz_shift)
{
	unsigned int largest_pg_shift =
		min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift,
		      BITS_PER_LONG - 1);

	/*
	 * Despite the command allowing it, the device does not support page
	 * sizes smaller than 4k.
	 */
	pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift);
	return GENMASK(largest_pg_shift, pgsz_shift);
}

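/*
 * Worked example (assuming MLX5_ADAPTER_PAGE_SHIFT == 12): with a 5-bit
 * log_pgsz field and pgsz_shift == 12, the largest representable shift is
 * min((1 << 5) - 1 + 12, 63) == 43, so the function returns
 * GENMASK(43, 12) - i.e. every power-of-two page size from 4KB up to 8TB
 * is a candidate, and anything below 4KB is masked off.
 */
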
static __always_inline unsigned long
__mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
			      unsigned int offset_shift)
{
	unsigned int largest_offset_shift =
		min_t(unsigned long, page_offset_bits - 1 + offset_shift,
		      BITS_PER_LONG - 1);

	return GENMASK(largest_offset_shift, offset_shift);
}

/*
 * QP/CQ/WQ/etc type commands take a page offset that satisfies:
 *   page_offset_quantized * (page_size/scale) = page_offset
 * which restricts the allowed page sizes to ones that satisfy the above.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
	struct ib_umem *umem, unsigned long pgsz_bitmap,
	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
	unsigned int *page_offset_quantized);
#define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld,           \
					    pgsz_shift, page_offset_fld,       \
					    scale, page_offset_quantized)      \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld),                           \
		GENMASK(31, order_base_2(scale)), scale,                       \
		page_offset_quantized)

#define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld,        \
					       pgsz_shift, page_offset_fld,    \
					       scale, page_offset_quantized)   \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld), 0, scale,                 \
		page_offset_quantized)

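/*
 * Illustrative usage (a sketch; see the CQ/SRQ creation paths for the real
 * callers): picking the best page size for a user CQ buffer whose PRM
 * command quantizes the page offset in units of 64 bytes might look like:
 *
 *	unsigned int page_offset_quantized;
 *	unsigned long page_size;
 *
 *	page_size = mlx5_umem_find_best_cq_quantized_pgoff(
 *		umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
 *		page_offset, 64, &page_offset_quantized);
 *	if (!page_size)
 *		return -EINVAL;
 */
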
enum {
	MLX5_IB_MMAP_OFFSET_START = 9,
	MLX5_IB_MMAP_OFFSET_END = 255,
};

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG         = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
	MLX5_TM_MAX_SGE			= 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
	MLX5_IB_INVALID_BFREG		= BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES = 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN	= 6,
	MLX5_MEMIC_BASE_SIZE	= 1 << MLX5_MEMIC_BASE_ALIGN,
};

enum mlx5_ib_mmap_type {
	MLX5_IB_MMAP_TYPE_MEMIC = 1,
	MLX5_IB_MMAP_TYPE_VAR = 2,
	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
	MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
};

struct mlx5_bfreg_info {
	u32 *sys_pages;
	int num_low_latency_bfregs;
	unsigned int *count;

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex lock;
	u32 ver;
	u8 lib_uar_4k : 1;
	u8 lib_uar_dyn : 1;
	u32 num_sys_pages;
	u32 num_static_sys_pages;
	u32 total_num_bfregs;
	u32 num_dyn_bfregs;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;

	u64			lib_caps;
	u16			devx_uid;
	/* For RoCE LAG TX affinity */
	atomic_t		tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
	u16			uid;
};

enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1
#define MLX5_IB_NUM_FDB_FTS		MLX5_BY_PASS_NUM_REGULAR_PRIOS

struct mlx5_ib_anchor {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg_goto_table;
	struct mlx5_flow_group *fg_drop;
	struct mlx5_flow_handle *rule_goto_table;
	struct mlx5_flow_handle *rule_drop;
	unsigned int rule_goto_table_ref;
};

struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	struct mlx5_ib_anchor		anchor;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
	struct ib_counters		*ibcounters;
	struct mlx5_ib_dev		*dev;
	struct mlx5_ib_flow_matcher	*flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	int			mask_len;
	enum mlx5_ib_flow_type	flow_type;
	enum mlx5_flow_namespace_type ns_type;
	u16			priority;
	struct mlx5_core_dev	*mdev;
	atomic_t		usecnt;
	u8			match_criteria_enable;
	u32			ib_port;
};

struct mlx5_ib_steering_anchor {
	struct mlx5_ib_flow_prio *ft_prio;
	struct mlx5_ib_dev *dev;
	atomic_t usecnt;
};

struct mlx5_ib_pp {
	u16 index;
	struct mlx5_core_dev *mdev;
};

enum mlx5_ib_optional_counter_type {
	MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
	MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
	MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,
	MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS,
	MLX5_IB_OPCOUNTER_RDMA_TX_BYTES,
	MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS,
	MLX5_IB_OPCOUNTER_RDMA_RX_BYTES,

	MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP,
	MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS_PER_QP,
	MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS_PER_QP,
	MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP,
	MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP,
	MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP,
	MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP,

	MLX5_IB_OPCOUNTER_MAX,
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_ib_flow_prio	fdb[MLX5_IB_NUM_FDB_FTS];
	struct mlx5_ib_flow_prio	rdma_rx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	rdma_tx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	opfcs[MLX5_IB_OPCOUNTER_MAX];
	struct mlx5_flow_table		*lag_demux_ft;
	struct mlx5_ib_flow_prio        *rdma_transport_rx[MLX5_RDMA_TRANSPORT_BYPASS_PRIO];
	struct mlx5_ib_flow_prio        *rdma_transport_tx[MLX5_RDMA_TRANSPORT_BYPASS_PRIO];
	/* Protect flow steering bypass flow tables
	 * when adding/deleting flow rules.
	 * Only a single addition/removal of a flow steering rule can be
	 * done at a time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)
#define MLX5_IB_UPD_XLT_DOWNGRADE     BIT(7)
#define MLX5_IB_UPD_XLT_KEEP_PGSZ     BIT(8)

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1	IB_QP_CREATE_RESERVED_START

struct wr_list {
	u16	opcode;
	u16	next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING	= 1 << 1,
};

struct mlx5_ib_wq {
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16		        unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void			*cur_edge;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			rq_num_pas;
	u32			log_rq_stride;
	u32			log_rq_size;
	u32			rq_page_offset;
	u32			log_page_size;
	u32			log_num_strides;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	size_t			buf_size;
	unsigned int		page_shift;
	struct mlx5_db		db;
	u32			user_index;
	u32			wqe_count;
	u32			wqe_shift;
	int			wq_sig;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32			rqtn;
	u16			uid;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u32			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
	u32			flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer  ubuffer;
	struct mlx5_db		*doorbell;
	struct mlx5_flow_handle	*flow_rule;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int			buf_size;
	unsigned long		offset;
	struct mlx5_sq_bfreg   *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct    mdct;
	u32                     *in;
};

struct mlx5_ib_gsi_qp {
	struct ib_qp *rx_qp;
	u32 port_num;
	struct ib_qp_cap cap;
	struct ib_cq *cq;
	struct mlx5_ib_gsi_wr *outstanding_wrs;
	u32 outstanding_pi, outstanding_ci;
	int num_qps;
	/* Protects access to the tx_qps. Post send operations synchronize
	 * with tx_qp creation in setup_qp(). Also protects the
	 * outstanding_wrs array and indices.
	 */
	spinlock_t lock;
	struct ib_qp **tx_qps;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
		struct mlx5_ib_gsi_qp gsi;
	};
	struct mlx5_frag_buf	buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			next_fence;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	/* cached variant of create_flags from struct ib_qp_init_attr */
	u32			flags;
	u32			port;
	u8			state;
	int			max_inline_data;
	struct mlx5_bf	        bf;
	u8			has_rq:1;
	u8			is_rss:1;
	u8			is_ooo_rq:1;

	/* Only for user space QPs; for kernel QPs
	 * we have it from the bf object
	 */
	int			bfregn;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct mlx5_rate_limit	rl;
	u32                     underlay_qpn;
	u32			flags_en;
	/*
	 * IB/core doesn't store low-level QP types, so
	 * store both MLX and IBTA types in the field below.
	 */
	enum ib_qp_type		type;
	/* A flag to indicate that a new counter has been configured
	 * but has not yet taken effect
	 */
	u32                     counter_pending;
	u16			gsi_lag_port;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf    frag_buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
	MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_frag_buf	buf;
	struct mlx5_db		db;
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u8 mmap_flag;
	u64 address;
	u32 page_idx;
};

enum mlx5_mkey_type {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
	MLX5_MKEY_INDIRECT_DEVX,
	MLX5_MKEY_NULL,
	MLX5_MKEY_IMPLICIT_CHILD,
};

struct mlx5r_cache_rb_key {
	u8 ats:1;
	unsigned int access_mode;
	unsigned int access_flags;
	unsigned int ndescs;
};

struct mlx5_ib_mkey {
	u32 key;
	enum mlx5_mkey_type type;
	unsigned int ndescs;
	struct wait_queue_head wait;
	refcount_t usecount;
	/* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
	struct mlx5r_cache_rb_key rb_key;
	struct mlx5_cache_ent *cache_ent;
	u8 cacheable : 1;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					 IB_ACCESS_REMOTE_WRITE  |\
					 IB_ACCESS_REMOTE_READ   |\
					 IB_ACCESS_REMOTE_ATOMIC |\
					 IB_ZERO_BASED)

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					  IB_ACCESS_REMOTE_WRITE  |\
					  IB_ACCESS_REMOTE_READ   |\
					  IB_ZERO_BASED)

#define mlx5_update_odp_stats(mr, counter_name, value)		\
	atomic64_add(value, &((mr)->odp_stats.counter_name))

#define mlx5_update_odp_stats_with_handled(mr, counter_name, value)         \
	do {                                                                \
		mlx5_update_odp_stats(mr, counter_name, value);             \
		atomic64_add(1, &((mr)->odp_stats.counter_name##_handled)); \
	} while (0)

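/*
 * Illustrative usage (a sketch, assuming the odp_stats member pair
 * faults/faults_handled exists for the counter name used): a page-fault
 * handler that resolved @npages pages would bump both the page count and
 * the handled-events count in one shot:
 *
 *	mlx5_update_odp_stats_with_handled(mr, faults, npages);
 */
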
struct mlx5_ib_mr {
	struct ib_mr ibmr;
	struct mlx5_ib_mkey mmkey;

	struct ib_umem *umem;
	/* The mr is data direct related */
	u8 data_direct :1;

	union {
		/* Used only by kernel MRs (umem == NULL) */
		struct {
			void *descs;
			void *descs_alloc;
			dma_addr_t desc_map;
			int max_descs;
			int desc_size;
			int access_mode;

			/* For Kernel IB_MR_TYPE_INTEGRITY */
			struct mlx5_core_sig_ctx *sig;
			struct mlx5_ib_mr *pi_mr;
			struct mlx5_ib_mr *klm_mr;
			struct mlx5_ib_mr *mtt_mr;
			u64 data_iova;
			u64 pi_iova;
			int meta_ndescs;
			int meta_length;
			int data_length;
		};

		/* Used only by User MRs (umem != NULL) */
		struct {
			unsigned int page_shift;
			/* Current access_flags */
			int access_flags;

			/* For User ODP */
			struct mlx5_ib_mr *parent;
			struct xarray implicit_children;
			union {
				struct work_struct work;
			} odp_destroy;
			struct ib_odp_counters odp_stats;
			bool is_odp_implicit;
			/* The affiliated data direct crossed mr */
			struct mlx5_ib_mr *dd_crossed_mr;
			struct list_head dd_node;
			u8 revoked :1;
			/* Indicates previous dmabuf page fault occurred */
			u8 dmabuf_faulted:1;
			struct mlx5_ib_mkey null_mmkey;
		};
	};
};

static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_odp;
}

static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_dmabuf;
}

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_ib_mkey	mmkey;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

enum {
	MLX5_UMR_STATE_UNINIT,
	MLX5_UMR_STATE_ACTIVE,
	MLX5_UMR_STATE_RECOVER,
	MLX5_UMR_STATE_ERR,
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* Protects from UMR QP overflow
	 */
	struct semaphore	sem;
	/* Protects from using UMR while the UMR is not active
	 */
	struct mutex lock;
	unsigned int state;
	/* Protects from repeat UMR QP creation */
	struct mutex init_lock;
};

#define NUM_MKEYS_PER_PAGE \
	((PAGE_SIZE - sizeof(struct list_head)) / sizeof(u32))

struct mlx5_mkeys_page {
	u32 mkeys[NUM_MKEYS_PER_PAGE];
	struct list_head list;
};
static_assert(sizeof(struct mlx5_mkeys_page) == PAGE_SIZE);

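/*
 * Worked example (assuming a 64-bit kernel with 4K pages): sizeof(struct
 * list_head) is 16 and sizeof(u32) is 4, so NUM_MKEYS_PER_PAGE is
 * (4096 - 16) / 4 = 1020, and the struct occupies 1020 * 4 + 16 = 4096
 * bytes - exactly one page, which is what the static_assert enforces.
 */
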
struct mlx5_mkeys_queue {
	struct list_head pages_list;
	u32 num_pages;
	unsigned long ci;
	spinlock_t lock; /* sync list ops */
};

struct mlx5_cache_ent {
	struct mlx5_mkeys_queue	mkeys_queue;
	u32			pending;

	char                    name[4];

	struct rb_node		node;
	struct mlx5r_cache_rb_key rb_key;

	u8 is_tmp:1;
	u8 disabled:1;
	u8 fill_to_high_water:1;
	u8 tmp_cleanup_scheduled:1;

	/*
	 * - limit is the low water mark for stored mkeys, 2 * limit is the
	 *   upper water mark.
	 */
	u32 in_use;
	u32 limit;

	/* Statistics */
	u32                     miss;

	struct mlx5_ib_dev     *dev;
	struct delayed_work	dwork;
};

struct mlx5r_async_create_mkey {
	union {
		u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
		u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	};
	struct mlx5_async_work cb_work;
	struct mlx5_cache_ent *ent;
	u32 mkey;
};

struct mlx5_mkey_cache {
	struct workqueue_struct *wq;
	struct rb_root		rb_root;
	struct mutex		rb_lock;
	struct dentry		*fs_root;
	unsigned long		last_add;
};

struct mlx5_ib_port_resources {
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_data_direct_resources {
	u32 pdn;
	u32 mkey;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct mutex cq_lock;
	u32 xrcdn0;
	u32 xrcdn1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mutex srq_lock;
	struct mlx5_ib_port_resources ports[2];
};

#define MAX_OPFC_RULES 2

struct mlx5_ib_op_fc {
	struct mlx5_fc *fc;
	struct mlx5_flow_handle *rule[MAX_OPFC_RULES];
};

struct mlx5_ib_counters {
	struct rdma_stat_desc *descs;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u32 num_op_counters;
	u16 set_id;
	struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
};

int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
			 struct mlx5_ib_op_fc *opfc,
			 enum mlx5_ib_optional_counter_type type);

void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_op_fc *opfc,
			     enum mlx5_ib_optional_counter_type type);

int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
			u32 port);

void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct rdma_counter *counter);

void mlx5r_fs_destroy_fcs(struct mlx5_ib_dev *dev,
			  struct rdma_counter *counter);

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	struct notifier_block	nb;
	struct netdev_net_notifier nn;
	struct notifier_block	mdev_nb;
	struct net_device	*tracking_netdev;
	atomic_t		tx_port_affinity;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev	*dev;
	u32			native_port_num;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep		*rep;
#ifdef CONFIG_MLX5_MACSEC
	struct mlx5_reserved_gids *reserved_gids;
#endif
};

struct mlx5_ib_dbg_param {
	int			offset;
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
	u32			port_num;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_MAX_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID,
	MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry			*root;
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev     *dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	u32			timeout;
	bool			activate;
	atomic_t		events_cnt;
	atomic_t		rqs_cnt;
	struct dentry		*dir_debugfs;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FS,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_QP,
	MLX5_IB_STAGE_SRQ,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_RESTRACK,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

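/*
 * Illustrative usage (a sketch; the real profiles live in the driver's
 * main.c and the function names here are placeholders): a profile is an
 * array of init/cleanup pairs indexed by stage, populated with
 * STAGE_CREATE() designated initializers:
 *
 *	static const struct mlx5_ib_profile example_profile = {
 *		STAGE_CREATE(MLX5_IB_STAGE_INIT,
 *			     example_stage_init,
 *			     example_stage_cleanup),
 *	};
 *
 * __mlx5_ib_add() then walks the stages in order, and __mlx5_ib_remove()
 * unwinds them in reverse.
 */
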
struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};

struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
	bool is_master;
	bool unaffiliate;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action		ib_action;
	union {
		struct {
			u64			    ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			u32 sub_type;
			union {
				struct mlx5_modify_hdr *modify_hdr;
				struct mlx5_pkt_reformat *pkt_reformat;
			};
		} flow_action_raw;
	};
};

struct mlx5_dm {
	struct mlx5_core_dev *dev;
	/* This lock is used to protect the access to the shared
	 * allocation map when concurrent requests by different
	 * processes are handled.
	 */
	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
	u64 *out;
	u32 flags;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action);
struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex		mutex;
	u32			user_td;
	int			qps;
	bool			enabled;
};

struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;
	struct mlx5_ib_dev *dev;
	struct mlx5_eq *core;
	struct work_struct work;
	spinlock_t lock; /* Pagefaults spinlock */
	struct workqueue_struct *wq;
	mempool_t *pool;
};

struct mlx5_devx_event_table {
	struct mlx5_nb devx_nb;
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;
};

struct mlx5_var_table {
	/* serialize updating the bitmap */
	struct mutex bitmap_lock;
	unsigned long *bitmap;
	u64 hw_start_addr;
	u32 stride_size;
	u64 num_var_hw_entries;
};

struct mlx5_port_caps {
	bool has_smi;
	u8 ext_port_cap;
};


struct mlx5_special_mkeys {
	u32 dump_fill_mkey;
	__be32 null_mkey;
	__be32 terminate_scatter_list_mkey;
};

struct mlx5_macsec {
	struct mutex lock; /* Protects mlx5_macsec internal contexts */
	struct list_head macsec_devices_list;
	struct notifier_block blocking_events_nb;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_data_direct_dev	*data_direct_dev;
	/* protect accessing data_direct_dev */
	struct mutex			data_direct_lock;
	struct notifier_block		mdev_events;
	struct notifier_block           lag_events;
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	u8				ib_active:1;
	u8				is_rep:1;
	u8				lag_active:1;
	u8				fill_delay;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;

	atomic_t			mkey_var;
	struct mlx5_mkey_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
	struct ib_odp_caps	odp_caps;
	u64			odp_max_size;
	struct mutex		odp_eq_mutex;
	struct mlx5_ib_pf_eq	odp_pf_eq;

	struct xarray		odp_mkeys;

	struct mlx5_ib_flow_db	*flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	struct list_head data_direct_mr_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg	bfreg;
	struct mlx5_sq_bfreg	fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	const struct mlx5_ib_profile	*profile;

	struct mlx5_ib_lb_state		lb;
	u8			umr_fence;
	struct list_head	ib_dev_list;
	u64			sys_image_guid;
	struct mlx5_dm		dm;
	u16			devx_whitelist_uid;
	struct mlx5_srq_table   srq_table;
	struct mlx5_qp_table    qp_table;
	struct mlx5_async_ctx   async_ctx;
	struct mlx5_devx_event_table devx_event_table;
	struct mlx5_var_table var_table;

	struct xarray sig_mrs;
	struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
	u16 pkey_table_len;
	u8 lag_ports;
	struct mlx5_special_mkeys mkeys;
	struct mlx5_data_direct_resources ddr;

#ifdef CONFIG_MLX5_MACSEC
	struct mlx5_macsec macsec;
#endif

	u8 num_plane;
	struct mlx5_ib_dev *smi_dev;
	const char *sub_dev_name;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr)
{
	return to_mdev(mr->ibmr.device);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
		struct mlx5_user_mmap_entry, rdma_entry);
}

int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev);
int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev);
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
			 size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct uverbs_attr_bundle *attrs);
int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_pre_destroy_cq(struct ib_cq *cq);
void mlx5_ib_post_destroy_cq(struct ib_cq *cq);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
					 u64 length, u64 virt_addr,
					 int fd, int access_flags,
					 struct uverbs_attr_bundle *attrs);
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs);
int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
				    u64 length, u64 virt_addr, int access_flags,
				    struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg,
					 u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props);
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
			  u64 access_flags);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_cache_ent *
mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
			      struct mlx5r_cache_rb_key rb_key,
			      bool persistent_entry);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       int access_flags, int access_mode,
				       int ndescs);

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);
void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
			      struct mlx5_data_direct_dev *dev);
void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev);
void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev);
int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			  struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_pf_eq *eq)
{
	return 0;
}
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void)				    {}
static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{
	return 0;
}
static inline int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					struct mlx5_ib_mr *mr, int flags)
{
	return -EOPNOTSUPP;
}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
int __mlx5_ib_add(struct mlx5_ib_dev *dev,
		  const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u32 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u32 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u32 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
				   const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);

/* GSI QP helper functions */
int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
		       struct ib_qp_init_attr *attr);
int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u32 ib_port_num,
						   u32 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u32 port_num);

extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
extern const struct uapi_definition mlx5_ib_std_types_defs[];
extern const struct uapi_definition mlx5_ib_create_cq_defs[];

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
}

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value for unsupported CQ create flags,
	 * zero otherwise.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

extern void *xlt_emergency_page;

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
				       struct mlx5_ib_mkey *mmkey)
{
	refcount_set(&mmkey->usecount, 1);

	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
			       mmkey, GFP_KERNEL));
}

/* deref an mkey that can participate in ODP flow */
static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	if (refcount_dec_and_test(&mmkey->usecount))
		wake_up(&mmkey->wait);
}

/* deref an mkey that can participate in ODP flow and wait for release */
static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	mlx5r_deref_odp_mkey(mmkey);
	wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
}

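/*
 * Illustrative lifecycle (a sketch of the pattern the helpers above are
 * designed for): an mkey is published with a refcount of 1 at registration
 * time, each ODP fault takes/drops a temporary reference, and teardown
 * unpublishes the mkey and waits for in-flight users to drain:
 *
 *	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);	// publish
 *	...
 *	xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
 *	mlx5r_deref_wait_odp_mkey(&mr->mmkey);		// drop + wait
 */
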
static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
	/*
	 * If the driver is in hash mode and the port_select_flow_table_bypass cap
	 * is supported, it means that the driver no longer needs to assign the port
	 * affinity by default. If a user wants to set the port affinity explicitly,
	 * the user has a dedicated API to do that, so there is no need to assign
	 * the port affinity by default.
	 */
	if (dev->lag_active &&
	    mlx5_lag_mode_is_hash(dev->mdev) &&
	    MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
		return 0;

	if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
		return 0;

	return dev->lag_active ||
		(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
		 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
}

static inline bool rt_supported(int ts_cap)
{
	return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
	       ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
}

/*
 * PCI Peer to Peer is a trainwreck. If no switch is present then things
 * sometimes work, depending on the pci_distance_p2p logic for excluding broken
 * root complexes. However if a switch is present in the path, then things get
 * really ugly depending on how the switch is set up. This table assumes that
 * the root complex is strict and is validating that all req/reps match
 * perfectly - so any scenario where it sees only half the transaction is a
 * failure.
 *
 * CR/RR/DT  ATS RO P2P
 * 00X       X   X  OK
 * 010       X   X  fails (request is routed to root but root never sees comp)
 * 011       0   X  fails (request is routed to root but root never sees comp)
 * 011       1   X  OK
 * 10X       X   1  OK
 * 101       X   0  fails (completion is routed to root but root didn't see req)
 * 110       X   0  SLOW
 * 111       0   0  SLOW
 * 111       1   0  fails (completion is routed to root but root didn't see req)
 * 111       1   1  OK
 *
 * Unfortunately we cannot reliably know if a switch is present or what the
 * CR/RR/DT ACS settings are, as in a VM that is all hidden. Assume that
 * CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows.
 *
 * For now assume if the umem is a dma_buf then it is P2P.
 */
static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
				       struct ib_umem *umem, int access_flags)
{
	if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf)
		return false;
	return access_flags & IB_ACCESS_RELAXED_ORDERING;
}

int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
		  unsigned int index, const union ib_gid *gid,
		  const struct ib_gid_attr *attr);

static inline u32 smi_to_native_portnum(struct mlx5_ib_dev *dev, u32 port)
{
	return (port - 1) / dev->num_ports + 1;
}

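/*
 * Worked example of the mapping above: with dev->num_ports == 2, SMI ports
 * 1 and 2 map to native port 1 ((1-1)/2 + 1 == (2-1)/2 + 1 == 1), while
 * SMI ports 3 and 4 map to native port 2.
 */
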
static inline unsigned int get_max_log_entity_size_cap(struct mlx5_ib_dev *dev,
						       int access_mode)
{
	int max_log_size = 0;

	if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		max_log_size =
			MLX5_CAP_GEN_2(dev->mdev, max_mkey_log_entity_size_mtt);
	else if (access_mode == MLX5_MKC_ACCESS_MODE_KSM)
		max_log_size = MLX5_CAP_GEN_2(
			dev->mdev, max_mkey_log_entity_size_fixed_buffer);

	if (!max_log_size ||
	    (max_log_size > 31 &&
	     !MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5)))
		max_log_size = 31;

	return max_log_size;
}

static inline unsigned int get_min_log_entity_size_cap(struct mlx5_ib_dev *dev,
						       int access_mode)
{
	int min_log_size = 0;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KSM &&
	    MLX5_CAP_GEN_2(dev->mdev,
			   min_mkey_log_entity_size_fixed_buffer_valid))
		min_log_size = MLX5_CAP_GEN_2(
			dev->mdev, min_mkey_log_entity_size_fixed_buffer);
	else
		min_log_size =
			MLX5_CAP_GEN_2(dev->mdev, log_min_mkey_entity_size);

	min_log_size = max(min_log_size, MLX5_ADAPTER_PAGE_SHIFT);
	return min_log_size;
}

/*
 * For mkc users, instead of a page_offset the command has a start_iova which
 * specifies both the page_offset and the on-the-wire IOVA
 */
static __always_inline unsigned long
mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			     u64 iova, int access_mode)
{
	unsigned int max_log_entity_size_cap, min_log_entity_size_cap;
	unsigned long bitmap;

	max_log_entity_size_cap = get_max_log_entity_size_cap(dev, access_mode);
	min_log_entity_size_cap = get_min_log_entity_size_cap(dev, access_mode);

	bitmap = GENMASK_ULL(max_log_entity_size_cap, min_log_entity_size_cap);

	return ib_umem_find_best_pgsz(umem, bitmap, iova);
}

static inline unsigned long
mlx5_umem_dmabuf_find_best_pgsz(struct ib_umem_dmabuf *umem_dmabuf,
				int access_mode)
{
	return mlx5_umem_mkc_find_best_pgsz(to_mdev(umem_dmabuf->umem.ibdev),
					    &umem_dmabuf->umem,
					    umem_dmabuf->umem.iova,
					    access_mode);
}

#endif /* MLX5_IB_H */