/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>

#include "srq.h"
#include "qp.h"
#include "macsec.h"

#define mlx5_ib_dbg(_dev, format, arg...)                                      \
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)                                      \
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)                                     \
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
		 __LINE__, current->pid, ##arg)

#define mlx5_ib_log(lvl, _dev, format, arg...)                                 \
	dev_printk(lvl, &(_dev)->ib_dev.dev,  "%s:%d:(pid %d): " format,       \
		   __func__, __LINE__, current->pid, ##arg)

#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

static __always_inline unsigned long
__mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
			       unsigned int pgsz_shift)
{
	unsigned int largest_pg_shift =
		min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift,
		      BITS_PER_LONG - 1);

	/*
	 * Despite the command allowing it, the device does not support page
	 * sizes lower than 4k.
	 */
	pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift);
	return GENMASK(largest_pg_shift, pgsz_shift);
}
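
/*
 * Worked example (illustrative): for a 5-bit log_page_size field and
 * pgsz_shift = 0, largest_pg_shift = min(2^5 - 1, BITS_PER_LONG - 1) = 31,
 * and pgsz_shift is clamped up to MLX5_ADAPTER_PAGE_SHIFT (12), so the
 * result is GENMASK(31, 12): every power-of-two page size from 4K to 2G.
 */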

static __always_inline unsigned long
__mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
			      unsigned int offset_shift)
{
	unsigned int largest_offset_shift =
		min_t(unsigned long, page_offset_bits - 1 + offset_shift,
		      BITS_PER_LONG - 1);

	return GENMASK(largest_offset_shift, offset_shift);
}

/*
 * QP/CQ/WQ/etc type commands take a page offset that satisfies:
 *   page_offset_quantized * (page_size/scale) = page_offset
 * which restricts the allowed page sizes to those that satisfy the above.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
	struct ib_umem *umem, unsigned long pgsz_bitmap,
	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
	unsigned int *page_offset_quantized);
#define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld,           \
					    pgsz_shift, page_offset_fld,       \
					    scale, page_offset_quantized)      \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld),                           \
		GENMASK(31, order_base_2(scale)), scale,                       \
		page_offset_quantized)
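
/*
 * Worked example (illustrative): with scale = 64 and a selected page_size of
 * 4096, the offset quantum is page_size / scale = 64 bytes, so a buffer that
 * starts 256 bytes into its first page quantizes to
 * page_offset_quantized = 4 (4 * 64 = 256). Page sizes for which the offset
 * is not a whole multiple of the quantum are dropped from the bitmap.
 */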

#define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld,        \
					       pgsz_shift, page_offset_fld,    \
					       scale, page_offset_quantized)   \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld), 0, scale,                 \
		page_offset_quantized)

static inline unsigned long
mlx5_umem_dmabuf_find_best_pgsz(struct ib_umem_dmabuf *umem_dmabuf)
{
	/*
	 * mkeys used for dmabuf are fixed at PAGE_SIZE because we must be able
	 * to hold any sgl after a move operation. Ideally the mkc page size
	 * could be changed at runtime to be optimal, but right now the driver
	 * cannot do that.
	 */
	return ib_umem_find_best_pgsz(&umem_dmabuf->umem, PAGE_SIZE,
				      umem_dmabuf->umem.iova);
}

enum {
	MLX5_IB_MMAP_OFFSET_START = 9,
	MLX5_IB_MMAP_OFFSET_END = 255,
};

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG         = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
	MLX5_TM_MAX_SGE			= 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
	MLX5_IB_INVALID_BFREG		= BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES = 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN	= 6,
	MLX5_MEMIC_BASE_SIZE	= 1 << MLX5_MEMIC_BASE_ALIGN,
};

enum mlx5_ib_mmap_type {
	MLX5_IB_MMAP_TYPE_MEMIC = 1,
	MLX5_IB_MMAP_TYPE_VAR = 2,
	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
	MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
};

struct mlx5_bfreg_info {
	u32 *sys_pages;
	int num_low_latency_bfregs;
	unsigned int *count;

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex lock;
	u32 ver;
	u8 lib_uar_4k : 1;
	u8 lib_uar_dyn : 1;
	u32 num_sys_pages;
	u32 num_static_sys_pages;
	u32 total_num_bfregs;
	u32 num_dyn_bfregs;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;

	u64			lib_caps;
	u16			devx_uid;
	/* For RoCE LAG TX affinity */
	atomic_t		tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
	u16			uid;
};

enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1
#define MLX5_IB_NUM_FDB_FTS		MLX5_BY_PASS_NUM_REGULAR_PRIOS

struct mlx5_ib_anchor {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg_goto_table;
	struct mlx5_flow_group *fg_drop;
	struct mlx5_flow_handle *rule_goto_table;
	struct mlx5_flow_handle *rule_drop;
	unsigned int rule_goto_table_ref;
};

struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	struct mlx5_ib_anchor		anchor;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
	struct ib_counters		*ibcounters;
	struct mlx5_ib_dev		*dev;
	struct mlx5_ib_flow_matcher	*flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	int			mask_len;
	enum mlx5_ib_flow_type	flow_type;
	enum mlx5_flow_namespace_type ns_type;
	u16			priority;
	struct mlx5_core_dev	*mdev;
	atomic_t		usecnt;
	u8			match_criteria_enable;
	u32			ib_port;
};

struct mlx5_ib_steering_anchor {
	struct mlx5_ib_flow_prio *ft_prio;
	struct mlx5_ib_dev *dev;
	atomic_t usecnt;
};

struct mlx5_ib_pp {
	u16 index;
	struct mlx5_core_dev *mdev;
};

enum mlx5_ib_optional_counter_type {
	MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
	MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
	MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,
	MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS,
	MLX5_IB_OPCOUNTER_RDMA_TX_BYTES,
	MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS,
	MLX5_IB_OPCOUNTER_RDMA_RX_BYTES,

	MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP,
	MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS_PER_QP,
	MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS_PER_QP,
	MLX5_IB_OPCOUNTER_RDMA_TX_PACKETS_PER_QP,
	MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP,
	MLX5_IB_OPCOUNTER_RDMA_RX_PACKETS_PER_QP,
	MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP,

	MLX5_IB_OPCOUNTER_MAX,
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_ib_flow_prio	fdb[MLX5_IB_NUM_FDB_FTS];
	struct mlx5_ib_flow_prio	rdma_rx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	rdma_tx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	opfcs[MLX5_IB_OPCOUNTER_MAX];
	struct mlx5_flow_table		*lag_demux_ft;
	struct mlx5_ib_flow_prio        *rdma_transport_rx;
	struct mlx5_ib_flow_prio        *rdma_transport_tx;
	/* Protects the flow steering bypass flow tables when flow rules are
	 * added or deleted; only a single addition/removal of a flow
	 * steering rule may be in progress at any time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)
#define MLX5_IB_UPD_XLT_DOWNGRADE     BIT(7)

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1	IB_QP_CREATE_RESERVED_START

struct wr_list {
	u16	opcode;
	u16	next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING	= 1 << 1,
};

struct mlx5_ib_wq {
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16		        unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void			*cur_edge;
};

enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			rq_num_pas;
	u32			log_rq_stride;
	u32			log_rq_size;
	u32			rq_page_offset;
	u32			log_page_size;
	u32			log_num_strides;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	size_t			buf_size;
	unsigned int		page_shift;
	struct mlx5_db		db;
	u32			user_index;
	u32			wqe_count;
	u32			wqe_shift;
	int			wq_sig;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32			rqtn;
	u16			uid;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u32			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
	u32			flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer  ubuffer;
	struct mlx5_db		*doorbell;
	struct mlx5_flow_handle	*flow_rule;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int			buf_size;
	unsigned long		offset;
	struct mlx5_sq_bfreg   *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct    mdct;
	u32                     *in;
};

struct mlx5_ib_gsi_qp {
	struct ib_qp *rx_qp;
	u32 port_num;
	struct ib_qp_cap cap;
	struct ib_cq *cq;
	struct mlx5_ib_gsi_wr *outstanding_wrs;
	u32 outstanding_pi, outstanding_ci;
	int num_qps;
	/* Protects access to the tx_qps. Post send operations synchronize
	 * with tx_qp creation in setup_qp(). Also protects the
	 * outstanding_wrs array and indices.
	 */
	spinlock_t lock;
	struct ib_qp **tx_qps;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
		struct mlx5_ib_gsi_qp gsi;
	};
	struct mlx5_frag_buf	buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			next_fence;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	/* cached variant of create_flags from struct ib_qp_init_attr */
	u32			flags;
	u32			port;
	u8			state;
	int			max_inline_data;
	struct mlx5_bf	        bf;
	u8			has_rq:1;
	u8			is_rss:1;
	u8			is_ooo_rq:1;

	/* Only for user space QPs; for kernel QPs
	 * we get it from the bf object
	 */
	int			bfregn;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct mlx5_rate_limit	rl;
	u32                     underlay_qpn;
	u32			flags_en;
	/*
	 * IB/core doesn't store low-level QP types, so
	 * store both MLX and IBTA types in the field below.
	 */
	enum ib_qp_type		type;
	/* A flag indicating that a new counter has been configured
	 * but has not yet taken effect
	 */
	u32                     counter_pending;
	u16			gsi_lag_port;
};

struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf    frag_buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
	MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_frag_buf	buf;
	struct mlx5_db		db;
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u8 mmap_flag;
	u64 address;
	u32 page_idx;
};

enum mlx5_mkey_type {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
	MLX5_MKEY_INDIRECT_DEVX,
	MLX5_MKEY_NULL,
	MLX5_MKEY_IMPLICIT_CHILD,
};

struct mlx5r_cache_rb_key {
	u8 ats:1;
	unsigned int access_mode;
	unsigned int access_flags;
	unsigned int ndescs;
};

struct mlx5_ib_mkey {
	u32 key;
	enum mlx5_mkey_type type;
	unsigned int ndescs;
	struct wait_queue_head wait;
	refcount_t usecount;
	/* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
	struct mlx5r_cache_rb_key rb_key;
	struct mlx5_cache_ent *cache_ent;
	u8 cacheable : 1;
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					 IB_ACCESS_REMOTE_WRITE  |\
					 IB_ACCESS_REMOTE_READ   |\
					 IB_ACCESS_REMOTE_ATOMIC |\
					 IB_ZERO_BASED)

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					  IB_ACCESS_REMOTE_WRITE  |\
					  IB_ACCESS_REMOTE_READ   |\
					  IB_ZERO_BASED)

#define mlx5_update_odp_stats(mr, counter_name, value)		\
	atomic64_add(value, &((mr)->odp_stats.counter_name))

#define mlx5_update_odp_stats_with_handled(mr, counter_name, value)         \
	do {                                                                \
		mlx5_update_odp_stats(mr, counter_name, value);             \
		atomic64_add(1, &((mr)->odp_stats.counter_name##_handled)); \
	} while (0)
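
/*
 * Illustrative usage (the "faults" field name is an assumption; the macro's
 * token pasting requires a matching "<name>_handled" companion in
 * ib_odp_counters):
 *
 *	mlx5_update_odp_stats_with_handled(mr, faults, npages);
 *
 * adds npages to mr->odp_stats.faults and increments
 * mr->odp_stats.faults_handled by one.
 */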

struct mlx5_ib_mr {
	struct ib_mr ibmr;
	struct mlx5_ib_mkey mmkey;

	struct ib_umem *umem;
	/* The MR is data-direct related */
	u8 data_direct :1;

	union {
		/* Used only by kernel MRs (umem == NULL) */
		struct {
			void *descs;
			void *descs_alloc;
			dma_addr_t desc_map;
			int max_descs;
			int desc_size;
			int access_mode;

			/* For Kernel IB_MR_TYPE_INTEGRITY */
			struct mlx5_core_sig_ctx *sig;
			struct mlx5_ib_mr *pi_mr;
			struct mlx5_ib_mr *klm_mr;
			struct mlx5_ib_mr *mtt_mr;
			u64 data_iova;
			u64 pi_iova;
			int meta_ndescs;
			int meta_length;
			int data_length;
		};

		/* Used only by User MRs (umem != NULL) */
		struct {
			unsigned int page_shift;
			/* Current access_flags */
			int access_flags;

			/* For User ODP */
			struct mlx5_ib_mr *parent;
			struct xarray implicit_children;
			union {
				struct work_struct work;
			} odp_destroy;
			struct ib_odp_counters odp_stats;
			bool is_odp_implicit;
			/* The affiliated data-direct crossed MR */
			struct mlx5_ib_mr *dd_crossed_mr;
			struct list_head dd_node;
			u8 revoked :1;
			struct mlx5_ib_mkey null_mmkey;
		};
	};
};

static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_odp;
}

static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_dmabuf;
}

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_ib_mkey	mmkey;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

enum {
	MLX5_UMR_STATE_UNINIT,
	MLX5_UMR_STATE_ACTIVE,
	MLX5_UMR_STATE_RECOVER,
	MLX5_UMR_STATE_ERR,
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* Protects from UMR QP overflow
	 */
	struct semaphore	sem;
	/* Protects from using UMR while the UMR is not active
	 */
	struct mutex lock;
	unsigned int state;
	/* Protects from repeat UMR QP creation */
	struct mutex init_lock;
};

#define NUM_MKEYS_PER_PAGE \
	((PAGE_SIZE - sizeof(struct list_head)) / sizeof(u32))

struct mlx5_mkeys_page {
	u32 mkeys[NUM_MKEYS_PER_PAGE];
	struct list_head list;
};
static_assert(sizeof(struct mlx5_mkeys_page) == PAGE_SIZE);
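
/*
 * Worked example (illustrative): with 4K pages and 64-bit pointers,
 * sizeof(struct list_head) is 16 bytes, so NUM_MKEYS_PER_PAGE is
 * (4096 - 16) / 4 = 1020 mkeys, and 1020 * 4 + 16 = 4096 satisfies the
 * static_assert above.
 */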

struct mlx5_mkeys_queue {
	struct list_head pages_list;
	u32 num_pages;
	unsigned long ci;
	spinlock_t lock; /* sync list ops */
};

struct mlx5_cache_ent {
	struct mlx5_mkeys_queue	mkeys_queue;
	u32			pending;

	char                    name[4];

	struct rb_node		node;
	struct mlx5r_cache_rb_key rb_key;

	u8 is_tmp:1;
	u8 disabled:1;
	u8 fill_to_high_water:1;
	u8 tmp_cleanup_scheduled:1;

	/*
	 * limit is the low water mark for stored mkeys; 2 * limit is the
	 * high water mark.
	 */
	u32 in_use;
	u32 limit;

	/* Statistics */
	u32                     miss;

	struct mlx5_ib_dev     *dev;
	struct delayed_work	dwork;
};

struct mlx5r_async_create_mkey {
	union {
		u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
		u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	};
	struct mlx5_async_work cb_work;
	struct mlx5_cache_ent *ent;
	u32 mkey;
};

struct mlx5_mkey_cache {
	struct workqueue_struct *wq;
	struct rb_root		rb_root;
	struct mutex		rb_lock;
	struct dentry		*fs_root;
	unsigned long		last_add;
};

struct mlx5_ib_port_resources {
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_data_direct_resources {
	u32 pdn;
	u32 mkey;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct mutex cq_lock;
	u32 xrcdn0;
	u32 xrcdn1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mutex srq_lock;
	struct mlx5_ib_port_resources ports[2];
};

#define MAX_OPFC_RULES 2

struct mlx5_ib_op_fc {
	struct mlx5_fc *fc;
	struct mlx5_flow_handle *rule[MAX_OPFC_RULES];
};

struct mlx5_ib_counters {
	struct rdma_stat_desc *descs;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u32 num_op_counters;
	u16 set_id;
	struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
};

int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
			 struct mlx5_ib_op_fc *opfc,
			 enum mlx5_ib_optional_counter_type type);

void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_op_fc *opfc,
			     enum mlx5_ib_optional_counter_type type);

int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
			u32 port);

void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct rdma_counter *counter);

void mlx5r_fs_destroy_fcs(struct mlx5_ib_dev *dev,
			  struct rdma_counter *counter);

struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	struct notifier_block	nb;
	struct netdev_net_notifier nn;
	struct notifier_block	mdev_nb;
	struct net_device	*tracking_netdev;
	atomic_t		tx_port_affinity;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev	*dev;
	u32			native_port_num;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep		*rep;
#ifdef CONFIG_MLX5_MACSEC
	struct mlx5_reserved_gids *reserved_gids;
#endif
};

struct mlx5_ib_dbg_param {
	int			offset;
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
	u32			port_num;
};

enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_MAX_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID,
	MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry			*root;
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev     *dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	u32			timeout;
	bool			activate;
	atomic_t		events_cnt;
	atomic_t		rqs_cnt;
	struct dentry		*dir_debugfs;
};

enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FS,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_QP,
	MLX5_IB_STAGE_SRQ,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_RESTRACK,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};

struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
	bool is_master;
	bool unaffiliate;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action		ib_action;
	union {
		struct {
			u64			    ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			u32 sub_type;
			union {
				struct mlx5_modify_hdr *modify_hdr;
				struct mlx5_pkt_reformat *pkt_reformat;
			};
		} flow_action_raw;
	};
};

struct mlx5_dm {
	struct mlx5_core_dev *dev;
	/* This lock is used to protect the access to the shared
	 * allocation map when concurrent requests by different
	 * processes are handled.
	 */
	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
	u64 *out;
	u32 flags;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};

struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action);
struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex		mutex;
	u32			user_td;
	int			qps;
	bool			enabled;
};

struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;
	struct mlx5_ib_dev *dev;
	struct mlx5_eq *core;
	struct work_struct work;
	spinlock_t lock; /* Pagefaults spinlock */
	struct workqueue_struct *wq;
	mempool_t *pool;
};

struct mlx5_devx_event_table {
	struct mlx5_nb devx_nb;
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;
};

struct mlx5_var_table {
	/* serialize updating the bitmap */
	struct mutex bitmap_lock;
	unsigned long *bitmap;
	u64 hw_start_addr;
	u32 stride_size;
	u64 num_var_hw_entries;
};

struct mlx5_port_caps {
	bool has_smi;
	u8 ext_port_cap;
};

struct mlx5_special_mkeys {
	u32 dump_fill_mkey;
	__be32 null_mkey;
	__be32 terminate_scatter_list_mkey;
};

struct mlx5_macsec {
	struct mutex lock; /* Protects mlx5_macsec internal contexts */
	struct list_head macsec_devices_list;
	struct notifier_block blocking_events_nb;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_data_direct_dev	*data_direct_dev;
	/* protect accessing data_direct_dev */
	struct mutex			data_direct_lock;
	struct notifier_block		mdev_events;
	struct notifier_block           lag_events;
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	u8				ib_active:1;
	u8				is_rep:1;
	u8				lag_active:1;
	u8				fill_delay;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;

	atomic_t			mkey_var;
	struct mlx5_mkey_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
	struct ib_odp_caps	odp_caps;
	u64			odp_max_size;
	struct mutex		odp_eq_mutex;
	struct mlx5_ib_pf_eq	odp_pf_eq;

	struct xarray		odp_mkeys;

	struct mlx5_ib_flow_db	*flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	struct list_head data_direct_mr_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg	bfreg;
	struct mlx5_sq_bfreg	fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	const struct mlx5_ib_profile	*profile;

	struct mlx5_ib_lb_state		lb;
	u8			umr_fence;
	struct list_head	ib_dev_list;
	u64			sys_image_guid;
	struct mlx5_dm		dm;
	u16			devx_whitelist_uid;
	struct mlx5_srq_table   srq_table;
	struct mlx5_qp_table    qp_table;
	struct mlx5_async_ctx   async_ctx;
	struct mlx5_devx_event_table devx_event_table;
	struct mlx5_var_table var_table;

	struct xarray sig_mrs;
	struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
	u16 pkey_table_len;
	u8 lag_ports;
	struct mlx5_special_mkeys mkeys;
	struct mlx5_data_direct_resources ddr;

#ifdef CONFIG_MLX5_MACSEC
	struct mlx5_macsec macsec;
#endif

	u8 num_plane;
	struct mlx5_ib_dev *smi_dev;
	const char *sub_dev_name;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr)
{
	return to_mdev(mr->ibmr.device);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
		struct mlx5_user_mmap_entry, rdma_entry);
}

int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev);
int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev);
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
			 size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct uverbs_attr_bundle *attrs);
int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
					 u64 length, u64 virt_addr,
					 int fd, int access_flags,
					 struct uverbs_attr_bundle *attrs);
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs);
int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
				    u64 length, u64 virt_addr, int access_flags,
				    struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg,
					 u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props);
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
			  u64 access_flags);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_cache_ent *
mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
			      struct mlx5r_cache_rb_key rb_key,
			      bool persistent_entry);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       int access_flags, int access_mode,
				       int ndescs);

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);
void mlx5_ib_data_direct_bind(struct mlx5_ib_dev *ibdev,
			      struct mlx5_data_direct_dev *dev);
void mlx5_ib_data_direct_unbind(struct mlx5_ib_dev *ibdev);
void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev);
int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			  struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_pf_eq *eq)
{
	return 0;
}
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void)				    {}
static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{
	return 0;
}
static inline int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					struct mlx5_ib_mr *mr, int flags)
{
	return -EOPNOTSUPP;
}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
int __mlx5_ib_add(struct mlx5_ib_dev *dev,
		  const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u32 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u32 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u32 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
				   const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);

/* GSI QP helper functions */
int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
		       struct ib_qp_init_attr *attr);
int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u32 ib_port_num,
						   u32 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u32 port_num);

extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
extern const struct uapi_definition mlx5_ib_std_types_defs[];
extern const struct uapi_definition mlx5_ib_create_cq_defs[];

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
}

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value for unsupported CQ
	 * create flags; otherwise returns zero.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

extern void *xlt_emergency_page;

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
				       struct mlx5_ib_mkey *mmkey)
{
	refcount_set(&mmkey->usecount, 1);

	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
			       mmkey, GFP_KERNEL));
}

/* deref an mkey that can participate in ODP flow */
static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	if (refcount_dec_and_test(&mmkey->usecount))
		wake_up(&mmkey->wait);
}

/* deref an mkey that can participate in ODP flow and wait for release */
static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	mlx5r_deref_odp_mkey(mmkey);
	wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
}
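
/*
 * Typical lifecycle (illustrative): mlx5r_store_odp_mkey() publishes the
 * mkey with usecount = 1, in-flight users take and drop extra references
 * with refcount_inc()/mlx5r_deref_odp_mkey(), and teardown calls
 * mlx5r_deref_wait_odp_mkey() to drop the initial reference and block
 * until usecount reaches zero.
 */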

static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
	/*
	 * If the driver is in hash mode and the port_select_flow_table_bypass cap
	 * is supported, it means that the driver no longer needs to assign the port
	 * affinity by default. If a user wants to set the port affinity explicitly,
	 * the user has a dedicated API to do that, so there is no need to assign
	 * the port affinity by default.
	 */
	if (dev->lag_active &&
	    mlx5_lag_mode_is_hash(dev->mdev) &&
	    MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
		return 0;

	if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
		return 0;

	return dev->lag_active ||
		(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
		 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
}

static inline bool rt_supported(int ts_cap)
{
	return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
	       ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
}

/*
 * PCI Peer to Peer is a trainwreck. If no switch is present then things
 * sometimes work, depending on the pci_distance_p2p logic for excluding broken
 * root complexes. However if a switch is present in the path, then things get
 * really ugly depending on how the switch is set up. This table assumes that
 * the root complex is strict and validates that all req/reps are matched
 * perfectly - so any scenario where it sees only half the transaction is a
 * failure.
 *
 * CR/RR/DT  ATS RO P2P
 * 00X       X   X  OK
 * 010       X   X  fails (request is routed to root but root never sees comp)
 * 011       0   X  fails (request is routed to root but root never sees comp)
 * 011       1   X  OK
 * 10X       X   1  OK
 * 101       X   0  fails (completion is routed to root but root didn't see req)
 * 110       X   0  SLOW
 * 111       0   0  SLOW
 * 111       1   0  fails (completion is routed to root but root didn't see req)
 * 111       1   1  OK
 *
 * Unfortunately we cannot reliably know if a switch is present or what the
 * CR/RR/DT ACS settings are, as in a VM that is all hidden. Assume that
 * CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows.
 *
 * For now assume if the umem is a dma_buf then it is P2P.
 */
static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
				       struct ib_umem *umem, int access_flags)
{
	if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf)
		return false;
	return access_flags & IB_ACCESS_RELAXED_ORDERING;
}

int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
		  unsigned int index, const union ib_gid *gid,
		  const struct ib_gid_attr *attr);

static inline u32 smi_to_native_portnum(struct mlx5_ib_dev *dev, u32 port)
{
	return (port - 1) / dev->num_ports + 1;
}
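
/*
 * Worked example (illustrative): with dev->num_ports = 2, SMI ports 1 and 2
 * map to native port 1, and ports 3 and 4 map to native port 2, per the
 * (port - 1) / num_ports + 1 arithmetic above.
 */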

/*
 * For mkc users, instead of a page_offset the command has a start_iova which
 * specifies both the page_offset and the on-the-wire IOVA
 */
static __always_inline unsigned long
mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			     u64 iova)
{
	int page_size_bits =
		MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5) ? 6 : 5;
	unsigned long bitmap =
		__mlx5_log_page_size_to_bitmap(page_size_bits, 0);

	return ib_umem_find_best_pgsz(umem, bitmap, iova);
}
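
/*
 * Worked example (illustrative): when the umr_log_entity_size_5 cap is set,
 * page_size_bits = 6 and __mlx5_log_page_size_to_bitmap(6, 0) yields
 * GENMASK(63, 12); otherwise the 5-bit field caps the bitmap at
 * GENMASK(31, 12). Either way the 4k floor comes from
 * MLX5_ADAPTER_PAGE_SHIFT.
 */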

#endif /* MLX5_IB_H */