/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

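/*
 * Usage sketch (illustrative; "dev" and "err" are hypothetical locals):
 * the macros above behave like printk(), with the device name, function,
 * line number and current pid prefixed automatically, e.g.
 *
 *	mlx5_ib_warn(dev, "create QP failed, err %d\n", err);
 */
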
#define field_avail(type, fld, sz) (offsetof(type, fld) +		\
				    sizeof(((type *)0)->fld) <= (sz))
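
/*
 * A minimal sketch of how field_avail() is meant to be used ("udata" and
 * "ucmd" are hypothetical locals): it checks whether a userspace command
 * buffer is long enough to contain a given trailing field, so newer kernels
 * can parse shorter commands coming from older userspace libraries, e.g.
 *
 *	if (field_avail(struct mlx5_ib_create_qp, uidx, udata->inlen))
 *		uidx = ucmd.uidx;
 */
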
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1,
	MLX5_IB_MMAP_WC_PAGE			= 2,
	MLX5_IB_MMAP_NC_PAGE			= 3,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK			= 5,
};
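
/*
 * A sketch of how the SHIFT/MASK pair above combines (the helper name is
 * hypothetical; the driver's real decode helpers live in its .c files):
 * userspace encodes the mmap command in the bits above
 * MLX5_IB_MMAP_CMD_SHIFT of the mmap page offset, leaving the low bits
 * for a per-command index.
 */
static inline int mlx5_ib_mmap_cmd_example(unsigned long pgoff)
{
	return (pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}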

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG         = 0,
};

enum {
	MLX5_CQE_VERSION_V0,
	MLX5_CQE_VERSION_V1,
};

struct mlx5_ib_vma_private_data {
	struct list_head list;
	struct vm_area_struct *vma;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;
	struct list_head	vma_private_list;

	unsigned long		upd_xlt_page;
	/* protect ODP/KSM */
	struct mutex		upd_xlt_page_mutex;
	u64			lib_caps;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
};

struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when adding/removing flow rules.
	 * Only a single add/remove of a flow steering rule can be
	 * in progress at a time.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS       IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
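
/*
 * Usage sketch (hypothetical call site; "mr", "npages" and "page_shift"
 * are assumed locals): the MLX5_IB_UPD_XLT_* bits are combined into the
 * flags argument of mlx5_ib_update_xlt(), declared below, to select which
 * parts of an mkey's translation entries to rewrite, e.g.
 *
 *	err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
 *				 MLX5_IB_UPD_XLT_ENABLE | MLX5_IB_UPD_XLT_ADDR);
 */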

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
	return IB_QP_CREATE_RESERVED_START;
}
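
/*
 * Usage sketch (hypothetical caller): the helper above keeps the reserved
 * flag value in one place, so a QP creation path would request the
 * capability with:
 *
 *	init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
 */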

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16		        unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};

struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			rq_num_pas;
	u32			log_rq_stride;
	u32			log_rq_size;
	u32			rq_page_offset;
	u32			log_page_size;
	struct ib_umem		*umem;
	size_t			buf_size;
	unsigned int		page_shift;
	int			create_type;
	struct mlx5_db		db;
	u32			user_index;
	u32			wqe_count;
	u32			wqe_shift;
	int			wq_sig;
};

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

enum {
	MLX5_WQ_USER,
	MLX5_WQ_KERNEL
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32			rqtn;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};

struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer  ubuffer;
	struct mlx5_db		*doorbell;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int			buf_size;
	unsigned long		offset;
	struct mlx5_sq_bfreg   *bfreg;
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
	};
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u8			sq_signal_bits;
	u8			fm_cache;
	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u32			flags;
	u8			port;
	u8			state;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	        bf;
	int			has_rq;

	/* only for user-space QPs; for kernel QPs
	 * we get it from the bf object
	 */
	int			bfregn;

	int			create_type;

	/* Store signature errors */
	bool			signature_en;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	u32			rate_limit;
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_LSO                          = IB_QP_CREATE_IPOIB_UD_LSO,
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK     = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX5_IB_QP_CROSS_CHANNEL            = IB_QP_CREATE_CROSS_CHANNEL,
	MLX5_IB_QP_MANAGED_SEND             = IB_QP_CREATE_MANAGED_SEND,
	MLX5_IB_QP_MANAGED_RECV             = IB_QP_CREATE_MANAGED_RECV,
	MLX5_IB_QP_SIGNATURE_HANDLING           = 1 << 5,
	/* QP uses 1 as its source QP number */
	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
	MLX5_IB_QP_RSS				= 1 << 8,
};

struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	u64				virt_addr;
	u64				offset;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			xlt_size;
	u64				length;
	int				access_flags;
	u32				mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem		*umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming an SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	void			*descs;
	dma_addr_t		desc_map;
	int			ndescs;
	int			max_descs;
	int			desc_size;
	int			access_mode;
	struct mlx5_core_mkey	mmkey;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	int			npages;
	struct mlx5_ib_dev     *dev;
	u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
	struct mlx5_core_sig_ctx    *sig;
	int			live;
	void			*descs_alloc;
	int			access_flags; /* Needed for rereg MR */
};

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_core_mkey	mmkey;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *dir;
	char                    name[4];
	u32                     order;
	u32			size;
	u32                     cur;
	u32                     miss;
	u32			limit;

	struct dentry          *fsize;
	struct dentry          *fcur;
	struct dentry          *fmiss;
	struct dentry          *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry		*root;
	unsigned long		last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
	struct mlx5_ib_resources *devr;
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
	/* Protects changes to the port resources */
	struct mutex	mutex;
};

struct mlx5_ib_port {
	u16 q_cnt_id;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	atomic_t		next_port;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct mlx5_roce		roce;
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
	int				fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct ib_odp_caps	odp_caps;
	u64			odp_max_size;
	/*
	 * Sleepable RCU that prevents destruction of MRs while they are still
	 * being used by a page fault handler.
	 */
	struct srcu_struct      mr_srcu;
#endif
	struct mlx5_ib_flow_db	flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg     bfreg;
	struct mlx5_sq_bfreg     fp_bfreg;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int access_flags,
			  struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
						      struct ib_rwq_ind_table_init_attr *init_attr,
						      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	return;
}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)	{}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void)				{}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
			u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}

static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Return a non-zero value for unsupported CQ create flags,
	 * zero otherwise.
	 */
	return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
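
/*
 * Usage sketch (hypothetical caller): a CQ creation path would reject any
 * flag this helper leaves set, e.g.
 *
 *	if (check_cq_create_flags(attr->flags))
 *		return ERR_PTR(-EINVAL);
 */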

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
	    !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if (!!(field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
	       !!cqe_version))
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_uars(struct mlx5_ib_dev *dev,
			       struct mlx5_bfreg_info *bfregi)
{
	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
}

#endif /* MLX5_IB_H */