/*
 * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_IB_H
#define MLX4_IB_H

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/idr.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#define MLX4_IB_DRV_NAME	"mlx4_ib"

#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt)	"<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__
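/*
 * Illustration only: with this pr_fmt, a call such as
 * pr_warn("qp %d\n", qpn) issued from a function named handle_qp()
 * would print roughly:
 *
 *	<mlx4_ib> handle_qp: qp 5
 *
 * handle_qp() and qpn are hypothetical names used only for this example.
 */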

#define mlx4_ib_warn(ibdev, format, arg...) \
	dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg)

enum {
	MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
	MLX4_IB_MAX_HEADROOM	 = 2048
};

#define MLX4_IB_SQ_HEADROOM(shift)	((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
#define MLX4_IB_SQ_MAX_SPARE		(MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
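/*
 * Worked example: at the minimum WQE shift (6, i.e. 64-byte WQEs),
 * MLX4_IB_SQ_HEADROOM(6) = (2048 >> 6) + 1 = 33, so MLX4_IB_SQ_MAX_SPARE
 * reserves 33 spare send WQEs, presumably so that the HCA's WQE prefetch
 * (bounded by MLX4_IB_MAX_HEADROOM bytes) never reads unposted entries.
 */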

/* module parameter indicating whether the SM assigns the alias GUID */
extern int mlx4_ib_sm_guid_assign;

#define MLX4_IB_UC_STEER_QPN_ALIGN 1
#define MLX4_IB_UC_MAX_NUM_QPS     256
struct mlx4_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct mlx4_uar		uar;
	struct list_head	db_page_list;
	struct mutex		db_page_mutex;
};

struct mlx4_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
};

struct mlx4_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
};

struct mlx4_ib_cq_buf {
	struct mlx4_buf		buf;
	struct mlx4_mtt		mtt;
	int			entry_size;
};

struct mlx4_ib_cq_resize {
	struct mlx4_ib_cq_buf	buf;
	int			cqe;
};

struct mlx4_ib_cq {
	struct ib_cq		ibcq;
	struct mlx4_cq		mcq;
	struct mlx4_ib_cq_buf	buf;
	struct mlx4_ib_cq_resize *resize_buf;
	struct mlx4_db		db;
	spinlock_t		lock;
	struct mutex		resize_mutex;
	struct ib_umem	       *umem;
	struct ib_umem	       *resize_umem;
	int			create_flags;
	/* list of QPs that this CQ serves */
	struct list_head		send_qp_list;
	struct list_head		recv_qp_list;
};

struct mlx4_ib_mr {
	struct ib_mr		ibmr;
	struct mlx4_mr		mmr;
	struct ib_umem	       *umem;
};

struct mlx4_ib_mw {
	struct ib_mw		ibmw;
	struct mlx4_mw		mmw;
};

struct mlx4_ib_fast_reg_page_list {
	struct ib_fast_reg_page_list	ibfrpl;
	__be64			       *mapped_page_list;
	dma_addr_t			map;
};

struct mlx4_ib_fmr {
	struct ib_fmr           ibfmr;
	struct mlx4_fmr         mfmr;
};

#define MAX_REGS_PER_FLOW 2

struct mlx4_flow_reg_id {
	u64 id;
	u64 mirror;
};

struct mlx4_ib_flow {
	struct ib_flow ibflow;
	/* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
	struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
};

struct mlx4_ib_wq {
	u64		       *wrid;
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
};

enum mlx4_ib_qp_flags {
	MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
	MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
	MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
	MLX4_IB_SRIOV_SQP = 1 << 31,
};
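/*
 * A minimal sketch, assuming these bits are kept in mlx4_ib_qp.flags: the
 * low bits mirror the IB core's ib_qp_create_flags, while bits 30 and 31
 * are mlx4-private markers for SR-IOV tunnel and special QPs, e.g.:
 *
 *	if (qp->flags & (MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP))
 *		... take the SR-IOV-specific path ... (illustrative only)
 */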

struct mlx4_ib_gid_entry {
	struct list_head	list;
	union ib_gid		gid;
	int			added;
	u8			port;
};

enum mlx4_ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	MLX4_IB_QPT_SMI = IB_QPT_SMI,
	MLX4_IB_QPT_GSI = IB_QPT_GSI,

	MLX4_IB_QPT_RC = IB_QPT_RC,
	MLX4_IB_QPT_UC = IB_QPT_UC,
	MLX4_IB_QPT_UD = IB_QPT_UD,
	MLX4_IB_QPT_RAW_IPV6 = IB_QPT_RAW_IPV6,
	MLX4_IB_QPT_RAW_ETHERTYPE = IB_QPT_RAW_ETHERTYPE,
	MLX4_IB_QPT_RAW_PACKET = IB_QPT_RAW_PACKET,
	MLX4_IB_QPT_XRC_INI = IB_QPT_XRC_INI,
	MLX4_IB_QPT_XRC_TGT = IB_QPT_XRC_TGT,

	MLX4_IB_QPT_PROXY_SMI_OWNER	= 1 << 16,
	MLX4_IB_QPT_PROXY_SMI		= 1 << 17,
	MLX4_IB_QPT_PROXY_GSI		= 1 << 18,
	MLX4_IB_QPT_TUN_SMI_OWNER	= 1 << 19,
	MLX4_IB_QPT_TUN_SMI		= 1 << 20,
	MLX4_IB_QPT_TUN_GSI		= 1 << 21,
};

#define MLX4_IB_QPT_ANY_SRIOV	(MLX4_IB_QPT_PROXY_SMI_OWNER | \
	MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER | \
	MLX4_IB_QPT_TUN_SMI | MLX4_IB_QPT_TUN_GSI)
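/*
 * Because the proxy/tunnel QP types occupy disjoint high bits, an SR-IOV
 * membership test reduces to a single mask (illustrative use of the
 * mlx4_ib_qp_type field declared in struct mlx4_ib_qp below):
 *
 *	if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
 *		... QP is an SR-IOV proxy or tunnel QP ...
 */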

enum mlx4_ib_mad_ifc_flags {
	MLX4_MAD_IFC_IGNORE_MKEY	= 1,
	MLX4_MAD_IFC_IGNORE_BKEY	= 2,
	MLX4_MAD_IFC_IGNORE_KEYS	= (MLX4_MAD_IFC_IGNORE_MKEY |
					   MLX4_MAD_IFC_IGNORE_BKEY),
	MLX4_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX4_NUM_TUNNEL_BUFS		= 256,
};

struct mlx4_ib_tunnel_header {
	struct mlx4_av av;
	__be32 remote_qpn;
	__be32 qkey;
	__be16 vlan;
	u8 mac[6];
	__be16 pkey_index;
	u8 reserved[6];
};

struct mlx4_ib_buf {
	void *addr;
	dma_addr_t map;
};

struct mlx4_rcv_tunnel_hdr {
	__be32 flags_src_qp; /* flags[6:5] holds the VLAN encoding:
			      * 0x0 - no VLAN was present in the packet
			      * 0x1 - a C-VLAN was present in the packet */
	u8 g_ml_path; /* for RoCE, the GID bit indicates an IPv4/IPv6 header */
	u8 reserved;
	__be16 pkey_index;
	__be16 sl_vid;
	__be16 slid_mac_47_32;
	__be32 mac_31_0;
};

struct mlx4_ib_proxy_sqp_hdr {
	struct ib_grh grh;
	struct mlx4_rcv_tunnel_hdr tun;
}  __packed;

struct mlx4_roce_smac_vlan_info {
	u64 smac;
	int smac_index;
	int smac_port;
	u64 candidate_smac;
	int candidate_smac_index;
	int candidate_smac_port;
	u16 vid;
	int vlan_index;
	int vlan_port;
	u16 candidate_vid;
	int candidate_vlan_index;
	int candidate_vlan_port;
	int update_vid;
};

struct mlx4_ib_qp {
	struct ib_qp		ibqp;
	struct mlx4_qp		mqp;
	struct mlx4_buf		buf;

	struct mlx4_db		db;
	struct mlx4_ib_wq	rq;

	u32			doorbell_qpn;
	__be32			sq_signal_bits;
	unsigned		sq_next_wqe;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct mlx4_ib_wq	sq;

	enum mlx4_ib_qp_type	mlx4_ib_qp_type;
	struct ib_umem	       *umem;
	struct mlx4_mtt		mtt;
	int			buf_size;
	struct mutex		mutex;
	u16			xrcdn;
	u32			flags;
	u8			port;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
	u8			sq_no_prefetch;
	u8			state;
	int			mlx_type;
	struct list_head	gid_list;
	struct list_head	steering_rules;
	struct mlx4_ib_buf	*sqp_proxy_rcv;
	struct mlx4_roce_smac_vlan_info pri;
	struct mlx4_roce_smac_vlan_info alt;
	u64			reg_id;
	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
};

struct mlx4_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx4_srq		msrq;
	struct mlx4_buf		buf;
	struct mlx4_db		db;
	u64		       *wrid;
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	struct mlx4_mtt		mtt;
	struct mutex		mutex;
};

struct mlx4_ib_ah {
	struct ib_ah		ibah;
	union mlx4_ext_av       av;
};

/****************************************/
/* alias guid support */
/****************************************/
#define NUM_PORT_ALIAS_GUID		2
#define NUM_ALIAS_GUID_IN_REC		8
#define NUM_ALIAS_GUID_REC_IN_PORT	16
#define GUID_REC_SIZE			8
#define NUM_ALIAS_GUID_PER_PORT		128
#define MLX4_NOT_SET_GUID		(0x00LL)
#define MLX4_GUID_FOR_DELETE_VAL	(~(0x00LL))
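/*
 * Sanity arithmetic for the constants above: each GUIDInfo record holds
 * NUM_ALIAS_GUID_IN_REC (8) GUIDs of GUID_REC_SIZE (8) bytes each, and
 * each port has NUM_ALIAS_GUID_REC_IN_PORT (16) records, giving
 * 16 * 8 = 128 = NUM_ALIAS_GUID_PER_PORT alias GUIDs per port.
 */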

enum mlx4_guid_alias_rec_status {
	MLX4_GUID_INFO_STATUS_IDLE,
	MLX4_GUID_INFO_STATUS_SET,
};

#define GUID_STATE_NEED_PORT_INIT 0x01

enum mlx4_guid_alias_rec_method {
	MLX4_GUID_INFO_RECORD_SET	= IB_MGMT_METHOD_SET,
	MLX4_GUID_INFO_RECORD_DELETE	= IB_SA_METHOD_DELETE,
};

struct mlx4_sriov_alias_guid_info_rec_det {
	u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
	ib_sa_comp_mask guid_indexes; /* indicates which of the 8 records are valid */
	enum mlx4_guid_alias_rec_status status; /* administrative status of the record */
	unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC];
	u64 time_to_run;
};

struct mlx4_sriov_alias_guid_port_rec_det {
	struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT];
	struct workqueue_struct *wq;
	struct delayed_work alias_guid_work;
	u8 port;
	u32 state_flags;
	struct mlx4_sriov_alias_guid *parent;
	struct list_head cb_list;
};

struct mlx4_sriov_alias_guid {
	struct mlx4_sriov_alias_guid_port_rec_det ports_guid[MLX4_MAX_PORTS];
	spinlock_t ag_work_lock;
	struct ib_sa_client *sa_client;
};

struct mlx4_ib_demux_work {
	struct work_struct	work;
	struct mlx4_ib_dev     *dev;
	int			slave;
	int			do_init;
	u8			port;
};

struct mlx4_ib_tun_tx_buf {
	struct mlx4_ib_buf buf;
	struct ib_ah *ah;
};

struct mlx4_ib_demux_pv_qp {
	struct ib_qp *qp;
	enum ib_qp_type proxy_qpt;
	struct mlx4_ib_buf *ring;
	struct mlx4_ib_tun_tx_buf *tx_ring;
	spinlock_t tx_lock;
	unsigned tx_ix_head;
	unsigned tx_ix_tail;
};

enum mlx4_ib_demux_pv_state {
	DEMUX_PV_STATE_DOWN,
	DEMUX_PV_STATE_STARTING,
	DEMUX_PV_STATE_ACTIVE,
	DEMUX_PV_STATE_DOWNING,
};

struct mlx4_ib_demux_pv_ctx {
	int port;
	int slave;
	enum mlx4_ib_demux_pv_state state;
	int has_smi;
	struct ib_device *ib_dev;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_mr *mr;
	struct work_struct work;
	struct workqueue_struct *wq;
	struct mlx4_ib_demux_pv_qp qp[2];
};

struct mlx4_ib_demux_ctx {
	struct ib_device *ib_dev;
	int port;
	struct workqueue_struct *wq;
	struct workqueue_struct *ud_wq;
	spinlock_t ud_lock;
	__be64 subnet_prefix;
	__be64 guid_cache[128];
	struct mlx4_ib_dev *dev;
	/* the following lock protects both mcg_table and mcg_mgid0_list */
	struct mutex		mcg_table_lock;
	struct rb_root		mcg_table;
	struct list_head	mcg_mgid0_list;
	struct workqueue_struct	*mcg_wq;
	struct mlx4_ib_demux_pv_ctx **tun;
	atomic_t tid;
	int    flushing; /* flushing the work queue */
};

struct mlx4_ib_sriov {
	struct mlx4_ib_demux_ctx demux[MLX4_MAX_PORTS];
	struct mlx4_ib_demux_pv_ctx *sqps[MLX4_MAX_PORTS];
	/* When taking this spinlock, use the _irq variants: it may be
	 * acquired from interrupt context. */
	spinlock_t going_down_lock;
	int is_going_down;

	struct mlx4_sriov_alias_guid alias_guid;

	/* CM paravirtualization fields */
	struct list_head cm_list;
	spinlock_t id_map_lock;
	struct rb_root sl_id_map;
	struct idr pv_id_table;
};

struct mlx4_ib_iboe {
	spinlock_t		lock;
	struct net_device      *netdevs[MLX4_MAX_PORTS];
	struct net_device      *masters[MLX4_MAX_PORTS];
	atomic64_t		mac[MLX4_MAX_PORTS];
	struct notifier_block	nb;
	struct notifier_block	nb_inet;
	struct notifier_block	nb_inet6;
	union ib_gid		gid_table[MLX4_MAX_PORTS][128];
};

struct pkey_mgt {
	u8			virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	u16			phys_pkey_cache[MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	struct list_head	pkey_port_list[MLX4_MFUNC_MAX];
	struct kobject	       *device_parent[MLX4_MFUNC_MAX];
};

struct mlx4_ib_iov_sysfs_attr {
	void *ctx;
	struct kobject *kobj;
	unsigned long data;
	u32 entry_num;
	char name[15];
	struct device_attribute dentry;
	struct device *dev;
};

struct mlx4_ib_iov_sysfs_attr_ar {
	struct mlx4_ib_iov_sysfs_attr dentries[3 * NUM_ALIAS_GUID_PER_PORT + 1];
};

struct mlx4_ib_iov_port {
	char name[100];
	u8 num;
	struct mlx4_ib_dev *dev;
	struct list_head list;
	struct mlx4_ib_iov_sysfs_attr_ar *dentr_ar;
	struct ib_port_attr attr;
	struct kobject	*cur_port;
	struct kobject	*admin_alias_parent;
	struct kobject	*gids_parent;
	struct kobject	*pkeys_parent;
	struct kobject	*mcgs_parent;
	struct mlx4_ib_iov_sysfs_attr mcg_dentry;
};

struct counter_index {
	u32		index;
	u8		allocated;
};

struct mlx4_ib_dev {
	struct ib_device	ib_dev;
	struct mlx4_dev	       *dev;
	int			num_ports;
	void __iomem	       *uar_map;

	struct mlx4_uar		priv_uar;
	u32			priv_pdn;
	MLX4_DECLARE_DOORBELL_LOCK(uar_lock);

	struct ib_mad_agent    *send_agent[MLX4_MAX_PORTS][2];
	struct ib_ah	       *sm_ah[MLX4_MAX_PORTS];
	spinlock_t		sm_lock;
	struct mlx4_ib_sriov	sriov;

	struct mutex		cap_mask_mutex;
	bool			ib_active;
	struct mlx4_ib_iboe	iboe;
	struct counter_index    counters[MLX4_MAX_PORTS];
	int		       *eq_table;
	struct kobject	       *iov_parent;
	struct kobject	       *ports_parent;
	struct kobject	       *dev_ports_parent[MLX4_MFUNC_MAX];
	struct mlx4_ib_iov_port	iov_ports[MLX4_MAX_PORTS];
	struct pkey_mgt		pkeys;
	unsigned long *ib_uc_qpns_bitmap;
	int steer_qpn_count;
	int steer_qpn_base;
	int steering_support;
	struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
	/* protects qp1_proxy against teardown racing with netdev events */
	struct mutex		qp1_proxy_lock[MLX4_MAX_PORTS];
	u8			bond_next_port;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head		qp_list;
};

struct ib_event_work {
	struct work_struct	work;
	struct mlx4_ib_dev	*ib_dev;
	struct mlx4_eqe		ib_eqe;
};

struct mlx4_ib_qp_tunnel_init_attr {
	struct ib_qp_init_attr init_attr;
	int slave;
	enum ib_qp_type proxy_qp_type;
	u8 port;
};

struct mlx4_uverbs_ex_query_device {
	__u32 comp_mask;
	__u32 reserved;
};

enum query_device_resp_mask {
	QUERY_DEVICE_RESP_MASK_TIMESTAMP = 1UL << 0,
};

struct mlx4_uverbs_ex_query_device_resp {
	__u32 comp_mask;
	__u32 response_length;
	__u64 hca_core_clock_offset;
};

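/*
 * The to_*() helpers below all follow one pattern: the IB core hands back
 * a pointer to the embedded ib_* member, and container_of() recovers the
 * enclosing mlx4 structure. A minimal sketch (hypothetical function, for
 * illustration only):
 *
 *	static void example_dump_cqn(struct ib_cq *ibcq)
 *	{
 *		struct mlx4_ib_cq *cq = to_mcq(ibcq);
 *
 *		pr_debug("cqn 0x%x\n", cq->mcq.cqn);
 *	}
 */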
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
}

static inline struct mlx4_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx4_ib_ucontext, ibucontext);
}

static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx4_ib_pd, ibpd);
}

static inline struct mlx4_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx4_ib_xrcd, ibxrcd);
}

static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx4_ib_cq, ibcq);
}

static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)
{
	return container_of(mcq, struct mlx4_ib_cq, mcq);
}

static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx4_ib_mr, ibmr);
}

static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx4_ib_mw, ibmw);
}

static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
	return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
}

static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
}

static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
{
	return container_of(ibflow, struct mlx4_ib_flow, ibflow);
}

static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx4_ib_qp, ibqp);
}

static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_qp, mqp);
}

static inline struct mlx4_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx4_ib_srq, ibsrq);
}

static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)
{
	return container_of(msrq, struct mlx4_ib_srq, msrq);
}

static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx4_ib_ah, ibah);
}

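/*
 * Round-robin over the physical ports of a bonded device: bond_next_port
 * holds the previous zero-based index, and the return value is one-based.
 * Worked example with num_ports == 2: successive calls return
 * 2, 1, 2, 1, ...
 */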
static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
{
	dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;

	return dev->bond_next_port + 1;
}

int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);

int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
			struct mlx4_db *db);
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem);
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr);
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
		    struct ib_mw_bind *mw_bind);
int mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len);
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len);
void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx4_ib_destroy_cq(struct ib_cq *cq);
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);

struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx4_ib_destroy_ah(struct ib_ah *ah);

struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mlx4_ib_destroy_srq(struct ib_srq *srq);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);

struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx4_ib_destroy_qp(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);

int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);

struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
				  struct ib_fmr_attr *fmr_attr);
int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
			 u64 iova);
int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view);
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view);

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view);

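/*
 * Notes on the bit twiddling below (based on the mlx4 AV layout): the top
 * byte of av.ib.port_pd carries the port number, masked with 3 since mlx4
 * devices expose at most two IB ports; bit 7 of g_slid is the GRH-present
 * "g" bit. On an Ethernet (RoCE) link layer a GRH is always present, so
 * the link-layer check short-circuits the g-bit test.
 */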
static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
	u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;

	if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
		return true;

	return !!(ah->av.ib.g_slid & 0x80);
}

int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx);
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq);
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave);
int mlx4_ib_mcg_init(void);
void mlx4_ib_mcg_destroy(void);

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid);

int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave,
				  struct ib_sa_mad *sa_mad);
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad);

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid);

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type);

void mlx4_ib_tunnels_update_work(struct work_struct *work);

int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad);

int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
			 u32 qkey, struct ib_ah_attr *attr, u8 *s_mac,
			 struct ib_mad *mad);

__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);

int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
		struct ib_mad *mad);

int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
		struct ib_mad *mad);

void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev);
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave_id);

/* alias guid support */
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port);
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port);

void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num,
					  u8 port_num, u8 *p_data);

void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev,
					 int block_num, u8 port_num,
					 u8 *p_data);

int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
			    struct attribute *attr);
void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
			     struct attribute *attr);
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
				    int port, int slave_init);

int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device);

void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device);

__be64 mlx4_ib_gen_node_guid(void);

int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach);
int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
			  u64 start, u64 length, u64 virt_addr,
			  int mr_access_flags, struct ib_pd *pd,
			  struct ib_udata *udata);

#endif /* MLX4_IB_H */