/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * File: qlnxr_def.h
 * Author: David C Somayajulu
 */

#ifndef __QLNX_DEF_H_
#define __QLNX_DEF_H_

#include <sys/ktr.h>

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <sys/vmem.h>

#include <asm/byteorder.h>

#include <netinet/in.h>
#include <net/ipv6.h>
#include <netinet/toecore.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>
#include <rdma/uverbs_ioctl.h>

#if __FreeBSD_version < 1100000
#undef MODULE_VERSION
#endif

#include "qlnx_os.h"
#include "bcm_osal.h"

#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#ifdef CONFIG_ECORE_SRIOV
#include "ecore_sriov.h"
#include "ecore_vf.h"
#endif
#ifdef CONFIG_ECORE_LL2
#include "ecore_ll2.h"
#endif
#ifdef CONFIG_ECORE_FCOE
#include "ecore_fcoe.h"
#endif
#ifdef CONFIG_ECORE_ISCSI
#include "ecore_iscsi.h"
#endif
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"

#ifdef CONFIG_ECORE_RDMA
#include "ecore_rdma.h"
#include "ecore_rdma_api.h"
#endif

#ifdef CONFIG_ECORE_ROCE
#include "ecore_roce.h"
#endif

#ifdef CONFIG_ECORE_IWARP
#include "ecore_iwarp.h"
#endif

#include "ecore_iro.h"
#include "nvm_cfg.h"

#include "ecore_dbg_fw_funcs.h"
#include "rdma_common.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_rdma.h"
#include "qlnxr_verbs.h"
#include "qlnxr_user.h"
#include "qlnx_ver.h"
#include <sys/smp.h>

#define QLNXR_ROCE_INTERFACE_VERSION     1801

#define QLNXR_MODULE_VERSION     "8.18.1.0"
#define QLNXR_NODE_DESC "QLogic 579xx RoCE HCA"

#define OC_SKH_DEVICE_PF 0x720
#define OC_SKH_DEVICE_VF 0x728
#define QLNXR_MAX_AH 512

/* QLNXR Limitations */

/* SQ/RQ Limitations
 * An S/RQ PBL contains a list of pointers to pages. Each page contains S/RQE
 * elements. Several S/RQE elements make up an S/RQ WQE, up to a certain
 * maximum that differs between the SQ and the RQ. The size of the PBL was
 * chosen so as not to limit the MAX_WR supported by ECORE, and rounded up to
 * a power of two.
 */
/* SQ */
#define QLNXR_MAX_SQ_PBL (0x8000) /* 2^15 bytes */
#define QLNXR_MAX_SQ_PBL_ENTRIES (0x10000 / sizeof(void *)) /* number */
#define QLNXR_SQE_ELEMENT_SIZE (sizeof(struct rdma_sq_sge)) /* bytes */
#define QLNXR_MAX_SQE_ELEMENTS_PER_SQE (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
                QLNXR_SQE_ELEMENT_SIZE) /* number */
#define QLNXR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
                QLNXR_SQE_ELEMENT_SIZE) /* number */
#define QLNXR_MAX_SQE ((QLNXR_MAX_SQ_PBL_ENTRIES) * (RDMA_RING_PAGE_SIZE) / \
                (QLNXR_SQE_ELEMENT_SIZE) / (QLNXR_MAX_SQE_ELEMENTS_PER_SQE))
/* RQ */
#define QLNXR_MAX_RQ_PBL (0x2000) /* 2^13 bytes */
#define QLNXR_MAX_RQ_PBL_ENTRIES (0x10000 / sizeof(void *)) /* number */
#define QLNXR_RQE_ELEMENT_SIZE (sizeof(struct rdma_rq_sge)) /* bytes */
#define QLNXR_MAX_RQE_ELEMENTS_PER_RQE (RDMA_MAX_SGE_PER_RQ_WQE) /* number */
#define QLNXR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
                QLNXR_RQE_ELEMENT_SIZE) /* number */
#define QLNXR_MAX_RQE ((QLNXR_MAX_RQ_PBL_ENTRIES) * (RDMA_RING_PAGE_SIZE) / \
                (QLNXR_RQE_ELEMENT_SIZE) / (QLNXR_MAX_RQE_ELEMENTS_PER_RQE))
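
/*
 * Worked example (illustrative only; the concrete values below are
 * assumptions for the sake of the arithmetic: a 4 KiB RDMA_RING_PAGE_SIZE,
 * 8-byte pointers, a 16-byte struct rdma_sq_sge and a 256-byte
 * ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE; the real values come from the FW HSI):
 *
 *   QLNXR_MAX_SQ_PBL_ENTRIES       = 0x10000 / 8  = 8192 page pointers
 *   QLNXR_MAX_SQE_ELEMENTS_PER_SQE = 256 / 16     = 16 elements per WQE
 *   QLNXR_MAX_SQE = 8192 * 4096 / 16 / 16         = 131072 send WQEs
 *
 * i.e. the PBL can describe far more WQEs than ECORE's MAX_WR, which is the
 * design goal stated in the comment above.
 */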

/* CQE Limitation
 * Although the FW supports a two-layer PBL, we use a single layer since it is
 * more than enough. For that layer we use a maximum size of 512 kB, again,
 * because it reaches the maximum number of page pointers. Note the '-1' in
 * the calculation, which comes from having a u16 for the number of pages,
 * i.e. 0xffff is the maximum number of pages (in a single layer).
 */
#define QLNXR_CQE_SIZE   (sizeof(union rdma_cqe))
#define QLNXR_MAX_CQE_PBL_SIZE (512*1024) /* 512kB */
#define QLNXR_MAX_CQE_PBL_ENTRIES (((QLNXR_MAX_CQE_PBL_SIZE) / \
                                  sizeof(u64)) - 1) /* 64k - 1 */
#define QLNXR_MAX_CQES ((u32)((QLNXR_MAX_CQE_PBL_ENTRIES) * (ECORE_CHAIN_PAGE_SIZE)\
                             / QLNXR_CQE_SIZE)) /* 8M - 4096/32 = 8,388,480 */
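
/*
 * Worked example (illustrative; assumes, per the note above, that
 * ECORE_CHAIN_PAGE_SIZE is 4096 and union rdma_cqe is 32 bytes):
 *
 *   QLNXR_MAX_CQE_PBL_ENTRIES = (512 KiB / 8) - 1 = 65535 page pointers
 *   QLNXR_MAX_CQES = 65535 * 4096 / 32 = 8,388,480 CQEs
 *
 * which matches the "8M - 4096/32" note: 8M = 8,388,608, minus
 * 4096/32 = 128.
 */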

/* CNQ size Limitation
 * The maximum CNQ size is not reachable because the FW supports a chain of
 * u16 elements (specifically 64k-1). The FW can buffer CNQ elements to avoid
 * an overflow, at the expense of performance. Hence we set it to an
 * arbitrarily smaller value than the maximum.
 */
#define QLNXR_ROCE_MAX_CNQ_SIZE          (0x4000) /* 2^14 */

#define QLNXR_MAX_PORT                   (1)
#define QLNXR_PORT                       (1)

#define QLNXR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
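
/*
 * Example (illustrative): combining the low and high 32-bit halves of a
 * FW-provided value:
 *
 *   convert_to_64bit(0x44332211, 0x88776655) == 0x8877665544332211ULL
 */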

/* The following numbers are used to determine if a handle received from the
 * FW actually points to a CQ/QP.
 */
#define QLNXR_CQ_MAGIC_NUMBER    (0x11223344)
#define QLNXR_QP_MAGIC_NUMBER    (0x77889900)

/* Fast path debug prints */
#define FP_DP_VERBOSE(...)
/* #define FP_DP_VERBOSE(...)   DP_VERBOSE(__VA_ARGS__) */

#define FW_PAGE_SIZE    (RDMA_RING_PAGE_SIZE)

#define QLNXR_MSG_INIT		0x10000
#define QLNXR_MSG_FAIL		0x10000
#define QLNXR_MSG_CQ		0x20000
#define QLNXR_MSG_RQ		0x40000
#define QLNXR_MSG_SQ		0x80000
#define QLNXR_MSG_QP		(QLNXR_MSG_SQ | QLNXR_MSG_RQ)
#define QLNXR_MSG_MR		0x100000
#define QLNXR_MSG_GSI		0x200000
#define QLNXR_MSG_MISC		0x400000
#define QLNXR_MSG_SRQ		0x800000
#define QLNXR_MSG_IWARP		0x1000000

#define QLNXR_ROCE_PKEY_MAX		1
#define QLNXR_ROCE_PKEY_TABLE_LEN	1
#define QLNXR_ROCE_PKEY_DEFAULT		0xffff

#define QLNXR_MAX_SGID			128 /* TBD - add more source gids... */

#define QLNXR_ENET_STATE_BIT     (0)

#define QLNXR_MAX_MSIX		(16)

struct qlnxr_cnq {
        struct qlnxr_dev	*dev;
        struct ecore_chain	pbl;
        struct ecore_sb_info	*sb;
        char			name[32];
        u64			n_comp;
        __le16			*hw_cons_ptr;
        u8			index;
        int			irq_rid;
        struct resource		*irq;
        void			*irq_handle;
};

struct qlnxr_device_attr {
        /* Vendor specific information */
        u32     vendor_id;
        u32     vendor_part_id;
        u32     hw_ver;
        u64     fw_ver;

        u64     node_guid;      /* node GUID */
        u64     sys_image_guid; /* System image GUID */

        u8      max_cnq;
        u8      max_sge;        /* Maximum # of scatter/gather entries
                                 * per Work Request supported
                                 */
        u16     max_inline;
        u32     max_sqe;        /* Maximum number of outstanding send work
                                 * requests on any Work Queue supported
                                 */
        u32     max_rqe;        /* Maximum number of outstanding receive work
                                 * requests on any Work Queue supported
                                 */
        u8      max_qp_resp_rd_atomic_resc;     /* Maximum number of RDMA Reads
                                                 * & atomic operations that can
                                                 * be outstanding per QP
                                                 */

        u8      max_qp_req_rd_atomic_resc;      /* The maximum depth per QP for
                                                 * initiation of RDMA Read
                                                 * & atomic operations
                                                 */
        u64     max_dev_resp_rd_atomic_resc;
        u32     max_cq;
        u32     max_qp;
        u32     max_mr;         /* Maximum # of MRs supported */
        u64     max_mr_size;    /* Size (in bytes) of largest contiguous memory
                                 * block that can be registered by this device
                                 */
        u32     max_cqe;
        u32     max_mw;         /* Maximum # of memory windows supported */
        u32     max_fmr;
        u32     max_mr_mw_fmr_pbl;
        u64     max_mr_mw_fmr_size;
        u32     max_pd;         /* Maximum # of protection domains supported */
        u32     max_ah;
        u8      max_pkey;
        u32     max_srq;        /* Maximum number of SRQs */
        u32     max_srq_wr;     /* Maximum number of WRs per SRQ */
        u8      max_srq_sge;    /* Maximum number of SGEs per WQE */
        u8      max_stats_queues; /* Maximum number of statistics queues */
        u32     dev_caps;

        /* Ability to support RNR-NAK generation */
#define QLNXR_ROCE_DEV_CAP_RNR_NAK_MASK                           0x1
#define QLNXR_ROCE_DEV_CAP_RNR_NAK_SHIFT                          0
        /* Ability to support port shutdown */
#define QLNXR_ROCE_DEV_CAP_SHUTDOWN_PORT_MASK                     0x1
#define QLNXR_ROCE_DEV_CAP_SHUTDOWN_PORT_SHIFT                    1
        /* Ability to support the port active event */
#define QLNXR_ROCE_DEV_CAP_PORT_ACTIVE_EVENT_MASK                 0x1
#define QLNXR_ROCE_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT                2
        /* Ability to support the port change event */
#define QLNXR_ROCE_DEV_CAP_PORT_CHANGE_EVENT_MASK                 0x1
#define QLNXR_ROCE_DEV_CAP_PORT_CHANGE_EVENT_SHIFT                3
        /* Ability to support the system image GUID */
#define QLNXR_ROCE_DEV_CAP_SYS_IMAGE_MASK                         0x1
#define QLNXR_ROCE_DEV_CAP_SYS_IMAGE_SHIFT                        4
        /* Ability to support the bad P_Key counter */
#define QLNXR_ROCE_DEV_CAP_BAD_PKEY_CNT_MASK                      0x1
#define QLNXR_ROCE_DEV_CAP_BAD_PKEY_CNT_SHIFT                     5
        /* Ability to support atomic operations */
#define QLNXR_ROCE_DEV_CAP_ATOMIC_OP_MASK                         0x1
#define QLNXR_ROCE_DEV_CAP_ATOMIC_OP_SHIFT                        6
#define QLNXR_ROCE_DEV_CAP_RESIZE_CQ_MASK                         0x1
#define QLNXR_ROCE_DEV_CAP_RESIZE_CQ_SHIFT                        7
        /* Ability to support modifying the maximum number of
         * outstanding work requests per QP
         */
#define QLNXR_ROCE_DEV_CAP_RESIZE_MAX_WR_MASK                     0x1
#define QLNXR_ROCE_DEV_CAP_RESIZE_MAX_WR_SHIFT                    8

        /* Ability to support automatic path migration */
#define QLNXR_ROCE_DEV_CAP_AUTO_PATH_MIG_MASK                     0x1
#define QLNXR_ROCE_DEV_CAP_AUTO_PATH_MIG_SHIFT                    9
        /* Ability to support the base memory management extensions */
#define QLNXR_ROCE_DEV_CAP_BASE_MEMORY_EXT_MASK                   0x1
#define QLNXR_ROCE_DEV_CAP_BASE_MEMORY_EXT_SHIFT                  10
#define QLNXR_ROCE_DEV_CAP_BASE_QUEUE_EXT_MASK                    0x1
#define QLNXR_ROCE_DEV_CAP_BASE_QUEUE_EXT_SHIFT                   11
        /* Ability to support multiple page sizes per memory region */
#define QLNXR_ROCE_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK             0x1
#define QLNXR_ROCE_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT            12
        /* Ability to support a block list physical buffer list */
#define QLNXR_ROCE_DEV_CAP_BLOCK_MODE_MASK                        0x1
#define QLNXR_ROCE_DEV_CAP_BLOCK_MODE_SHIFT                       13
        /* Ability to support zero based virtual addresses */
#define QLNXR_ROCE_DEV_CAP_ZBVA_MASK                              0x1
#define QLNXR_ROCE_DEV_CAP_ZBVA_SHIFT                             14
        /* Ability to support local invalidate fencing */
#define QLNXR_ROCE_DEV_CAP_LOCAL_INV_FENCE_MASK                   0x1
#define QLNXR_ROCE_DEV_CAP_LOCAL_INV_FENCE_SHIFT                  15
        /* Ability to support loopback on a QP */
#define QLNXR_ROCE_DEV_CAP_LB_INDICATOR_MASK                      0x1
#define QLNXR_ROCE_DEV_CAP_LB_INDICATOR_SHIFT                     16
        u64                     page_size_caps;
        u8                      dev_ack_delay;
        u32                     reserved_lkey;   /* Value of reserved L_key */
        u32                     bad_pkey_counter;/* Bad P_key counter support
                                                  * indicator
                                                  */
        struct ecore_rdma_events  events;
};
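
/*
 * Illustrative helper (hypothetical, not part of the original driver API):
 * tests one of the dev_caps bits using the MASK/SHIFT pairs above, e.g.
 *
 *   qlnxr_dev_cap_is_set(&attr, QLNXR_ROCE_DEV_CAP_ATOMIC_OP_MASK,
 *                        QLNXR_ROCE_DEV_CAP_ATOMIC_OP_SHIFT)
 */
static inline bool
qlnxr_dev_cap_is_set(struct qlnxr_device_attr *attr, u32 mask, u32 shift)
{
        return (((attr->dev_caps >> shift) & mask) != 0);
}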

struct qlnxr_dev {
	struct ib_device	ibdev;
	qlnx_host_t		*ha;
	struct ecore_dev	*cdev;

	/* Added to extend application support */
	struct pci_dev		pdev;
	uint32_t		dp_module;
	uint8_t			dp_level;

	void			*rdma_ctx;

	struct mtx		idr_lock;
	struct idr		qpidr;

	uint32_t		wq_multiplier;
	int			num_cnq;

	struct ecore_sb_info	sb_array[QLNXR_MAX_MSIX];
	struct qlnxr_cnq	cnq_array[QLNXR_MAX_MSIX];

	int			sb_start;

	int			gsi_qp_created;
	struct qlnxr_cq		*gsi_sqcq;
	struct qlnxr_cq		*gsi_rqcq;
	struct qlnxr_qp		*gsi_qp;

	/* TBD: we'll need an array of these, probably per DPI... */
	void __iomem		*db_addr;
	uint64_t		db_phys_addr;
	uint32_t		db_size;
	uint16_t		dpi;

	uint64_t		guid;
	enum ib_atomic_cap	atomic_cap;

	union ib_gid		sgid_tbl[QLNXR_MAX_SGID];
	struct mtx		sgid_lock;
	struct notifier_block	nb_inet;
	struct notifier_block	nb_inet6;

	uint8_t			mr_key;
	struct list_head	entry;

	struct dentry		*dbgfs;

	uint8_t			gsi_ll2_mac_address[ETH_ALEN];
	uint8_t			gsi_ll2_handle;

	unsigned long		enet_state;

	struct workqueue_struct	*iwarp_wq;

	volatile uint32_t	pd_count;
	struct qlnxr_device_attr attr;
	uint8_t			user_dpm_enabled;
};

typedef struct qlnxr_dev qlnxr_dev_t;

struct qlnxr_pd {
        struct ib_pd ibpd;
        u32 pd_id;
        struct qlnxr_ucontext *uctx;
};

struct qlnxr_ucontext {
        struct ib_ucontext ibucontext;
        struct qlnxr_dev *dev;
        struct qlnxr_pd *pd;
        u64 dpi_addr;
        u64 dpi_phys_addr;
        u32 dpi_size;
        u16 dpi;

        struct list_head mm_head;
        struct mutex mm_list_lock;
};

struct qlnxr_dev_attr {
        struct ib_device_attr ib_attr;
};

struct qlnxr_dma_mem {
        void *va;
        dma_addr_t pa;
        u32 size;
};

struct qlnxr_pbl {
        struct list_head list_entry;
        void *va;
        dma_addr_t pa;
};

struct qlnxr_queue_info {
        void *va;
        dma_addr_t dma;
        u32 size;
        u16 len;
        u16 entry_size;         /* Size of an element in the queue */
        u16 id;                 /* qid, where to ring the doorbell. */
        u16 head, tail;
        bool created;
};

struct qlnxr_eq {
        struct qlnxr_queue_info q;
        u32 vector;
        int cq_cnt;
        struct qlnxr_dev *dev;
        char irq_name[32];
};

struct qlnxr_mq {
        struct qlnxr_queue_info sq;
        struct qlnxr_queue_info cq;
        bool rearm_cq;
};

struct phy_info {
        u16 auto_speeds_supported;
        u16 fixed_speeds_supported;
        u16 phy_type;
        u16 interface_type;
};

union db_prod64 {
        struct rdma_pwm_val32_data data;
        u64 raw;
};

enum qlnxr_cq_type {
        QLNXR_CQ_TYPE_GSI,
        QLNXR_CQ_TYPE_KERNEL,
        QLNXR_CQ_TYPE_USER
};

struct qlnxr_pbl_info {
        u32 num_pbls;
        u32 num_pbes;
        u32 pbl_size;
        u32 pbe_size;
        bool two_layered;
};

struct qlnxr_userq {
        struct ib_umem *umem;
        struct qlnxr_pbl_info pbl_info;
        struct qlnxr_pbl *pbl_tbl;
        u64 buf_addr;
        size_t buf_len;
};

struct qlnxr_cq {
        struct ib_cq		ibcq; /* must be first */

        enum qlnxr_cq_type	cq_type;
        uint32_t		sig;
        uint16_t		icid;

        /* relevant to cqs created from kernel space only (ULPs) */
        spinlock_t		cq_lock;
        uint8_t			arm_flags;
        struct ecore_chain	pbl;

        void __iomem		*db_addr; /* db address for cons update */
        union db_prod64		db;

        uint8_t			pbl_toggle;
        union rdma_cqe		*latest_cqe;
        union rdma_cqe		*toggle_cqe;

        /* TODO: remove since it is redundant with 32 bit chains */
        uint32_t		cq_cons;

        /* relevant to cqs created from user space only (applications) */
        struct qlnxr_userq	q;

        /* destroy-IRQ handler race prevention */
        uint8_t			destroyed;
        uint16_t		cnq_notif;
};
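
/*
 * Illustrative sketch (hypothetical, not part of the original driver API):
 * the sig field is expected to hold QLNXR_CQ_MAGIC_NUMBER for a live CQ, so
 * a handle received from the FW can be sanity-checked along these lines.
 */
static inline bool
qlnxr_cq_sig_valid(struct qlnxr_cq *cq)
{
        return (cq != NULL && cq->sig == QLNXR_CQ_MAGIC_NUMBER);
}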

struct qlnxr_ah {
        struct ib_ah		ibah;
        struct ib_ah_attr	attr;
};

union db_prod32 {
        struct rdma_pwm_val16_data data;
        u32 raw;
};

struct qlnxr_qp_hwq_info {
        /* WQE Elements */
        struct ecore_chain      pbl;
        u64                     p_phys_addr_tbl;
        u32                     max_sges;

        /* WQE */
        u16                     prod;     /* WQE prod index for SW ring */
        u16                     cons;     /* WQE cons index for SW ring */
        u16                     wqe_cons;
        u16                     gsi_cons; /* filled in by GSI implementation */
        u16                     max_wr;

        /* DB */
        void __iomem            *db;      /* Doorbell address */
        union db_prod32         db_data;  /* Doorbell data */

        /* Required for iwarp_only */
        void __iomem            *iwarp_db2;      /* Doorbell address */
        union db_prod32         iwarp_db2_data;  /* Doorbell data */
};

#define QLNXR_INC_SW_IDX(p_info, index)                         \
        do {                                                    \
                p_info->index = (p_info->index + 1) %           \
                        ecore_chain_get_capacity(p_info->pbl);  \
        } while (0)
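
/*
 * Example (illustrative): advance the SW 'cons' index of a work queue ring,
 * wrapping at the chain capacity:
 *
 *   struct qlnxr_qp_hwq_info *info = &qp->sq;
 *   QLNXR_INC_SW_IDX(info, cons);
 */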

struct qlnxr_srq_hwq_info {
        u32 max_sges;
        u32 max_wr;
        struct ecore_chain pbl;
        u64 p_phys_addr_tbl;
        u32 wqe_prod;     /* WQE prod index in HW ring */
        u32 sge_prod;     /* SGE prod index in HW ring */
        u32 wr_prod_cnt;  /* wr producer count */
        u32 wr_cons_cnt;  /* wr consumer count */
        u32 num_elems;

        u32 *virt_prod_pair_addr;      /* producer pair virtual address */
        dma_addr_t phy_prod_pair_addr; /* producer pair physical address */
};

struct qlnxr_srq {
        struct ib_srq ibsrq;
        struct qlnxr_dev *dev;
        /* relevant to srqs created from user space only (applications) */
        struct qlnxr_userq usrq;
        struct qlnxr_srq_hwq_info hw_srq;
        struct ib_umem *prod_umem;
        u16 srq_id;
        /* lock to protect srq recv post */
        spinlock_t lock;
};

enum qlnxr_qp_err_bitmap {
        QLNXR_QP_ERR_SQ_FULL     = 1 << 0,
        QLNXR_QP_ERR_RQ_FULL     = 1 << 1,
        QLNXR_QP_ERR_BAD_SR      = 1 << 2,
        QLNXR_QP_ERR_BAD_RR      = 1 << 3,
        QLNXR_QP_ERR_SQ_PBL_FULL = 1 << 4,
        QLNXR_QP_ERR_RQ_PBL_FULL = 1 << 5,
};

struct mr_info {
        struct qlnxr_pbl *pbl_table;
        struct qlnxr_pbl_info pbl_info;
        struct list_head free_pbl_list;
        struct list_head inuse_pbl_list;
        u32 completed;
        u32 completed_handled;
};

#if __FreeBSD_version < 1102000
#define DEFINE_IB_FAST_REG
#else
#define DEFINE_ALLOC_MR
#endif

#ifdef DEFINE_IB_FAST_REG
struct qlnxr_fast_reg_page_list {
        struct ib_fast_reg_page_list ibfrpl;
        struct qlnxr_dev *dev;
        struct mr_info info;
};
#endif

struct qlnxr_qp {
        struct ib_qp ibqp;              /* must be first */
        struct qlnxr_dev *dev;
        struct qlnxr_iw_ep *ep;
        struct qlnxr_qp_hwq_info sq;
        struct qlnxr_qp_hwq_info rq;

        u32 max_inline_data;

#if __FreeBSD_version >= 1100000
        spinlock_t q_lock ____cacheline_aligned;
#else
        spinlock_t q_lock;
#endif

        struct qlnxr_cq *sq_cq;
        struct qlnxr_cq *rq_cq;
        struct qlnxr_srq *srq;
        enum ecore_roce_qp_state state;   /* QP state */
        u32 id;
        struct qlnxr_pd *pd;
        enum ib_qp_type qp_type;
        struct ecore_rdma_qp *ecore_qp;
        u32 qp_id;
        u16 icid;
        u16 mtu;
        int sgid_idx;
        u32 rq_psn;
        u32 sq_psn;
        u32 qkey;
        u32 dest_qp_num;
        u32 sig;                /* unique signature to identify valid QP */

        /* relevant to qps created from kernel space only (ULPs) */
        u8 prev_wqe_size;
        u16 wqe_cons;
        u32 err_bitmap;
        bool signaled;
        /* SQ shadow */
        struct {
                u64 wr_id;
                enum ib_wc_opcode opcode;
                u32 bytes_len;
                u8 wqe_size;
                bool signaled;
                dma_addr_t icrc_mapping;
                u32 *icrc;
#ifdef DEFINE_IB_FAST_REG
                struct qlnxr_fast_reg_page_list *frmr;
#endif
                struct qlnxr_mr *mr;
        } *wqe_wr_id;

        /* RQ shadow */
        struct {
                u64 wr_id;
                struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
                uint8_t wqe_size;

                /* for GSI only */
                u8 smac[ETH_ALEN];
                u16 vlan_id;
                int rc;
        } *rqe_wr_id;

        /* relevant to qps created from user space only (applications) */
        struct qlnxr_userq usq;
        struct qlnxr_userq urq;
        atomic_t refcnt;
        bool destroyed;
};

enum qlnxr_mr_type {
        QLNXR_MR_USER,
        QLNXR_MR_KERNEL,
        QLNXR_MR_DMA,
        QLNXR_MR_FRMR
};

struct qlnxr_mr {
        struct ib_mr    ibmr;
        struct ib_umem  *umem;

        struct ecore_rdma_register_tid_in_params hw_mr;
        enum qlnxr_mr_type type;

        struct qlnxr_dev *dev;
        struct mr_info info;

        u64 *pages;
        u32 npages;

        u64 *iova_start; /* valid only for kernel_mr */
};

struct qlnxr_mm {
        struct {
                u64 phy_addr;
                unsigned long len;
        } key;
        struct list_head entry;
};

struct qlnxr_iw_listener {
        struct qlnxr_dev *dev;
        struct iw_cm_id *cm_id;
        int backlog;
        void *ecore_handle;
};

struct qlnxr_iw_ep {
        struct qlnxr_dev *dev;
        struct iw_cm_id *cm_id;
        struct qlnxr_qp *qp;
        void *ecore_context;
        u8 during_connect;
};

static inline void
qlnxr_inc_sw_cons(struct qlnxr_qp_hwq_info *info)
{
        info->cons = (info->cons + 1) % info->max_wr;
        info->wqe_cons++;
}

static inline void
qlnxr_inc_sw_prod(struct qlnxr_qp_hwq_info *info)
{
        info->prod = (info->prod + 1) % info->max_wr;
}

static inline struct qlnxr_dev *
get_qlnxr_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct qlnxr_dev, ibdev);
}

static inline struct qlnxr_ucontext *
get_qlnxr_ucontext(struct ib_ucontext *ibucontext)
{
        return container_of(ibucontext, struct qlnxr_ucontext, ibucontext);
}

static inline struct qlnxr_pd *
get_qlnxr_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct qlnxr_pd, ibpd);
}

static inline struct qlnxr_cq *
get_qlnxr_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct qlnxr_cq, ibcq);
}

static inline struct qlnxr_qp *
get_qlnxr_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct qlnxr_qp, ibqp);
}

static inline struct qlnxr_mr *
get_qlnxr_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct qlnxr_mr, ibmr);
}

static inline struct qlnxr_ah *
get_qlnxr_ah(struct ib_ah *ibah)
{
        return container_of(ibah, struct qlnxr_ah, ibah);
}

static inline struct qlnxr_srq *
get_qlnxr_srq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct qlnxr_srq, ibsrq);
}

static inline bool qlnxr_qp_has_srq(struct qlnxr_qp *qp)
{
        return !!qp->srq;
}

static inline bool qlnxr_qp_has_sq(struct qlnxr_qp *qp)
{
        if (qp->qp_type == IB_QPT_GSI)
                return false;

        return true;
}

static inline bool qlnxr_qp_has_rq(struct qlnxr_qp *qp)
{
        if (qp->qp_type == IB_QPT_GSI || qlnxr_qp_has_srq(qp))
                return false;

        return true;
}

#ifdef DEFINE_IB_FAST_REG
static inline struct qlnxr_fast_reg_page_list *get_qlnxr_frmr_list(
        struct ib_fast_reg_page_list *ifrpl)
{
        return container_of(ifrpl, struct qlnxr_fast_reg_page_list, ibfrpl);
}
#endif

#define SET_FIELD2(value, name, flag)                          \
        do {                                                   \
                (value) |= ((flag) << (name ## _SHIFT));       \
        } while (0)
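
/*
 * Example (illustrative): SET_FIELD2() shifts 'flag' into the field named by
 * the <name>_SHIFT define. Setting the atomic-op capability bit in a
 * dev_caps word:
 *
 *   u32 caps = 0;
 *   SET_FIELD2(caps, QLNXR_ROCE_DEV_CAP_ATOMIC_OP, 1);
 *
 * leaves caps == (1 << 6).
 */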

#define QLNXR_RESP_IMM	(RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
                         RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
#define QLNXR_RESP_RDMA	(RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
                         RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
#define QLNXR_RESP_INV	(RDMA_CQE_RESPONDER_INV_FLG_MASK << \
                         RDMA_CQE_RESPONDER_INV_FLG_SHIFT)

#define QLNXR_RESP_RDMA_IMM (QLNXR_RESP_IMM | QLNXR_RESP_RDMA)

static inline int
qlnxr_get_dmac(struct qlnxr_dev *dev, struct ib_ah_attr *ah_attr, u8 *mac_addr)
{
#ifdef DEFINE_NO_IP_BASED_GIDS
        u8 *guid = &ah_attr->grh.dgid.raw[8]; /* GID's 64 MSBs are the GUID */
#endif
        union ib_gid zero_sgid = { { 0 } };
        struct in6_addr in6;

        if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
                memset(mac_addr, 0x00, ETH_ALEN);
                return -EINVAL;
        }

        memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));

#ifdef DEFINE_NO_IP_BASED_GIDS
        /* get the MAC address from the GUID, i.e. EUI-64 to MAC address */
        mac_addr[0] = guid[0] ^ 2; /* toggle the universal/local bit */
        mac_addr[1] = guid[1];
        mac_addr[2] = guid[2];
        mac_addr[3] = guid[5];
        mac_addr[4] = guid[6];
        mac_addr[5] = guid[7];
#else
        memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
#endif
        return 0;
}
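
/*
 * Worked example (illustrative) of the EUI-64 to MAC conversion above: a
 * GUID of 02:11:22:ff:fe:33:44:55 drops the ff:fe filler in bytes 3-4 and
 * toggles the universal/local bit of byte 0, yielding MAC 00:11:22:33:44:55.
 */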

extern int qlnx_rdma_ll2_set_mac_filter(void *rdma_ctx, uint8_t *old_mac_address,
                uint8_t *new_mac_address);

#if __FreeBSD_version < 1100000
#define DEFINE_IB_AH_ATTR_WITH_DMAC     (0)
#define DEFINE_IB_UMEM_WITH_CHUNK	(1)
#else
#define DEFINE_IB_AH_ATTR_WITH_DMAC     (1)
#endif

#define QLNX_IS_IWARP(rdev)	IS_IWARP(ECORE_LEADING_HWFN((rdev)->cdev))
#define QLNX_IS_ROCE(rdev)	IS_ROCE(ECORE_LEADING_HWFN((rdev)->cdev))

#define MAX_RXMIT_CONNS		16

#endif /* #ifndef __QLNX_DEF_H_ */