/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qlnxr_def.h
 * Author: David C Somayajulu
 */

#ifndef __QLNXR_DEF_H_
#define __QLNXR_DEF_H_

#include <sys/ktr.h>

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <sys/vmem.h>

#include <asm/byteorder.h>

#include <netinet/in.h>
#include <net/ipv6.h>
#include <netinet/toecore.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>
#include <rdma/uverbs_ioctl.h>

#include "qlnx_os.h"
#include "bcm_osal.h"

#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#ifdef CONFIG_ECORE_SRIOV
#include "ecore_sriov.h"
#include "ecore_vf.h"
#endif
#ifdef CONFIG_ECORE_LL2
#include "ecore_ll2.h"
#endif
#ifdef CONFIG_ECORE_FCOE
#include "ecore_fcoe.h"
#endif
#ifdef CONFIG_ECORE_ISCSI
#include "ecore_iscsi.h"
#endif
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"

#ifdef CONFIG_ECORE_RDMA
#include "ecore_rdma.h"
#include "ecore_rdma_api.h"
#endif

#ifdef CONFIG_ECORE_ROCE
#include "ecore_roce.h"
#endif

#ifdef CONFIG_ECORE_IWARP
#include "ecore_iwarp.h"
#endif

#include "ecore_iro.h"
#include "nvm_cfg.h"

#include "ecore_dbg_fw_funcs.h"
#include "rdma_common.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_rdma.h"
#include "qlnxr_verbs.h"
#include "qlnxr_user.h"
#include "qlnx_ver.h"
#include <sys/smp.h>

#define QLNXR_ROCE_INTERFACE_VERSION     1801

#define QLNXR_MODULE_VERSION     "8.18.1.0"
#define QLNXR_NODE_DESC "QLogic 579xx RoCE HCA"

#define OC_SKH_DEVICE_PF 0x720
#define OC_SKH_DEVICE_VF 0x728
#define QLNXR_MAX_AH 512

/* QLNXR Limitations */

/* SQ/RQ Limitations
 * An S/RQ PBL contains a list of pointers to pages. Each page contains
 * S/RQ elements, and several S/RQ elements make up one S/RQE (WQE), up to a
 * maximum that differs between the SQ and the RQ. The size of the PBL was
 * chosen so as not to limit the MAX_WR supported by ECORE, and was rounded
 * up to a power of two.
 */
/* SQ */
#define QLNXR_MAX_SQ_PBL (0x8000) /* 2^15 bytes */
#define QLNXR_MAX_SQ_PBL_ENTRIES (0x10000 / sizeof(void *)) /* number */
#define QLNXR_SQE_ELEMENT_SIZE (sizeof(struct rdma_sq_sge)) /* bytes */
#define QLNXR_MAX_SQE_ELEMENTS_PER_SQE (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
                QLNXR_SQE_ELEMENT_SIZE) /* number */
#define QLNXR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
                QLNXR_SQE_ELEMENT_SIZE) /* number */
#define QLNXR_MAX_SQE ((QLNXR_MAX_SQ_PBL_ENTRIES) * (RDMA_RING_PAGE_SIZE) / \
                (QLNXR_SQE_ELEMENT_SIZE) / (QLNXR_MAX_SQE_ELEMENTS_PER_SQE))
/* RQ */
#define QLNXR_MAX_RQ_PBL (0x2000) /* 2^13 bytes */
#define QLNXR_MAX_RQ_PBL_ENTRIES (0x10000 / sizeof(void *)) /* number */
#define QLNXR_RQE_ELEMENT_SIZE (sizeof(struct rdma_rq_sge)) /* bytes */
#define QLNXR_MAX_RQE_ELEMENTS_PER_RQE (RDMA_MAX_SGE_PER_RQ_WQE) /* number */
#define QLNXR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
                QLNXR_RQE_ELEMENT_SIZE) /* number */
#define QLNXR_MAX_RQE ((QLNXR_MAX_RQ_PBL_ENTRIES) * (RDMA_RING_PAGE_SIZE) / \
                (QLNXR_RQE_ELEMENT_SIZE) / (QLNXR_MAX_RQE_ELEMENTS_PER_RQE))
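
/*
 * Worked instance of the SQ sizing above (illustrative only; assumes
 * RDMA_RING_PAGE_SIZE == 4096, sizeof(void *) == 8 and a 16-byte
 * struct rdma_sq_sge -- none of these values are fixed by this header):
 * QLNXR_MAX_SQ_PBL_ENTRIES = 0x10000 / 8 = 8192 page pointers, each page
 * holds 4096 / 16 = 256 SQ elements, and QLNXR_MAX_SQE divides the
 * resulting 8192 * 256 element capacity by the worst-case number of
 * elements a single SQE may occupy.
 */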

/* CQE Limitation
 * Although the FW supports a two-layer PBL, we use a single layer since it
 * is more than enough. For that layer we use a maximum size of 512 kB,
 * because that already reaches the maximum number of page pointers. Note
 * the '-1' in the calculation: it comes from using a u16 for the number of
 * pages, i.e. 0xffff is the maximum number of pages (in a single layer).
 */
#define QLNXR_CQE_SIZE   (sizeof(union rdma_cqe))
#define QLNXR_MAX_CQE_PBL_SIZE (512*1024) /* 512kB */
#define QLNXR_MAX_CQE_PBL_ENTRIES (((QLNXR_MAX_CQE_PBL_SIZE) / \
                                  sizeof(u64)) - 1) /* 64k - 1 */
#define QLNXR_MAX_CQES ((u32)((QLNXR_MAX_CQE_PBL_ENTRIES) * (ECORE_CHAIN_PAGE_SIZE)\
                             / QLNXR_CQE_SIZE)) /* 65535 * 4096 / 32 = 8,388,480 */

/* CNQ size Limitation
 * The maximum CNQ size is not reachable because the FW supports a chain of
 * u16 elements (specifically 64k-1). The FW can buffer CNQ elements to avoid
 * an overflow, at the expense of performance. Hence we set it to an
 * arbitrarily smaller value than the maximum.
 */
#define QLNXR_ROCE_MAX_CNQ_SIZE          (0x4000) /* 2^14 */

#define QLNXR_MAX_PORT                   (1)
#define QLNXR_PORT                       (1)

#define QLNXR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)

/* The following numbers are used to determine whether a handle received
 * from the FW actually points to a CQ/QP.
 */
#define QLNXR_CQ_MAGIC_NUMBER    (0x11223344)
#define QLNXR_QP_MAGIC_NUMBER    (0x77889900)

/* Fast path debug prints */
#define FP_DP_VERBOSE(...)
/* #define FP_DP_VERBOSE(...)   DP_VERBOSE(__VA_ARGS__) */

#define FW_PAGE_SIZE    (RDMA_RING_PAGE_SIZE)

#define QLNXR_MSG_INIT		0x10000
#define QLNXR_MSG_FAIL		0x10000
#define QLNXR_MSG_CQ		0x20000
#define QLNXR_MSG_RQ		0x40000
#define QLNXR_MSG_SQ		0x80000
#define QLNXR_MSG_QP		(QLNXR_MSG_SQ | QLNXR_MSG_RQ)
#define QLNXR_MSG_MR		0x100000
#define QLNXR_MSG_GSI		0x200000
#define QLNXR_MSG_MISC		0x400000
#define QLNXR_MSG_SRQ		0x800000
#define QLNXR_MSG_IWARP		0x1000000

#define QLNXR_ROCE_PKEY_MAX		1
#define QLNXR_ROCE_PKEY_TABLE_LEN	1
#define QLNXR_ROCE_PKEY_DEFAULT		0xffff

#define QLNXR_MAX_SGID			128 /* TBD - add more source gids... */

#define QLNXR_ENET_STATE_BIT     (0)

#define QLNXR_MAX_MSIX		(16)

struct qlnxr_cnq {
        struct qlnxr_dev	*dev;
        struct ecore_chain	pbl;
        struct ecore_sb_info	*sb;
        char			name[32];
        u64			n_comp;
        __le16			*hw_cons_ptr;
        u8			index;
	int			irq_rid;
	struct resource		*irq;
	void			*irq_handle;
};

struct qlnxr_device_attr {
        /* Vendor specific information */
        u32     vendor_id;
        u32     vendor_part_id;
        u32     hw_ver;
        u64     fw_ver;

        u64     node_guid;      /* node GUID */
        u64     sys_image_guid; /* System image GUID */

        u8      max_cnq;
        u8      max_sge;        /* Maximum # of scatter/gather entries
                                 * per Work Request supported
                                 */
        u16     max_inline;
        u32     max_sqe;        /* Maximum number of outstanding send work
                                 * requests on any Work Queue supported
                                 */
        u32     max_rqe;        /* Maximum number of outstanding receive
                                 * work requests on any Work Queue supported
                                 */
        u8      max_qp_resp_rd_atomic_resc;     /* Maximum number of RDMA Reads
                                                 * & atomic operations that can
                                                 * be outstanding per QP
                                                 */

        u8      max_qp_req_rd_atomic_resc;      /* The maximum depth per QP for
                                                 * initiation of RDMA Read
                                                 * & atomic operations
                                                 */
        u64     max_dev_resp_rd_atomic_resc;
        u32     max_cq;
        u32     max_qp;
        u32     max_mr;         /* Maximum # of MRs supported */
        u64     max_mr_size;    /* Size (in bytes) of largest contiguous memory
                                 * block that can be registered by this device
                                 */
        u32     max_cqe;
        u32     max_mw;         /* Maximum # of memory windows supported */
        u32     max_fmr;
        u32     max_mr_mw_fmr_pbl;
        u64     max_mr_mw_fmr_size;
        u32     max_pd;         /* Maximum # of protection domains supported */
        u32     max_ah;
        u8      max_pkey;
        u32     max_srq;        /* Maximum number of SRQs */
        u32     max_srq_wr;     /* Maximum number of WRs per SRQ */
        u8      max_srq_sge;    /* Maximum number of SGE per WQE */
        u8      max_stats_queues; /* Maximum number of statistics queues */
        u32     dev_caps;

        /* Ability to support RNR-NAK generation */

#define QLNXR_ROCE_DEV_CAP_RNR_NAK_MASK                         0x1
#define QLNXR_ROCE_DEV_CAP_RNR_NAK_SHIFT                        0
        /* Ability to support shutdown port */
#define QLNXR_ROCE_DEV_CAP_SHUTDOWN_PORT_MASK                   0x1
#define QLNXR_ROCE_DEV_CAP_SHUTDOWN_PORT_SHIFT                  1
        /* Ability to support port active event */
#define QLNXR_ROCE_DEV_CAP_PORT_ACTIVE_EVENT_MASK               0x1
#define QLNXR_ROCE_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT              2
        /* Ability to support port change event */
#define QLNXR_ROCE_DEV_CAP_PORT_CHANGE_EVENT_MASK               0x1
#define QLNXR_ROCE_DEV_CAP_PORT_CHANGE_EVENT_SHIFT              3
        /* Ability to support system image GUID */
#define QLNXR_ROCE_DEV_CAP_SYS_IMAGE_MASK                       0x1
#define QLNXR_ROCE_DEV_CAP_SYS_IMAGE_SHIFT                      4
        /* Ability to support a bad P_Key counter */
#define QLNXR_ROCE_DEV_CAP_BAD_PKEY_CNT_MASK                    0x1
#define QLNXR_ROCE_DEV_CAP_BAD_PKEY_CNT_SHIFT                   5
        /* Ability to support atomic operations */
#define QLNXR_ROCE_DEV_CAP_ATOMIC_OP_MASK                       0x1
#define QLNXR_ROCE_DEV_CAP_ATOMIC_OP_SHIFT                      6
#define QLNXR_ROCE_DEV_CAP_RESIZE_CQ_MASK                       0x1
#define QLNXR_ROCE_DEV_CAP_RESIZE_CQ_SHIFT                      7
        /* Ability to support modifying the maximum number of
         * outstanding work requests per QP
         */
#define QLNXR_ROCE_DEV_CAP_RESIZE_MAX_WR_MASK                   0x1
#define QLNXR_ROCE_DEV_CAP_RESIZE_MAX_WR_SHIFT                  8

        /* Ability to support automatic path migration */
#define QLNXR_ROCE_DEV_CAP_AUTO_PATH_MIG_MASK                   0x1
#define QLNXR_ROCE_DEV_CAP_AUTO_PATH_MIG_SHIFT                  9
        /* Ability to support the base memory management extensions */
#define QLNXR_ROCE_DEV_CAP_BASE_MEMORY_EXT_MASK                 0x1
#define QLNXR_ROCE_DEV_CAP_BASE_MEMORY_EXT_SHIFT                10
#define QLNXR_ROCE_DEV_CAP_BASE_QUEUE_EXT_MASK                  0x1
#define QLNXR_ROCE_DEV_CAP_BASE_QUEUE_EXT_SHIFT                 11
        /* Ability to support multiple page sizes per memory region */
#define QLNXR_ROCE_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK           0x1
#define QLNXR_ROCE_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT          12
        /* Ability to support block-list physical buffer lists */
#define QLNXR_ROCE_DEV_CAP_BLOCK_MODE_MASK                      0x1
#define QLNXR_ROCE_DEV_CAP_BLOCK_MODE_SHIFT                     13
        /* Ability to support zero-based virtual addresses */
#define QLNXR_ROCE_DEV_CAP_ZBVA_MASK                            0x1
#define QLNXR_ROCE_DEV_CAP_ZBVA_SHIFT                           14
        /* Ability to support local invalidate fencing */
#define QLNXR_ROCE_DEV_CAP_LOCAL_INV_FENCE_MASK                 0x1
#define QLNXR_ROCE_DEV_CAP_LOCAL_INV_FENCE_SHIFT                15
        /* Ability to support loopback on QP */
#define QLNXR_ROCE_DEV_CAP_LB_INDICATOR_MASK                    0x1
#define QLNXR_ROCE_DEV_CAP_LB_INDICATOR_SHIFT                   16
        u64                     page_size_caps;
        u8                      dev_ack_delay;
        u32                     reserved_lkey;   /* Value of reserved L_key */
        u32                     bad_pkey_counter;/* Bad P_key counter support
                                                  * indicator
                                                  */
        struct ecore_rdma_events  events;
};

struct qlnxr_dev {
	struct ib_device	ibdev;
	qlnx_host_t		*ha;
	struct ecore_dev	*cdev;

	/* Added to extend Applications Support */
	struct pci_dev		pdev;
	uint32_t		dp_module;
	uint8_t			dp_level;

	void			*rdma_ctx;

	struct mtx		idr_lock;
	struct idr		qpidr;

	uint32_t		wq_multiplier;
	int			num_cnq;

	struct ecore_sb_info	sb_array[QLNXR_MAX_MSIX];
	struct qlnxr_cnq	cnq_array[QLNXR_MAX_MSIX];

	int			sb_start;

	int			gsi_qp_created;
	struct qlnxr_cq		*gsi_sqcq;
	struct qlnxr_cq		*gsi_rqcq;
	struct qlnxr_qp		*gsi_qp;

	/* TBD: we'll need an array of these, probably per DPI... */
	void __iomem		*db_addr;
	uint64_t		db_phys_addr;
	uint32_t		db_size;
	uint16_t		dpi;

	uint64_t		guid;
	enum ib_atomic_cap	atomic_cap;

	union ib_gid		sgid_tbl[QLNXR_MAX_SGID];
	struct mtx		sgid_lock;
	struct notifier_block	nb_inet;
	struct notifier_block	nb_inet6;

	uint8_t			mr_key;
	struct list_head	entry;

	struct dentry		*dbgfs;

	uint8_t			gsi_ll2_mac_address[ETH_ALEN];
	uint8_t			gsi_ll2_handle;

	unsigned long		enet_state;

	struct workqueue_struct *iwarp_wq;

	volatile uint32_t	pd_count;
	struct qlnxr_device_attr attr;
	uint8_t			user_dpm_enabled;
};

typedef struct qlnxr_dev qlnxr_dev_t;

struct qlnxr_pd {
        struct ib_pd ibpd;
        u32 pd_id;
        struct qlnxr_ucontext *uctx;
};

struct qlnxr_ucontext {
        struct ib_ucontext ibucontext;
        struct qlnxr_dev *dev;
        struct qlnxr_pd *pd;
        u64 dpi_addr;
        u64 dpi_phys_addr;
        u32 dpi_size;
        u16 dpi;

        struct list_head mm_head;
        struct mutex mm_list_lock;
};

struct qlnxr_dev_attr {
        struct ib_device_attr ib_attr;
};

struct qlnxr_dma_mem {
        void *va;
        dma_addr_t pa;
        u32 size;
};

struct qlnxr_pbl {
        struct list_head list_entry;
        void *va;
        dma_addr_t pa;
};

struct qlnxr_queue_info {
        void *va;
        dma_addr_t dma;
        u32 size;
        u16 len;
        u16 entry_size;         /* Size of an element in the queue */
        u16 id;                 /* qid, where to ring the doorbell. */
        u16 head, tail;
        bool created;
};

struct qlnxr_eq {
        struct qlnxr_queue_info q;
        u32 vector;
        int cq_cnt;
        struct qlnxr_dev *dev;
        char irq_name[32];
};

struct qlnxr_mq {
        struct qlnxr_queue_info sq;
        struct qlnxr_queue_info cq;
        bool rearm_cq;
};

struct phy_info {
        u16 auto_speeds_supported;
        u16 fixed_speeds_supported;
        u16 phy_type;
        u16 interface_type;
};

union db_prod64 {
        struct rdma_pwm_val32_data data;
        u64 raw;
};

enum qlnxr_cq_type {
        QLNXR_CQ_TYPE_GSI,
        QLNXR_CQ_TYPE_KERNEL,
        QLNXR_CQ_TYPE_USER
};

struct qlnxr_pbl_info {
        u32 num_pbls;
        u32 num_pbes;
        u32 pbl_size;
        u32 pbe_size;
        bool two_layered;
};

struct qlnxr_userq {
        struct ib_umem *umem;
        struct qlnxr_pbl_info pbl_info;
        struct qlnxr_pbl *pbl_tbl;
        u64 buf_addr;
        size_t buf_len;
};

struct qlnxr_cq {
        struct ib_cq		ibcq; /* must be first */

        enum qlnxr_cq_type	cq_type;
        uint32_t		sig;
        uint16_t		icid;

        /* relevant to cqs created from kernel space only (ULPs) */
        spinlock_t		cq_lock;
        uint8_t			arm_flags;
        struct ecore_chain	pbl;

        void __iomem		*db_addr; /* db address for cons update */
        union db_prod64		db;

        uint8_t			pbl_toggle;
        union rdma_cqe		*latest_cqe;
        union rdma_cqe		*toggle_cqe;

        /* TODO: remove since it is redundant with 32 bit chains */
        uint32_t		cq_cons;

        /* relevant to cqs created from user space only (applications) */
        struct qlnxr_userq	q;

        /* destroy-IRQ handler race prevention */
        uint8_t			destroyed;
        uint16_t		cnq_notif;
};
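
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver
 * API): validate a CQ pointer recovered from an FW completion handle by
 * checking the signature written at creation time against
 * QLNXR_CQ_MAGIC_NUMBER, per the magic-number convention defined above.
 */
static inline bool
qlnxr_cq_signature_valid(struct qlnxr_cq *cq)
{
        return (cq != NULL && cq->sig == QLNXR_CQ_MAGIC_NUMBER);
}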

struct qlnxr_ah {
        struct ib_ah		ibah;
        struct ib_ah_attr	attr;
};

union db_prod32 {
        struct rdma_pwm_val16_data data;
        u32 raw;
};

struct qlnxr_qp_hwq_info {
        /* WQE Elements */
        struct ecore_chain      pbl;
        u64                     p_phys_addr_tbl;
        u32                     max_sges;

        /* WQE */
        u16                     prod;     /* WQE prod index for SW ring */
        u16                     cons;     /* WQE cons index for SW ring */
        u16                     wqe_cons;
        u16                     gsi_cons; /* filled in by GSI implementation */
        u16                     max_wr;

        /* DB */
        void __iomem            *db;      /* Doorbell address */
        union db_prod32         db_data;  /* Doorbell data */

        /* Required for iwarp_only */
        void __iomem            *iwarp_db2;      /* Doorbell address */
        union db_prod32         iwarp_db2_data;  /* Doorbell data */
};
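
/*
 * Doorbell usage sketch (illustrative, hypothetical helper; the driver's
 * real ring sequence lives in the verbs code, and the rdma_pwm_val16_data
 * member names are an assumption from the FW common headers): compose the
 * PWM doorbell data in the union above, then publish it to the mapped
 * doorbell address with a single 32-bit store so icid and value become
 * visible to the device atomically.
 */
static inline void
qlnxr_example_ring_db(struct qlnxr_qp_hwq_info *info)
{
        info->db_data.data.value = cpu_to_le16(info->prod);
        writel(info->db_data.raw, info->db);
}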

#define QLNXR_INC_SW_IDX(p_info, index)                         \
        do {                                                    \
                (p_info)->index = ((p_info)->index + 1) %       \
                        ecore_chain_get_capacity(&(p_info)->pbl); \
        } while (0)

struct qlnxr_srq_hwq_info {
        u32 max_sges;
        u32 max_wr;
        struct ecore_chain pbl;
        u64 p_phys_addr_tbl;
        u32 wqe_prod;     /* WQE prod index in HW ring */
        u32 sge_prod;     /* SGE prod index in HW ring */
        u32 wr_prod_cnt;  /* wr producer count */
        u32 wr_cons_cnt;  /* wr consumer count */
        u32 num_elems;

        u32 *virt_prod_pair_addr; /* producer pair virtual address */
        dma_addr_t phy_prod_pair_addr; /* producer pair physical address */
};

struct qlnxr_srq {
        struct ib_srq ibsrq;
        struct qlnxr_dev *dev;
        /* relevant to SRQs created from user space only (applications) */
        struct qlnxr_userq       usrq;
        struct qlnxr_srq_hwq_info hw_srq;
        struct ib_umem *prod_umem;
        u16 srq_id;
        /* lock to protect srq recv post */
        spinlock_t lock;
};

enum qlnxr_qp_err_bitmap {
        QLNXR_QP_ERR_SQ_FULL     = 1 << 0,
        QLNXR_QP_ERR_RQ_FULL     = 1 << 1,
        QLNXR_QP_ERR_BAD_SR      = 1 << 2,
        QLNXR_QP_ERR_BAD_RR      = 1 << 3,
        QLNXR_QP_ERR_SQ_PBL_FULL = 1 << 4,
        QLNXR_QP_ERR_RQ_PBL_FULL = 1 << 5,
};

struct mr_info {
        struct qlnxr_pbl *pbl_table;
        struct qlnxr_pbl_info pbl_info;
        struct list_head free_pbl_list;
        struct list_head inuse_pbl_list;
        u32 completed;
        u32 completed_handled;
};

struct qlnxr_qp {
        struct ib_qp ibqp;              /* must be first */
        struct qlnxr_dev *dev;
        struct qlnxr_iw_ep *ep;
        struct qlnxr_qp_hwq_info sq;
        struct qlnxr_qp_hwq_info rq;

        u32 max_inline_data;

        spinlock_t q_lock ____cacheline_aligned;

        struct qlnxr_cq *sq_cq;
        struct qlnxr_cq *rq_cq;
        struct qlnxr_srq *srq;
        enum ecore_roce_qp_state state;   /* QP state */
        u32 id;
        struct qlnxr_pd *pd;
        enum ib_qp_type qp_type;
        struct ecore_rdma_qp *ecore_qp;
        u32 qp_id;
        u16 icid;
        u16 mtu;
        int sgid_idx;
        u32 rq_psn;
        u32 sq_psn;
        u32 qkey;
        u32 dest_qp_num;
        u32 sig;                /* unique signature to identify a valid QP */

        /* relevant to qps created from kernel space only (ULPs) */
        u8 prev_wqe_size;
        u16 wqe_cons;
        u32 err_bitmap;
        bool signaled;
        /* SQ shadow */
        struct {
                u64 wr_id;
                enum ib_wc_opcode opcode;
                u32 bytes_len;
                u8 wqe_size;
                bool signaled;
                dma_addr_t icrc_mapping;
                u32 *icrc;
                struct qlnxr_mr *mr;
        } *wqe_wr_id;

        /* RQ shadow */
        struct {
                u64 wr_id;
                struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
                uint8_t wqe_size;

                /* for GSI only */
                u8 smac[ETH_ALEN];
                u16 vlan_id;
                int rc;
        } *rqe_wr_id;

        /* relevant to qps created from user space only (applications) */
        struct qlnxr_userq usq;
        struct qlnxr_userq urq;
        atomic_t refcnt;
        bool destroyed;
};

enum qlnxr_mr_type {
        QLNXR_MR_USER,
        QLNXR_MR_KERNEL,
        QLNXR_MR_DMA,
        QLNXR_MR_FRMR
};

struct qlnxr_mr {
        struct ib_mr    ibmr;
        struct ib_umem  *umem;

        struct ecore_rdma_register_tid_in_params hw_mr;
        enum qlnxr_mr_type type;

        struct qlnxr_dev *dev;
        struct mr_info info;

        u64 *pages;
        u32 npages;

        u64 *iova_start; /* valid only for kernel_mr */
};

struct qlnxr_mm {
        struct {
                u64 phy_addr;
                unsigned long len;
        } key;
        struct list_head entry;
};

struct qlnxr_iw_listener {
        struct qlnxr_dev *dev;
        struct iw_cm_id *cm_id;
        int backlog;
        void *ecore_handle;
};

struct qlnxr_iw_ep {
        struct qlnxr_dev *dev;
        struct iw_cm_id *cm_id;
        struct qlnxr_qp *qp;
        void *ecore_context;
        u8 during_connect;
};

static inline void
qlnxr_inc_sw_cons(struct qlnxr_qp_hwq_info *info)
{
        info->cons = (info->cons + 1) % info->max_wr;
        info->wqe_cons++;
}

static inline void
qlnxr_inc_sw_prod(struct qlnxr_qp_hwq_info *info)
{
        info->prod = (info->prod + 1) % info->max_wr;
}
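
/*
 * Companion sketch (illustrative, hypothetical helper): ring occupancy
 * under the scheme used by the two helpers above. prod and cons both
 * advance modulo max_wr, so their distance is the number of WQEs posted
 * but not yet consumed.
 */
static inline u16
qlnxr_hwq_used_slots(struct qlnxr_qp_hwq_info *info)
{
        return ((info->prod + info->max_wr - info->cons) % info->max_wr);
}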

static inline struct qlnxr_dev *
get_qlnxr_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct qlnxr_dev, ibdev);
}

static inline struct qlnxr_ucontext *
get_qlnxr_ucontext(struct ib_ucontext *ibucontext)
{
        return container_of(ibucontext, struct qlnxr_ucontext, ibucontext);
}

static inline struct qlnxr_pd *
get_qlnxr_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct qlnxr_pd, ibpd);
}

static inline struct qlnxr_cq *
get_qlnxr_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct qlnxr_cq, ibcq);
}

static inline struct qlnxr_qp *
get_qlnxr_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct qlnxr_qp, ibqp);
}

static inline struct qlnxr_mr *
get_qlnxr_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct qlnxr_mr, ibmr);
}

static inline struct qlnxr_ah *
get_qlnxr_ah(struct ib_ah *ibah)
{
        return container_of(ibah, struct qlnxr_ah, ibah);
}

static inline struct qlnxr_srq *
get_qlnxr_srq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct qlnxr_srq, ibsrq);
}
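
/*
 * Usage sketch (illustrative, hypothetical helper): verbs entry points
 * receive generic ib_* objects and recover the driver-private wrappers
 * through the container_of() accessors above, e.g.
 */
static inline struct qlnxr_dev *
qlnxr_example_qp_dev(struct ib_qp *ibqp)
{
        return (get_qlnxr_qp(ibqp)->dev);
}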

static inline bool qlnxr_qp_has_srq(struct qlnxr_qp *qp)
{
        return !!qp->srq;
}

static inline bool qlnxr_qp_has_sq(struct qlnxr_qp *qp)
{
        if (qp->qp_type == IB_QPT_GSI)
                return 0;

        return 1;
}

static inline bool qlnxr_qp_has_rq(struct qlnxr_qp *qp)
{
        if (qp->qp_type == IB_QPT_GSI || qlnxr_qp_has_srq(qp))
                return 0;

        return 1;
}

#define SET_FIELD2(value, name, flag)                          \
        do {                                                   \
                (value) |= ((flag) << (name ## _SHIFT));       \
        } while (0)

#define QLNXR_RESP_IMM	(RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
                         RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
#define QLNXR_RESP_RDMA	(RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
                         RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
#define QLNXR_RESP_INV  (RDMA_CQE_RESPONDER_INV_FLG_MASK << \
                         RDMA_CQE_RESPONDER_INV_FLG_SHIFT)

#define QLNXR_RESP_RDMA_IMM (QLNXR_RESP_IMM | QLNXR_RESP_RDMA)
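
/*
 * Usage sketch of SET_FIELD2() (illustrative, hypothetical helper): the
 * macro shifts a flag into its *_SHIFT position, so a responder CQE flags
 * byte composed this way tests true against QLNXR_RESP_IMM above.
 */
static inline u8
qlnxr_example_imm_flags(void)
{
        u8 flags = 0;

        SET_FIELD2(flags, RDMA_CQE_RESPONDER_IMM_FLG, 1);
        return (flags);
}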

static inline int
qlnxr_get_dmac(struct qlnxr_dev *dev, struct ib_ah_attr *ah_attr, u8 *mac_addr)
{
#ifdef DEFINE_NO_IP_BASED_GIDS
        u8 *guid = &ah_attr->grh.dgid.raw[8]; /* the GID's low 64 bits hold the GUID */
#endif
        union ib_gid zero_sgid = { { 0 } };
        struct in6_addr in6;

        if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
                memset(mac_addr, 0x00, ETH_ALEN);
                return -EINVAL;
        }

        memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));

#ifdef DEFINE_NO_IP_BASED_GIDS
        /* derive the MAC address from the GUID, i.e. EUI-64 to MAC */
        mac_addr[0] = guid[0] ^ 2; /* toggle the local/universal bit to local */
        mac_addr[1] = guid[1];
        mac_addr[2] = guid[2];
        mac_addr[3] = guid[5];
        mac_addr[4] = guid[6];
        mac_addr[5] = guid[7];
#else
        memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
#endif
        return 0;
}

extern int qlnx_rdma_ll2_set_mac_filter(void *rdma_ctx, uint8_t *old_mac_address,
                uint8_t *new_mac_address);

#define QLNX_IS_IWARP(rdev)	IS_IWARP(ECORE_LEADING_HWFN(rdev->cdev))
#define QLNX_IS_ROCE(rdev)	IS_ROCE(ECORE_LEADING_HWFN(rdev->cdev))

#define MAX_RXMIT_CONNS		16

#endif /* #ifndef __QLNXR_DEF_H_ */