1 /*
2  * Copyright (c) 2018-2019 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * File: qlnxr_verbs.c
30  */
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "qlnxr_def.h"
35 #include "rdma_common.h"
36 #include "qlnxr_roce.h"
37 #include "qlnxr_cm.h"
38 
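/*
 * Helper macros for building hardware descriptors: 64-bit addresses are
 * split into little-endian hi/lo 32-bit halves before being written into
 * the SGE/WQE structures that the firmware consumes.
 */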
39 #define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))
40 
41 #define TYPEPTR_ADDR_SET(type_ptr, field, vaddr)			\
42 	do {								\
43 		(type_ptr)->field.hi = cpu_to_le32(upper_32_bits(vaddr));\
44 		(type_ptr)->field.lo = cpu_to_le32(lower_32_bits(vaddr));\
45 	} while (0)
46 
47 #define RQ_SGE_SET(sge, vaddr, vlength, vflags)			\
48 	do {							\
49 		TYPEPTR_ADDR_SET(sge, addr, vaddr);		\
50 		(sge)->length = cpu_to_le32(vlength);		\
51 		(sge)->flags = cpu_to_le32(vflags);		\
52 	} while (0)
53 
54 #define SRQ_HDR_SET(hdr, vwr_id, num_sge)			\
55 	do {							\
56 		TYPEPTR_ADDR_SET(hdr, wr_id, vwr_id);		\
57 		(hdr)->num_sges = num_sge;			\
58 	} while (0)
59 
60 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)			\
61 	do {							\
62 		TYPEPTR_ADDR_SET(sge, addr, vaddr);		\
63 		(sge)->length = cpu_to_le32(vlength);		\
64 		(sge)->l_key = cpu_to_le32(vlkey);		\
65 	} while (0)
66 
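/*
 * NIPQUAD expands an IPv4 address into its four octets, for use with a
 * "%d.%d.%d.%d" format string in debug prints.
 */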
67 #define NIPQUAD(addr) \
68 	((unsigned char *)&addr)[0], \
69 	((unsigned char *)&addr)[1], \
70 	((unsigned char *)&addr)[2], \
71 	((unsigned char *)&addr)[3]
72 
73 static int
74 qlnxr_check_srq_params(struct qlnxr_dev *dev,
75 	struct ib_srq_init_attr *attrs);
76 
77 static int
78 qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
79 	struct qlnxr_srq *srq,
80 	struct qlnxr_create_srq_ureq *ureq,
81 	int access, int dmasync);
82 
83 static int
84 qlnxr_alloc_srq_kernel_params(struct qlnxr_srq *srq,
85 	struct qlnxr_dev *dev,
86 	struct ib_srq_init_attr *init_attr);
87 
88 static int
89 qlnxr_copy_srq_uresp(struct qlnxr_dev *dev,
90 	struct qlnxr_srq *srq,
91 	struct ib_udata *udata);
92 
93 static void
94 qlnxr_free_srq_user_params(struct qlnxr_srq *srq);
95 
96 static void
97 qlnxr_free_srq_kernel_params(struct qlnxr_srq *srq);
98 
99 static u32
100 qlnxr_srq_elem_left(struct qlnxr_srq_hwq_info *hw_srq);
101 
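/*
 * iWARP does not use a GID table; the single GID reported here is simply
 * the primary MAC address of the port, zero-padded to GID size.
 */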
102 int
103 qlnxr_iw_query_gid(struct ib_device *ibdev, u8 port, int index,
104 	union ib_gid *sgid)
105 {
106 	struct qlnxr_dev	*dev;
107 	qlnx_host_t		*ha;
108 
109 	dev = get_qlnxr_dev(ibdev);
110 	ha = dev->ha;
111 
112 	QL_DPRINT12(ha, "enter\n");
113 
114 	memset(sgid->raw, 0, sizeof(sgid->raw));
115 
116 	memcpy(sgid->raw, dev->ha->primary_mac, sizeof (dev->ha->primary_mac));
117 
118 	QL_DPRINT12(ha, "exit\n");
119 
120 	return 0;
121 }
122 
123 int
124 qlnxr_query_gid(struct ib_device *ibdev, u8 port, int index,
125 	union ib_gid *sgid)
126 {
127 	struct qlnxr_dev	*dev;
128 	qlnx_host_t		*ha;
129 
130 	dev = get_qlnxr_dev(ibdev);
131 	ha = dev->ha;
132 	QL_DPRINT12(ha, "enter index: %d\n", index);
133 #if 0
134 	int ret = 0;
135 	/* @@@: if DEFINE_ROCE_GID_TABLE to be used here */
136 	//if (!rdma_cap_roce_gid_table(ibdev, port)) {
137 	if (!(rdma_protocol_roce(ibdev, port) &&
138 		ibdev->add_gid && ibdev->del_gid)) {
139 		QL_DPRINT11(ha, "acquire gid failed\n");
140 		return -ENODEV;
141 	}
142 
143 	ret = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
144 	if (ret == -EAGAIN) {
145 		memcpy(sgid, &zgid, sizeof(*sgid));
146 		return 0;
147 	}
148 #endif
149 	if ((index >= QLNXR_MAX_SGID) || (index < 0)) {
150 		QL_DPRINT12(ha, "invalid gid index %d\n", index);
151 		memset(sgid, 0, sizeof(*sgid));
152 		return -EINVAL;
153 	}
154 	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
155 
156 	QL_DPRINT12(ha, "exit : %p\n", sgid);
157 
158 	return 0;
159 }
160 
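/*
 * SRQ creation: validate the requested attributes, build the WQE PBL either
 * from pinned user memory (udata path) or from a kernel ecore chain, create
 * the SRQ in firmware, and for user SRQs copy the response back to user
 * space.
 */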
161 int
162 qlnxr_create_srq(struct ib_srq *ibsrq,
163 		 struct ib_srq_init_attr *init_attr,
164 		 struct ib_udata *udata)
165 {
166 	struct qlnxr_dev	*dev;
167 	qlnx_host_t		*ha;
168 	struct ecore_rdma_destroy_srq_in_params destroy_in_params;
169 	struct ecore_rdma_create_srq_out_params out_params;
170 	struct ecore_rdma_create_srq_in_params in_params;
171 	u64 pbl_base_addr, phy_prod_pair_addr;
172 	struct qlnxr_srq_hwq_info *hw_srq;
173 	struct qlnxr_ucontext *ctx;
174 	struct qlnxr_create_srq_ureq ureq;
175 	u32 page_cnt, page_size;
176 	struct qlnxr_srq *srq = get_qlnxr_srq(ibsrq);
177 	int ret = 0;
178 
179 	dev = get_qlnxr_dev(ibsrq->device);
180 	ha = dev->ha;
181 
182 	QL_DPRINT12(ha, "enter\n");
183 
	ret = qlnxr_check_srq_params(dev, init_attr);
	if (ret)
		goto err0;
185 
186 	srq->dev = dev;
187 	hw_srq = &srq->hw_srq;
188 	spin_lock_init(&srq->lock);
189 	memset(&in_params, 0, sizeof(in_params));
190 
191 	if (udata) {
192 		ctx = rdma_udata_to_drv_context(
193 		    udata, struct qlnxr_ucontext, ibucontext);
194 
195 		memset(&ureq, 0, sizeof(ureq));
196 		if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
197 			udata->inlen))) {
198 			QL_DPRINT11(ha, "problem"
199 				" copying data from user space\n");
200 			goto err0;
201 		}
202 
203 		ret = qlnxr_init_srq_user_params(&ctx->ibucontext, srq, &ureq, 0, 0);
204 		if (ret)
205 			goto err0;
206 
207 		page_cnt = srq->usrq.pbl_info.num_pbes;
208 		pbl_base_addr = srq->usrq.pbl_tbl->pa;
209 		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
210 		// @@@ : if DEFINE_IB_UMEM_PAGE_SHIFT
211 		// page_size = BIT(srq->usrq.umem->page_shift);
212 		// else
213 		page_size = srq->usrq.umem->page_size;
214 	} else {
215 		struct ecore_chain *pbl;
216 		ret = qlnxr_alloc_srq_kernel_params(srq, dev, init_attr);
217 		if (ret)
218 			goto err0;
219 		pbl = &hw_srq->pbl;
220 
221 		page_cnt = ecore_chain_get_page_cnt(pbl);
222 		pbl_base_addr = ecore_chain_get_pbl_phys(pbl);
223 		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
224 		page_size = pbl->elem_per_page << 4;
225 	}
226 
227 	in_params.pd_id = get_qlnxr_pd(ibsrq->pd)->pd_id;
228 	in_params.pbl_base_addr = pbl_base_addr;
229 	in_params.prod_pair_addr = phy_prod_pair_addr;
230 	in_params.num_pages = page_cnt;
231 	in_params.page_size = page_size;
232 
233 	ret = ecore_rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
234 	if (ret)
235 		goto err1;
236 
237 	srq->srq_id = out_params.srq_id;
238 
239 	if (udata) {
240 		ret = qlnxr_copy_srq_uresp(dev, srq, udata);
241 		if (ret)
242 			goto err2;
243 	}
244 
245 	QL_DPRINT12(ha, "created srq with srq_id = 0x%0x\n", srq->srq_id);
246 	return (0);
247 err2:
	memset(&destroy_in_params, 0, sizeof(destroy_in_params));
249 	destroy_in_params.srq_id = srq->srq_id;
250 	ecore_rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
251 
252 err1:
253 	if (udata)
254 		qlnxr_free_srq_user_params(srq);
255 	else
256 		qlnxr_free_srq_kernel_params(srq);
257 
258 err0:
259 	return (-EFAULT);
260 }
261 
262 void
263 qlnxr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
264 {
265 	struct qlnxr_dev	*dev;
266 	struct qlnxr_srq	*srq;
267 	qlnx_host_t		*ha;
268 	struct ecore_rdma_destroy_srq_in_params in_params;
269 
270 	srq = get_qlnxr_srq(ibsrq);
271 	dev = srq->dev;
272 	ha = dev->ha;
273 
274 	memset(&in_params, 0, sizeof(in_params));
275 	in_params.srq_id = srq->srq_id;
276 
277 	ecore_rdma_destroy_srq(dev->rdma_ctx, &in_params);
278 
279 	if (ibsrq->pd->uobject && ibsrq->pd->uobject->context)
280 		qlnxr_free_srq_user_params(srq);
281 	else
282 		qlnxr_free_srq_kernel_params(srq);
283 
284 	QL_DPRINT12(ha, "destroyed srq_id=0x%0x\n", srq->srq_id);
285 }
286 
287 int
288 qlnxr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
289 	enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
290 {
291 	struct qlnxr_dev	*dev;
292 	struct qlnxr_srq	*srq;
293 	qlnx_host_t		*ha;
294 	struct ecore_rdma_modify_srq_in_params in_params;
295 	int ret = 0;
296 
297 	srq = get_qlnxr_srq(ibsrq);
298 	dev = srq->dev;
299 	ha = dev->ha;
300 
301 	QL_DPRINT12(ha, "enter\n");
302 	if (attr_mask & IB_SRQ_MAX_WR) {
		QL_DPRINT12(ha, "SRQ resize is not supported; invalid"
			" attribute mask=0x%x specified for %p\n",
			attr_mask, srq);
305 		return -EINVAL;
306 	}
307 
308 	if (attr_mask & IB_SRQ_LIMIT) {
309 		if (attr->srq_limit >= srq->hw_srq.max_wr) {
310 			QL_DPRINT12(ha, "invalid srq_limit=0x%x"
311 				" (max_srq_limit = 0x%x)\n",
312 			       attr->srq_limit, srq->hw_srq.max_wr);
313 			return -EINVAL;
314 		}
315 		memset(&in_params, 0, sizeof(in_params));
316 		in_params.srq_id = srq->srq_id;
317 		in_params.wqe_limit = attr->srq_limit;
318 		ret = ecore_rdma_modify_srq(dev->rdma_ctx, &in_params);
319 		if (ret)
320 			return ret;
321 	}
322 
323 	QL_DPRINT12(ha, "modified srq with srq_id = 0x%0x\n", srq->srq_id);
324 	return 0;
325 }
326 
327 int
328 qlnxr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
329 {
330 	struct qlnxr_dev	*dev;
331 	struct qlnxr_srq	*srq;
332 	qlnx_host_t		*ha;
333 	struct ecore_rdma_device *qattr;
334 	srq = get_qlnxr_srq(ibsrq);
335 	dev = srq->dev;
336 	ha = dev->ha;
337 	//qattr = &dev->attr;
338 	qattr = ecore_rdma_query_device(dev->rdma_ctx);
339 	QL_DPRINT12(ha, "enter\n");
340 
341 	if (!dev->rdma_ctx) {
342 		QL_DPRINT12(ha, "called with invalid params"
343 			" rdma_ctx is NULL\n");
344 		return -EINVAL;
345 	}
346 
347 	srq_attr->srq_limit = qattr->max_srq;
348 	srq_attr->max_wr = qattr->max_srq_wr;
349 	srq_attr->max_sge = qattr->max_sge;
350 
351 	QL_DPRINT12(ha, "exit\n");
352 	return 0;
353 }
354 
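/*
 * The wr_prod_cnt/wr_cons_cnt pair tracks how many WRs are outstanding in
 * the SRQ; qlnxr_srq_elem_left() (declared above) is expected to use their
 * difference to decide whether another WR can be posted.
 */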
355 /* Increment srq wr producer by one */
356 static
357 void qlnxr_inc_srq_wr_prod (struct qlnxr_srq_hwq_info *info)
358 {
359 	info->wr_prod_cnt++;
360 }
361 
362 /* Increment srq wr consumer by one */
363 static
364 void qlnxr_inc_srq_wr_cons(struct qlnxr_srq_hwq_info *info)
365 {
366         info->wr_cons_cnt++;
367 }
368 
369 /* get_port_immutable verb is not available in FreeBSD */
370 #if 0
371 int
372 qlnxr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
373 	struct ib_port_immutable *immutable)
374 {
375 	struct qlnxr_dev                *dev;
376 	qlnx_host_t                     *ha;
377 	dev = get_qlnxr_dev(ibdev);
378 	ha = dev->ha;
379 
380 	QL_DPRINT12(ha, "entered but not implemented!!!\n");
381 }
382 #endif
383 
384 int
385 qlnxr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
386 	const struct ib_recv_wr **bad_wr)
387 {
388 	struct qlnxr_dev	*dev;
389 	struct qlnxr_srq	*srq;
390 	qlnx_host_t		*ha;
391 	struct qlnxr_srq_hwq_info *hw_srq;
392 	struct ecore_chain *pbl;
393 	unsigned long flags;
394 	int status = 0;
395 	u32 num_sge, offset;
396 
397 	srq = get_qlnxr_srq(ibsrq);
398 	dev = srq->dev;
399 	ha = dev->ha;
400 	hw_srq = &srq->hw_srq;
401 
402 	QL_DPRINT12(ha, "enter\n");
403 	spin_lock_irqsave(&srq->lock, flags);
404 
405 	pbl = &srq->hw_srq.pbl;
406 	while (wr) {
407 		struct rdma_srq_wqe_header *hdr;
408 		int i;
409 
410 		if (!qlnxr_srq_elem_left(hw_srq) ||
411 		    wr->num_sge > srq->hw_srq.max_sges) {
412 			QL_DPRINT11(ha, "WR cannot be posted"
413 			    " (%d, %d) || (%d > %d)\n",
414 			    hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
415 			    wr->num_sge, srq->hw_srq.max_sges);
416 			status = -ENOMEM;
417 			*bad_wr = wr;
418 			break;
419 		}
420 
421 		hdr = ecore_chain_produce(pbl);
422 		num_sge = wr->num_sge;
423 		/* Set number of sge and WR id in header */
424 		SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
425 
		/* The PBL is maintained at WR granularity, so advance the
		 * WR producer once for every WR that is posted.
		 */
429 		qlnxr_inc_srq_wr_prod(hw_srq);
430 		hw_srq->wqe_prod++;
431 		hw_srq->sge_prod++;
432 
433 		QL_DPRINT12(ha, "SRQ WR : SGEs: %d with wr_id[%d] = %llx\n",
434 			wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
435 
436 		for (i = 0; i < wr->num_sge; i++) {
437 			struct rdma_srq_sge *srq_sge =
438 			    ecore_chain_produce(pbl);
439 			/* Set SGE length, lkey and address */
440 			SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
441 				wr->sg_list[i].length, wr->sg_list[i].lkey);
442 
443 			QL_DPRINT12(ha, "[%d]: len %d, key %x, addr %x:%x\n",
444 				i, srq_sge->length, srq_sge->l_key,
445 				srq_sge->addr.hi, srq_sge->addr.lo);
446 			hw_srq->sge_prod++;
447 		}
448 		wmb();
		/*
		 * The SRQ producer pair is 8 bytes: the SGE producer lives in
		 * the first 4 bytes and the WQE producer in the next 4 bytes.
		 */
		*(srq->hw_srq.virt_prod_pair_addr) = hw_srq->sge_prod;
		offset = offsetof(struct rdma_srq_producers, wqe_prod);
		*(u32 *)((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
			hw_srq->wqe_prod;
458 		/* Flush prod after updating it */
459 		wmb();
460 		wr = wr->next;
461 	}
462 
463 	QL_DPRINT12(ha, "Elements in SRQ: %d\n",
464 		ecore_chain_get_elem_left(pbl));
465 
466 	spin_unlock_irqrestore(&srq->lock, flags);
467 	QL_DPRINT12(ha, "exit\n");
468 	return status;
469 }
470 
471 int
472 #if __FreeBSD_version < 1102000
473 qlnxr_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
474 #else
475 qlnxr_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
476 	struct ib_udata *udata)
477 #endif /* #if __FreeBSD_version < 1102000 */
478 
479 {
480 	struct qlnxr_dev		*dev;
481 	struct ecore_rdma_device	*qattr;
482 	qlnx_host_t			*ha;
483 
484 	dev = get_qlnxr_dev(ibdev);
485 	ha = dev->ha;
486 
487 	QL_DPRINT12(ha, "enter\n");
488 
489 #if __FreeBSD_version > 1102000
490 	if (udata->inlen || udata->outlen)
491 		return -EINVAL;
492 #endif /* #if __FreeBSD_version > 1102000 */
493 
494 	if (dev->rdma_ctx == NULL) {
495 		return -EINVAL;
496 	}
497 
498 	qattr = ecore_rdma_query_device(dev->rdma_ctx);
499 
500 	memset(attr, 0, sizeof *attr);
501 
502 	attr->fw_ver = qattr->fw_ver;
503 	attr->sys_image_guid = qattr->sys_image_guid;
504 	attr->max_mr_size = qattr->max_mr_size;
505 	attr->page_size_cap = qattr->page_size_caps;
506 	attr->vendor_id = qattr->vendor_id;
507 	attr->vendor_part_id = qattr->vendor_part_id;
508 	attr->hw_ver = qattr->hw_ver;
509 	attr->max_qp = qattr->max_qp;
510 	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
511 					IB_DEVICE_RC_RNR_NAK_GEN |
512 					IB_DEVICE_LOCAL_DMA_LKEY |
513 					IB_DEVICE_MEM_MGT_EXTENSIONS;
514 
515 	attr->max_sge = qattr->max_sge;
516 	attr->max_sge_rd = qattr->max_sge;
517 	attr->max_cq = qattr->max_cq;
518 	attr->max_cqe = qattr->max_cqe;
519 	attr->max_mr = qattr->max_mr;
520 	attr->max_mw = qattr->max_mw;
521 	attr->max_pd = qattr->max_pd;
522 	attr->atomic_cap = dev->atomic_cap;
523 	attr->max_fmr = qattr->max_fmr;
524 	attr->max_map_per_fmr = 16; /* TBD: FMR */
525 
	/* There is an implicit assumption in some of the ib_xxx apps that the
	 * qp_rd_atom is smaller than the qp_init_rd_atom. Specifically, during
	 * connection establishment the qp_rd_atom is passed to the other side
	 * and used as init_rd_atom without checking the device capabilities
	 * for init_rd_atom. For this reason, we set qp_rd_atom to the minimum
	 * of the two. There is an additional assumption in the mlx4 driver
	 * that the values are powers of two; fls is performed on (value - 1),
	 * which yields a larger power of two for values that are not already
	 * a power of two. This should be fixed in the mlx4 driver, but until
	 * then we report a value that is a power of two here.
	 */
537 	attr->max_qp_init_rd_atom =
538 		1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
539 	attr->max_qp_rd_atom =
540 		min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
541 		    attr->max_qp_init_rd_atom);
542 
543 	attr->max_srq = qattr->max_srq;
544 	attr->max_srq_sge = qattr->max_srq_sge;
545 	attr->max_srq_wr = qattr->max_srq_wr;
546 
547 	/* TODO: R&D to more properly configure the following */
548 	attr->local_ca_ack_delay = qattr->dev_ack_delay;
549 	attr->max_fast_reg_page_list_len = qattr->max_mr/8;
550 	attr->max_pkeys = QLNXR_ROCE_PKEY_MAX;
551 	attr->max_ah = qattr->max_ah;
552 
553 	QL_DPRINT12(ha, "exit\n");
554 	return 0;
555 }
556 
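/*
 * Map an Ethernet link speed in Mbps to the closest InfiniBand speed/width
 * pair that ib_core understands; unknown speeds fall back to SDR x1.
 */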
557 static inline void
558 get_link_speed_and_width(int speed, uint8_t *ib_speed, uint8_t *ib_width)
559 {
560 	switch (speed) {
561 	case 1000:
562 		*ib_speed = IB_SPEED_SDR;
563 		*ib_width = IB_WIDTH_1X;
564 		break;
565 	case 10000:
566 		*ib_speed = IB_SPEED_QDR;
567 		*ib_width = IB_WIDTH_1X;
568 		break;
569 
570 	case 20000:
571 		*ib_speed = IB_SPEED_DDR;
572 		*ib_width = IB_WIDTH_4X;
573 		break;
574 
575 	case 25000:
576 		*ib_speed = IB_SPEED_EDR;
577 		*ib_width = IB_WIDTH_1X;
578 		break;
579 
580 	case 40000:
581 		*ib_speed = IB_SPEED_QDR;
582 		*ib_width = IB_WIDTH_4X;
583 		break;
584 
585 	case 50000:
586 		*ib_speed = IB_SPEED_QDR;
587 		*ib_width = IB_WIDTH_4X; // TODO doesn't add up to 50...
588 		break;
589 
590 	case 100000:
591 		*ib_speed = IB_SPEED_EDR;
592 		*ib_width = IB_WIDTH_4X;
593 		break;
594 
595 	default:
596 		/* Unsupported */
597 		*ib_speed = IB_SPEED_SDR;
598 		*ib_width = IB_WIDTH_1X;
599 	}
600 	return;
601 }
602 
603 int
604 qlnxr_query_port(struct ib_device *ibdev, uint8_t port,
605 	struct ib_port_attr *attr)
606 {
607 	struct qlnxr_dev	*dev;
608 	struct ecore_rdma_port	*rdma_port;
609 	qlnx_host_t		*ha;
610 
611 	dev = get_qlnxr_dev(ibdev);
612 	ha = dev->ha;
613 
614 	QL_DPRINT12(ha, "enter\n");
615 
616 	if (port > 1) {
617 		QL_DPRINT12(ha, "port [%d] > 1 \n", port);
618 		return -EINVAL;
619 	}
620 
621 	if (dev->rdma_ctx == NULL) {
622 		QL_DPRINT12(ha, "rdma_ctx == NULL\n");
623 		return -EINVAL;
624 	}
625 
626 	rdma_port = ecore_rdma_query_port(dev->rdma_ctx);
627 	memset(attr, 0, sizeof *attr);
628 
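	/* phys_state follows the IB PortPhysicalState encoding:
	 * 5 = LinkUp, 3 = Disabled.
	 */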
629 	if (rdma_port->port_state == ECORE_RDMA_PORT_UP) {
630 		attr->state = IB_PORT_ACTIVE;
631 		attr->phys_state = 5;
632 	} else {
633 		attr->state = IB_PORT_DOWN;
634 		attr->phys_state = 3;
635 	}
636 
637 	attr->max_mtu = IB_MTU_4096;
638 	attr->active_mtu = iboe_get_mtu(dev->ha->ifp->if_mtu);
639 	attr->lid = 0;
640 	attr->lmc = 0;
641 	attr->sm_lid = 0;
642 	attr->sm_sl = 0;
643 	attr->port_cap_flags = 0;
644 
645 	if (QLNX_IS_IWARP(dev)) {
646 		attr->gid_tbl_len = 1;
647 		attr->pkey_tbl_len = 1;
648 	} else {
649 		attr->gid_tbl_len = QLNXR_MAX_SGID;
650 		attr->pkey_tbl_len = QLNXR_ROCE_PKEY_TABLE_LEN;
651 	}
652 
653 	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
654 	attr->qkey_viol_cntr = 0;
655 
656 	get_link_speed_and_width(rdma_port->link_speed,
657 				 &attr->active_speed, &attr->active_width);
658 
659 	attr->max_msg_sz = rdma_port->max_msg_size;
660 	attr->max_vl_num = 4; /* TODO -> figure this one out... */
661 
662 	QL_DPRINT12(ha, "state = %d phys_state = %d "
663 		" link_speed = %d active_speed = %d active_width = %d"
664 		" attr->gid_tbl_len = %d attr->pkey_tbl_len = %d"
665 		" max_msg_sz = 0x%x max_vl_num = 0x%x \n",
666 		attr->state, attr->phys_state,
667 		rdma_port->link_speed, attr->active_speed,
668 		attr->active_width, attr->gid_tbl_len, attr->pkey_tbl_len,
669 		attr->max_msg_sz, attr->max_vl_num);
670 
671 	QL_DPRINT12(ha, "exit\n");
672 	return 0;
673 }
674 
675 int
676 qlnxr_modify_port(struct ib_device *ibdev, uint8_t port, int mask,
677 	struct ib_port_modify *props)
678 {
679 	struct qlnxr_dev	*dev;
680 	qlnx_host_t		*ha;
681 
682 	dev = get_qlnxr_dev(ibdev);
683 	ha = dev->ha;
684 
685 	QL_DPRINT12(ha, "enter\n");
686 
687 	if (port > 1) {
688 		QL_DPRINT12(ha, "port (%d) > 1\n", port);
689 		return -EINVAL;
690 	}
691 
692 	QL_DPRINT12(ha, "exit\n");
693 	return 0;
694 }
695 
696 enum rdma_link_layer
697 qlnxr_link_layer(struct ib_device *ibdev, uint8_t port_num)
698 {
699 	struct qlnxr_dev	*dev;
700 	qlnx_host_t		*ha;
701 
702 	dev = get_qlnxr_dev(ibdev);
703 	ha = dev->ha;
704 
705 	QL_DPRINT12(ha, "ibdev = %p port_num = 0x%x\n", ibdev, port_num);
706 
707         return IB_LINK_LAYER_ETHERNET;
708 }
709 
710 int
711 qlnxr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
712 {
713 	struct ib_device *ibdev = ibpd->device;
714 	struct qlnxr_pd		*pd = get_qlnxr_pd(ibpd);
715 	u16			pd_id;
716 	int			rc;
717 	struct qlnxr_dev	*dev;
718 	qlnx_host_t		*ha;
719 
720 	dev = get_qlnxr_dev(ibdev);
721 	ha = dev->ha;
722 
723 	QL_DPRINT12(ha, "ibdev = %p udata = %p enter\n", ibdev, udata);
724 
725 	if (dev->rdma_ctx == NULL) {
726 		QL_DPRINT11(ha, "dev->rdma_ctx = NULL\n");
727 		rc = -1;
728 		goto err;
729 	}
730 
731 	rc = ecore_rdma_alloc_pd(dev->rdma_ctx, &pd_id);
732 	if (rc)	{
733 		QL_DPRINT11(ha, "ecore_rdma_alloc_pd failed\n");
734 		goto err;
735 	}
736 
737 	pd->pd_id = pd_id;
738 
739 	if (udata) {
740 		rc = ib_copy_to_udata(udata, &pd->pd_id, sizeof(pd->pd_id));
741 		if (rc) {
742 			QL_DPRINT11(ha, "ib_copy_to_udata failed\n");
743 			ecore_rdma_free_pd(dev->rdma_ctx, pd_id);
744 			goto err;
745 		}
746 
747 		pd->uctx = rdma_udata_to_drv_context(
748 		    udata, struct qlnxr_ucontext, ibucontext);
749 		pd->uctx->pd = pd;
750 	}
751 
752 	atomic_add_rel_32(&dev->pd_count, 1);
753 	QL_DPRINT12(ha, "exit [pd, pd_id, pd_count] = [%p, 0x%x, %d]\n",
754 		pd, pd_id, dev->pd_count);
755 
756 	return (0);
757 
758 err:
759 	QL_DPRINT12(ha, "exit -1\n");
760 	return (rc);
761 }
762 
763 void
764 qlnxr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
765 {
766 	struct qlnxr_pd		*pd;
767 	struct qlnxr_dev	*dev;
768 	qlnx_host_t		*ha;
769 
770 	pd = get_qlnxr_pd(ibpd);
771 	dev = get_qlnxr_dev((ibpd->device));
772 	ha = dev->ha;
773 
774 	QL_DPRINT12(ha, "enter\n");
775 
776 	if (pd == NULL) {
777 		QL_DPRINT11(ha, "pd = NULL\n");
778 	} else {
779 		ecore_rdma_free_pd(dev->rdma_ctx, pd->pd_id);
780 		atomic_subtract_rel_32(&dev->pd_count, 1);
781 		QL_DPRINT12(ha, "exit [pd, pd_id, pd_count] = [%p, 0x%x, %d]\n",
782 			pd, pd->pd_id, dev->pd_count);
783 	}
784 
785 	QL_DPRINT12(ha, "exit\n");
786 }
787 
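/*
 * Doorbell offsets are expressed in PWM address units; DB_ADDR_SHIFT()
 * converts a DQ_PWM_OFFSET_* value into the byte offset that is added to
 * the doorbell BAR base.
 */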
788 #define ROCE_WQE_ELEM_SIZE	sizeof(struct rdma_sq_sge)
789 #define	RDMA_MAX_SGE_PER_SRQ	(4) /* Should be part of HSI */
790 /* Should be part of HSI */
791 #define RDMA_MAX_SRQ_WQE_SIZE	(RDMA_MAX_SGE_PER_SRQ + 1) /* +1 for header */
792 #define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
793 
794 static void qlnxr_cleanup_user(struct qlnxr_dev *, struct qlnxr_qp *);
795 static void qlnxr_cleanup_kernel(struct qlnxr_dev *, struct qlnxr_qp *);
796 
797 int
798 qlnxr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
799 {
800 	struct qlnxr_dev	*dev;
801 	qlnx_host_t		*ha;
802 
803 	dev = get_qlnxr_dev(ibdev);
804 	ha = dev->ha;
805 
806 	QL_DPRINT12(ha, "enter index = 0x%x\n", index);
807 
	if (index >= QLNXR_ROCE_PKEY_TABLE_LEN)
809 		return -EINVAL;
810 
811 	*pkey = QLNXR_ROCE_PKEY_DEFAULT;
812 
813 	QL_DPRINT12(ha, "exit\n");
814 	return 0;
815 }
816 
817 static inline bool
818 qlnxr_get_vlan_id_qp(qlnx_host_t *ha, struct ib_qp_attr *attr, int attr_mask,
819        u16 *vlan_id)
820 {
821 	bool ret = false;
822 
823 	QL_DPRINT12(ha, "enter \n");
824 
825 	*vlan_id = 0;
826 
827 #if __FreeBSD_version >= 1100000
828 	u16 tmp_vlan_id;
829 
830 #if __FreeBSD_version >= 1102000
831 	union ib_gid *dgid;
832 
833 	dgid = &attr->ah_attr.grh.dgid;
834 	tmp_vlan_id = (dgid->raw[11] << 8) | dgid->raw[12];
835 
836 	if (!(tmp_vlan_id & ~EVL_VLID_MASK)) {
837 		*vlan_id = tmp_vlan_id;
838 		ret = true;
839 	}
840 #else
841 	tmp_vlan_id = attr->vlan_id;
842 
843 	if ((attr_mask & IB_QP_VID) && (!(tmp_vlan_id & ~EVL_VLID_MASK))) {
844 		*vlan_id = tmp_vlan_id;
845 		ret = true;
846 	}
847 
#endif /* #if __FreeBSD_version >= 1102000 */
849 
850 #else
851 	ret = true;
852 
853 #endif /* #if __FreeBSD_version >= 1100000 */
854 
855 	QL_DPRINT12(ha, "exit vlan_id = 0x%x ret = %d \n", *vlan_id, ret);
856 
857 	return (ret);
858 }
859 
860 static inline void
861 get_gid_info(struct ib_qp *ibqp, struct ib_qp_attr *attr,
862 	int attr_mask,
863 	struct qlnxr_dev *dev,
864 	struct qlnxr_qp *qp,
865 	struct ecore_rdma_modify_qp_in_params *qp_params)
866 {
867 	int		i;
868 	qlnx_host_t	*ha;
869 
870 	ha = dev->ha;
871 
872 	QL_DPRINT12(ha, "enter\n");
873 
874 	memcpy(&qp_params->sgid.bytes[0],
875 	       &dev->sgid_tbl[qp->sgid_idx].raw[0],
876 	       sizeof(qp_params->sgid.bytes));
877 	memcpy(&qp_params->dgid.bytes[0],
878 	       &attr->ah_attr.grh.dgid.raw[0],
879 	       sizeof(qp_params->dgid));
880 
881 	qlnxr_get_vlan_id_qp(ha, attr, attr_mask, &qp_params->vlan_id);
882 
883 	for (i = 0; i < (sizeof(qp_params->sgid.dwords)/sizeof(uint32_t)); i++) {
884 		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
885 		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
886 	}
887 
888 	QL_DPRINT12(ha, "exit\n");
889 	return;
890 }
891 
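/*
 * Each user context keeps a list of (physical address, length) ranges that
 * user space is allowed to mmap (e.g. the DPI doorbell window).
 * qlnxr_add_mmap() registers such a range and qlnxr_search_mmap() validates
 * an incoming mmap request against the list.
 */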
892 static int
893 qlnxr_add_mmap(struct qlnxr_ucontext *uctx, u64 phy_addr, unsigned long len)
894 {
895 	struct qlnxr_mm	*mm;
896 	qlnx_host_t	*ha;
897 
898 	ha = uctx->dev->ha;
899 
900 	QL_DPRINT12(ha, "enter\n");
901 
902 	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
903 	if (mm == NULL) {
904 		QL_DPRINT11(ha, "mm = NULL\n");
905 		return -ENOMEM;
906 	}
907 
908 	mm->key.phy_addr = phy_addr;
909 
	/* This function might be called with a length that is not a multiple
	 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
	 * enforces this granularity by rounding the requested size up if
	 * needed. When qlnxr_mmap() is called, it searches the list with the
	 * updated length as the key, so to prevent lookup failures the length
	 * is rounded up to PAGE_SIZE here as well.
	 */
917 	mm->key.len = roundup(len, PAGE_SIZE);
918 	INIT_LIST_HEAD(&mm->entry);
919 
920 	mutex_lock(&uctx->mm_list_lock);
921 	list_add(&mm->entry, &uctx->mm_head);
922 	mutex_unlock(&uctx->mm_list_lock);
923 
924 	QL_DPRINT12(ha, "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
925 		(unsigned long long)mm->key.phy_addr,
926 		(unsigned long)mm->key.len, uctx);
927 
928 	return 0;
929 }
930 
931 static bool
932 qlnxr_search_mmap(struct qlnxr_ucontext *uctx, u64 phy_addr, unsigned long len)
933 {
934 	bool		found = false;
935 	struct qlnxr_mm	*mm;
936 	qlnx_host_t	*ha;
937 
938 	ha = uctx->dev->ha;
939 
940 	QL_DPRINT12(ha, "enter\n");
941 
942 	mutex_lock(&uctx->mm_list_lock);
943 	list_for_each_entry(mm, &uctx->mm_head, entry) {
944 		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
945 			continue;
946 
947 		found = true;
948 		break;
949 	}
950 	mutex_unlock(&uctx->mm_list_lock);
951 
952 	QL_DPRINT12(ha,
953 		"searched for (addr=0x%llx,len=0x%lx) for ctx=%p, found=%d\n",
		(unsigned long long)phy_addr, len, uctx, found);
955 
956 	return found;
957 }
958 
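/*
 * Allocate a user context: reserve a DPI (doorbell page) for the process,
 * report queue and SGE limits back through udata, and register the doorbell
 * window so the process can later mmap() it.
 */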
959 int
960 qlnxr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
961 {
962         int rc;
963         struct qlnxr_ucontext *ctx = get_qlnxr_ucontext(uctx);
964         struct qlnxr_alloc_ucontext_resp uresp;
965         struct qlnxr_dev *dev = get_qlnxr_dev(uctx->device);
966         qlnx_host_t *ha = dev->ha;
967         struct ecore_rdma_add_user_out_params oparams;
968 
969         if (!udata)
970                 return -EFAULT;
971 
972 	rc = ecore_rdma_add_user(dev->rdma_ctx, &oparams);
973 	if (rc) {
974 		QL_DPRINT12(ha,
			"Failed to allocate a DPI for a new RoCE application, "
			"rc = %d. To overcome this, consider increasing the "
			"number of DPIs, increasing the doorbell BAR size, or "
			"closing unnecessary RoCE applications. To increase "
			"the number of DPIs consult the README\n", rc);
981 		goto err;
982 	}
983 
984 	ctx->dpi = oparams.dpi;
985 	ctx->dpi_addr = oparams.dpi_addr;
986 	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
987 	ctx->dpi_size = oparams.dpi_size;
988 	INIT_LIST_HEAD(&ctx->mm_head);
989 	mutex_init(&ctx->mm_list_lock);
990 
991 	memset(&uresp, 0, sizeof(uresp));
992 	uresp.dpm_enabled = offsetof(struct qlnxr_alloc_ucontext_resp, dpm_enabled)
993 				< udata->outlen ? dev->user_dpm_enabled : 0; //TODO: figure this out
994 	uresp.wids_enabled = offsetof(struct qlnxr_alloc_ucontext_resp, wids_enabled)
995 				< udata->outlen ? 1 : 0; //TODO: figure this out
996 	uresp.wid_count = offsetof(struct qlnxr_alloc_ucontext_resp, wid_count)
997 				< udata->outlen ? oparams.wid_count : 0; //TODO: figure this out
998         uresp.db_pa = ctx->dpi_phys_addr;
999         uresp.db_size = ctx->dpi_size;
1000         uresp.max_send_wr = dev->attr.max_sqe;
1001         uresp.max_recv_wr = dev->attr.max_rqe;
1002         uresp.max_srq_wr = dev->attr.max_srq_wr;
1003         uresp.sges_per_send_wr = QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
1004         uresp.sges_per_recv_wr = QLNXR_MAX_RQE_ELEMENTS_PER_RQE;
1005         uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
1006         uresp.max_cqes = QLNXR_MAX_CQES;
1007 
1008 	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1009 	if (rc)
1010 		goto err;
1011 
1012 	ctx->dev = dev;
1013 
1014 	rc = qlnxr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
1015 	if (rc)
1016 		goto err;
1017 	QL_DPRINT12(ha, "Allocated user context %p\n",
1018 		&ctx->ibucontext);
1019 
1020 	return (0);
1021 err:
1022 	return (rc);
1023 }
1024 
1025 void
1026 qlnxr_dealloc_ucontext(struct ib_ucontext *ibctx)
1027 {
1028         struct qlnxr_ucontext *uctx = get_qlnxr_ucontext(ibctx);
1029         struct qlnxr_dev *dev = uctx->dev;
1030         qlnx_host_t *ha = dev->ha;
1031         struct qlnxr_mm *mm, *tmp;
1032 
1033         QL_DPRINT12(ha, "Deallocating user context %p\n",
1034                         uctx);
1035 
1036         if (dev) {
1037                 ecore_rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
1038         }
1039 
1040         list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
1041                 QL_DPRINT12(ha, "deleted addr= 0x%llx, len = 0x%lx for"
1042                                 " ctx=%p\n",
1043                                 mm->key.phy_addr, mm->key.len, uctx);
1044                 list_del(&mm->entry);
1045                 kfree(mm);
1046         }
1047 }
1048 
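/*
 * mmap() handler for user contexts: only ranges previously registered via
 * qlnxr_add_mmap() (in practice the DPI doorbell window) may be mapped,
 * and doorbell pages are mapped write-combined.
 */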
1049 int
1050 qlnxr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
1051 {
1052 	struct qlnxr_ucontext	*ucontext = get_qlnxr_ucontext(context);
1053 	struct qlnxr_dev	*dev = get_qlnxr_dev((context->device));
1054 	unsigned long		vm_page = vma->vm_pgoff << PAGE_SHIFT;
1055 	u64 			unmapped_db;
1056 	unsigned long 		len = (vma->vm_end - vma->vm_start);
1057 	int 			rc = 0;
1058 	bool 			found;
1059 	qlnx_host_t		*ha;
1060 
1061 	ha = dev->ha;
1062 
1063 #if __FreeBSD_version > 1102000
1064 	unmapped_db = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
1065 #else
1066 	unmapped_db = dev->db_phys_addr;
1067 #endif /* #if __FreeBSD_version > 1102000 */
1068 
	QL_DPRINT12(ha, "qlnxr_mmap enter vm_page=0x%lx"
1070 		" vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
1071 		vm_page, vma->vm_pgoff, unmapped_db,
1072 		dev->db_size, len);
1073 
1074 	if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
1075 		QL_DPRINT11(ha, "Vma_start not page aligned "
1076 			"vm_start = %ld vma_end = %ld\n", vma->vm_start,
1077 			vma->vm_end);
1078 		return -EINVAL;
1079 	}
1080 
1081 	found = qlnxr_search_mmap(ucontext, vm_page, len);
1082 	if (!found) {
1083 		QL_DPRINT11(ha, "Vma_pgoff not found in mapped array = %ld\n",
1084 			vma->vm_pgoff);
1085 		return -EINVAL;
1086 	}
1087 
1088 	QL_DPRINT12(ha, "Mapping doorbell bar\n");
1089 
1090 #if __FreeBSD_version > 1102000
1091 
1092 	if ((vm_page < unmapped_db) ||
1093 		((vm_page + len) > (unmapped_db + ucontext->dpi_size))) {
1094 		QL_DPRINT11(ha, "failed pages are outside of dpi;"
1095 			"page address=0x%lx, unmapped_db=0x%lx, dpi_size=0x%x\n",
1096 			vm_page, unmapped_db, ucontext->dpi_size);
1097 		return -EINVAL;
1098 	}
1099 
1100 	if (vma->vm_flags & VM_READ) {
1101 		QL_DPRINT11(ha, "failed mmap, cannot map doorbell bar for read\n");
1102 		return -EINVAL;
1103 	}
1104 
1105 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1106 	rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
1107 			vma->vm_page_prot);
1108 
1109 #else
1110 
1111 	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
1112 		dev->db_size))) {
1113 		QL_DPRINT12(ha, "Mapping doorbell bar\n");
1114 
1115 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1116 
1117 		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1118 					    PAGE_SIZE, vma->vm_page_prot);
1119 	} else {
1120 		QL_DPRINT12(ha, "Mapping chains\n");
1121 		rc = io_remap_pfn_range(vma, vma->vm_start,
1122 					 vma->vm_pgoff, len, vma->vm_page_prot);
1123 	}
1124 
1125 #endif /* #if __FreeBSD_version > 1102000 */
1126 
1127 	QL_DPRINT12(ha, "exit [%d]\n", rc);
1128 	return rc;
1129 }
1130 
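/*
 * A "DMA MR" covers the whole DMA address space of the device, so no PBL
 * is built for it; only a TID is allocated and registered with dma_mr set.
 */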
1131 struct ib_mr *
1132 qlnxr_get_dma_mr(struct ib_pd *ibpd, int acc)
1133 {
1134 	struct qlnxr_mr		*mr;
1135 	struct qlnxr_dev	*dev = get_qlnxr_dev((ibpd->device));
1136 	struct qlnxr_pd		*pd = get_qlnxr_pd(ibpd);
1137 	int			rc;
1138 	qlnx_host_t		*ha;
1139 
1140 	ha = dev->ha;
1141 
1142 	QL_DPRINT12(ha, "enter\n");
1143 
1144 	if (acc & IB_ACCESS_MW_BIND) {
1145 		QL_DPRINT12(ha, "Unsupported access flags received for dma mr\n");
1146 	}
1147 
1148 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1149 	if (!mr) {
1150 		rc = -ENOMEM;
1151 		QL_DPRINT12(ha, "kzalloc(mr) failed %d\n", rc);
1152 		goto err0;
1153 	}
1154 
1155 	mr->type = QLNXR_MR_DMA;
1156 
1157 	rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
1158 	if (rc) {
1159 		QL_DPRINT12(ha, "ecore_rdma_alloc_tid failed %d\n", rc);
1160 		goto err1;
1161 	}
1162 
1163 	/* index only, 18 bit long, lkey = itid << 8 | key */
1164 	mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
1165 	mr->hw_mr.pd = pd->pd_id;
1166 	mr->hw_mr.local_read = 1;
1167 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
1168 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
1169 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1170 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
1171 	mr->hw_mr.dma_mr = true;
1172 
1173 	rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
1174 	if (rc) {
1175 		QL_DPRINT12(ha, "ecore_rdma_register_tid failed %d\n", rc);
1176 		goto err2;
1177 	}
1178 
1179 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1180 
1181 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
1182 		mr->hw_mr.remote_atomic) {
1183 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1184 	}
1185 
1186 	QL_DPRINT12(ha, "lkey = %x\n", mr->ibmr.lkey);
1187 
1188 	return &mr->ibmr;
1189 
1190 err2:
1191 	ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
1192 err1:
1193 	kfree(mr);
1194 err0:
1195 	QL_DPRINT12(ha, "exit [%d]\n", rc);
1196 
1197 	return ERR_PTR(rc);
1198 }
1199 
1200 static void
1201 qlnxr_free_pbl(struct qlnxr_dev *dev, struct qlnxr_pbl_info *pbl_info,
1202 	struct qlnxr_pbl *pbl)
1203 {
1204 	int		i;
1205 	qlnx_host_t	*ha;
1206 
1207 	ha = dev->ha;
1208 
1209 	QL_DPRINT12(ha, "enter\n");
1210 
1211 	for (i = 0; i < pbl_info->num_pbls; i++) {
1212 		if (!pbl[i].va)
1213 			continue;
1214 		qlnx_dma_free_coherent(&dev->ha->cdev, pbl[i].va, pbl[i].pa,
1215 			pbl_info->pbl_size);
1216 	}
1217 	kfree(pbl);
1218 
1219 	QL_DPRINT12(ha, "exit\n");
1220 	return;
1221 }
1222 
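/*
 * PBL (page buffer list) sizing: a PBL page holds 64-bit page addresses
 * (PBEs). When a region needs more PBEs than fit on one page of
 * MAX_FW_PBL_PAGE_SIZE, a two-layer PBL is used, where the first page
 * points to the pages that hold the actual PBEs.
 */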
1223 #define MIN_FW_PBL_PAGE_SIZE (4*1024)
1224 #define MAX_FW_PBL_PAGE_SIZE (64*1024)
1225 
1226 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
1227 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
1228 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE*MAX_PBES_ON_PAGE)
1229 
1230 static struct qlnxr_pbl *
1231 qlnxr_alloc_pbl_tbl(struct qlnxr_dev *dev,
1232 	struct qlnxr_pbl_info *pbl_info, gfp_t flags)
1233 {
1234 	void			*va;
1235 	dma_addr_t		pa;
1236 	dma_addr_t		*pbl_main_tbl;
1237 	struct qlnxr_pbl	*pbl_table;
1238 	int			i, rc = 0;
1239 	qlnx_host_t		*ha;
1240 
1241 	ha = dev->ha;
1242 
1243 	QL_DPRINT12(ha, "enter\n");
1244 
1245 	pbl_table = kzalloc(sizeof(*pbl_table) * pbl_info->num_pbls, flags);
1246 
1247 	if (!pbl_table) {
1248 		QL_DPRINT12(ha, "pbl_table = NULL\n");
1249 		return NULL;
1250 	}
1251 
1252 	for (i = 0; i < pbl_info->num_pbls; i++) {
1253 		va = qlnx_dma_alloc_coherent(&dev->ha->cdev, &pa, pbl_info->pbl_size);
1254 		if (!va) {
1255 			QL_DPRINT11(ha, "Failed to allocate pbl#%d\n", i);
1256 			rc = -ENOMEM;
1257 			goto err;
1258 		}
1259 		memset(va, 0, pbl_info->pbl_size);
1260 		pbl_table[i].va = va;
1261 		pbl_table[i].pa = pa;
1262 	}
1263 
	/* Two-layer PBLs: if we have more than one PBL, initialize the first
	 * one with the physical addresses of all the others.
	 */
1267 	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
1268 	for (i = 0; i < pbl_info->num_pbls - 1; i++)
1269 		pbl_main_tbl[i] = pbl_table[i + 1].pa;
1270 
1271 	QL_DPRINT12(ha, "exit\n");
1272 	return pbl_table;
1273 
1274 err:
1275 	qlnxr_free_pbl(dev, pbl_info, pbl_table);
1276 
1277 	QL_DPRINT12(ha, "exit with error\n");
1278 	return NULL;
1279 }
1280 
1281 static int
1282 qlnxr_prepare_pbl_tbl(struct qlnxr_dev *dev,
1283 	struct qlnxr_pbl_info *pbl_info,
1284 	u32 num_pbes,
1285 	int two_layer_capable)
1286 {
1287 	u32		pbl_capacity;
1288 	u32		pbl_size;
1289 	u32		num_pbls;
1290 	qlnx_host_t	*ha;
1291 
1292 	ha = dev->ha;
1293 
1294 	QL_DPRINT12(ha, "enter\n");
1295 
1296 	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
1297 		if (num_pbes > MAX_PBES_TWO_LAYER) {
1298 			QL_DPRINT11(ha, "prepare pbl table: too many pages %d\n",
1299 				num_pbes);
1300 			return -EINVAL;
1301 		}
1302 
1303 		/* calculate required pbl page size */
1304 		pbl_size = MIN_FW_PBL_PAGE_SIZE;
1305 		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
1306 			NUM_PBES_ON_PAGE(pbl_size);
1307 
1308 		while (pbl_capacity < num_pbes) {
1309 			pbl_size *= 2;
1310 			pbl_capacity = pbl_size / sizeof(u64);
1311 			pbl_capacity = pbl_capacity * pbl_capacity;
1312 		}
1313 
1314 		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++; /* One for layer 0 (it points to the other PBLs) */
1316 		pbl_info->two_layered = true;
1317 	} else {
1318 		/* One layered PBL */
1319 		num_pbls = 1;
1320 		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE, \
1321 				roundup_pow_of_two((num_pbes * sizeof(u64))));
1322 		pbl_info->two_layered = false;
1323 	}
1324 
1325 	pbl_info->num_pbls = num_pbls;
1326 	pbl_info->pbl_size = pbl_size;
1327 	pbl_info->num_pbes = num_pbes;
1328 
1329 	QL_DPRINT12(ha, "prepare pbl table: num_pbes=%d, num_pbls=%d pbl_size=%d\n",
1330 		pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
1331 
1332 	return 0;
1333 }
1334 
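/*
 * Walk the umem scatter/gather list and write one PBE (little-endian 64-bit
 * page address) per hardware page into the PBL, moving on to the next PBL
 * page whenever the current one fills up.
 */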
1335 static void
1336 qlnxr_populate_pbls(struct qlnxr_dev *dev, struct ib_umem *umem,
1337 	struct qlnxr_pbl *pbl, struct qlnxr_pbl_info *pbl_info)
1338 {
1339 	struct regpair		*pbe;
1340 	struct qlnxr_pbl	*pbl_tbl;
1341 	struct scatterlist	*sg;
1342 	int			shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
1343 	qlnx_host_t		*ha;
1344 
1345 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
1346         int                     i;
1347         struct                  ib_umem_chunk *chunk = NULL;
1348 #else
1349         int                     entry;
1350 #endif
1351 
1352 	ha = dev->ha;
1353 
1354 	QL_DPRINT12(ha, "enter\n");
1355 
1356 	if (!pbl_info) {
1357 		QL_DPRINT11(ha, "PBL_INFO not initialized\n");
1358 		return;
1359 	}
1360 
1361 	if (!pbl_info->num_pbes) {
1362 		QL_DPRINT11(ha, "pbl_info->num_pbes == 0\n");
1363 		return;
1364 	}
1365 
	/* If we have a two-layered PBL, the first PBL points to the rest of
	 * the PBLs and the first entry lies in the second PBL of the table.
	 */
1369 	if (pbl_info->two_layered)
1370 		pbl_tbl = &pbl[1];
1371 	else
1372 		pbl_tbl = pbl;
1373 
1374 	pbe = (struct regpair *)pbl_tbl->va;
1375 	if (!pbe) {
1376 		QL_DPRINT12(ha, "pbe is NULL\n");
1377 		return;
1378 	}
1379 
1380 	pbe_cnt = 0;
1381 
1382 	shift = ilog2(umem->page_size);
1383 
1384 #ifndef DEFINE_IB_UMEM_WITH_CHUNK
1385 
1386 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
1387 #else
1388 	list_for_each_entry(chunk, &umem->chunk_list, list) {
1389 		/* get all the dma regions from the chunk. */
1390 		for (i = 0; i < chunk->nmap; i++) {
1391 			sg = &chunk->page_list[i];
1392 #endif
1393 			pages = sg_dma_len(sg) >> shift;
1394 			for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
1395 				/* store the page address in pbe */
1396 				pbe->lo =
1397 				    cpu_to_le32(sg_dma_address(sg) +
1398 						(umem->page_size * pg_cnt));
1399 				pbe->hi =
1400 				    cpu_to_le32(upper_32_bits
1401 						((sg_dma_address(sg) +
1402 						  umem->page_size * pg_cnt)));
1403 
1404 				QL_DPRINT12(ha,
1405 					"Populate pbl table:"
1406 					" pbe->addr=0x%x:0x%x "
1407 					" pbe_cnt = %d total_num_pbes=%d"
1408 					" pbe=%p\n", pbe->lo, pbe->hi, pbe_cnt,
1409 					total_num_pbes, pbe);
1410 
				pbe_cnt++;
				total_num_pbes++;
1413 				pbe++;
1414 
1415 				if (total_num_pbes == pbl_info->num_pbes)
1416 					return;
1417 
1418 				/* if the given pbl is full storing the pbes,
1419 				 * move to next pbl.
1420 				 */
1421 				if (pbe_cnt ==
1422 					(pbl_info->pbl_size / sizeof(u64))) {
1423 					pbl_tbl++;
1424 					pbe = (struct regpair *)pbl_tbl->va;
1425 					pbe_cnt = 0;
1426 				}
1427 			}
1428 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
1429 		}
1430 #endif
1431 	}
1432 	QL_DPRINT12(ha, "exit\n");
1433 	return;
1434 }
1435 
1436 static void
1437 free_mr_info(struct qlnxr_dev *dev, struct mr_info *info)
1438 {
1439 	struct qlnxr_pbl *pbl, *tmp;
1440 	qlnx_host_t		*ha;
1441 
1442 	ha = dev->ha;
1443 
1444 	QL_DPRINT12(ha, "enter\n");
1445 
1446 	if (info->pbl_table)
1447 		list_add_tail(&info->pbl_table->list_entry,
1448 			      &info->free_pbl_list);
1449 
1450 	if (!list_empty(&info->inuse_pbl_list))
1451 		list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
1452 
1453 	list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
1454 		list_del(&pbl->list_entry);
1455 		qlnxr_free_pbl(dev, &info->pbl_info, pbl);
1456 	}
1457 	QL_DPRINT12(ha, "exit\n");
1458 
1459 	return;
1460 }
1461 
1462 static int
1463 qlnxr_init_mr_info(struct qlnxr_dev *dev, struct mr_info *info,
1464 	size_t page_list_len, bool two_layered)
1465 {
1466 	int			rc;
1467 	struct qlnxr_pbl	*tmp;
1468 	qlnx_host_t		*ha;
1469 
1470 	ha = dev->ha;
1471 
1472 	QL_DPRINT12(ha, "enter\n");
1473 
1474 	INIT_LIST_HEAD(&info->free_pbl_list);
1475 	INIT_LIST_HEAD(&info->inuse_pbl_list);
1476 
1477 	rc = qlnxr_prepare_pbl_tbl(dev, &info->pbl_info,
1478 				  page_list_len, two_layered);
1479 	if (rc) {
1480 		QL_DPRINT11(ha, "qlnxr_prepare_pbl_tbl [%d]\n", rc);
1481 		goto done;
1482 	}
1483 
1484 	info->pbl_table = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
1485 
1486 	if (!info->pbl_table) {
1487 		rc = -ENOMEM;
1488 		QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl returned NULL\n");
1489 		goto done;
1490 	}
1491 
1492 	QL_DPRINT12(ha, "pbl_table_pa = %pa\n", &info->pbl_table->pa);
1493 
	/* In the usual case we use 2 PBLs, so we add one to the free
	 * list and allocate another one.
	 */
1497 	tmp = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
1498 
1499 	if (!tmp) {
1500 		QL_DPRINT11(ha, "Extra PBL is not allocated\n");
1501 		goto done; /* it's OK if second allocation fails, so rc = 0*/
1502 	}
1503 
1504 	list_add_tail(&tmp->list_entry, &info->free_pbl_list);
1505 
1506 	QL_DPRINT12(ha, "extra pbl_table_pa = %pa\n", &tmp->pa);
1507 
1508 done:
1509 	if (rc)
1510 		free_mr_info(dev, info);
1511 
1512 	QL_DPRINT12(ha, "exit [%d]\n", rc);
1513 
1514 	return rc;
1515 }
1516 
1517 struct ib_mr *
1518 #if __FreeBSD_version >= 1102000
1519 qlnxr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
1520 	u64 usr_addr, int acc, struct ib_udata *udata)
1521 #else
1522 qlnxr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
1523 	u64 usr_addr, int acc, struct ib_udata *udata, int mr_id)
1524 #endif /* #if __FreeBSD_version >= 1102000 */
1525 {
1526 	int		rc = -ENOMEM;
1527 	struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
1528 	struct qlnxr_mr *mr;
1529 	struct qlnxr_pd *pd;
1530 	qlnx_host_t	*ha;
1531 
1532 	ha = dev->ha;
1533 
1534 	QL_DPRINT12(ha, "enter\n");
1535 
1536 	pd = get_qlnxr_pd(ibpd);
1537 
	QL_DPRINT12(ha, "register user mr pd = %d"
1539 		" start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
1540 		pd->pd_id, start, len, usr_addr, acc);
1541 
1542 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
1543 		QL_DPRINT11(ha,
1544 			"(acc & IB_ACCESS_REMOTE_WRITE &&"
1545 			" !(acc & IB_ACCESS_LOCAL_WRITE))\n");
1546 		return ERR_PTR(-EINVAL);
1547 	}
1548 
1549 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1550 	if (!mr) {
1551 		QL_DPRINT11(ha, "kzalloc(mr) failed\n");
1552 		return ERR_PTR(rc);
1553 	}
1554 
1555 	mr->type = QLNXR_MR_USER;
1556 
1557 	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
1558 	if (IS_ERR(mr->umem)) {
1559 		rc = -EFAULT;
1560 		QL_DPRINT11(ha, "ib_umem_get failed [%p]\n", mr->umem);
1561 		goto err0;
1562 	}
1563 
1564 	rc = qlnxr_init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
1565 	if (rc) {
1566 		QL_DPRINT11(ha,
1567 			"qlnxr_init_mr_info failed [%d]\n", rc);
1568 		goto err1;
1569 	}
1570 
1571 	qlnxr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
1572 			   &mr->info.pbl_info);
1573 
1574 	rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
1575 
1576 	if (rc) {
1577 		QL_DPRINT11(ha, "roce alloc tid returned an error %d\n", rc);
1578 		goto err1;
1579 	}
1580 
1581 	/* index only, 18 bit long, lkey = itid << 8 | key */
1582 	mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
1583 	mr->hw_mr.key = 0;
1584 	mr->hw_mr.pd = pd->pd_id;
1585 	mr->hw_mr.local_read = 1;
1586 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
1587 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
1588 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1589 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
1590 	mr->hw_mr.mw_bind = false; /* TBD MW BIND */
1591 	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
1592 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
1593 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
1594 	mr->hw_mr.page_size_log = ilog2(mr->umem->page_size); /* for the MR pages */
1595 
1596 #if __FreeBSD_version >= 1102000
1597 	mr->hw_mr.fbo = ib_umem_offset(mr->umem);
1598 #else
1599 	mr->hw_mr.fbo = mr->umem->offset;
1600 #endif
1601 	mr->hw_mr.length = len;
1602 	mr->hw_mr.vaddr = usr_addr;
1603 	mr->hw_mr.zbva = false; /* TBD figure when this should be true */
1604 	mr->hw_mr.phy_mr = false; /* Fast MR - True, Regular Register False */
1605 	mr->hw_mr.dma_mr = false;
1606 
1607 	rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
1608 	if (rc) {
1609 		QL_DPRINT11(ha, "roce register tid returned an error %d\n", rc);
1610 		goto err2;
1611 	}
1612 
1613 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1614 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
1615 		mr->hw_mr.remote_atomic)
1616 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1617 
1618 	QL_DPRINT12(ha, "register user mr lkey: %x\n", mr->ibmr.lkey);
1619 
1620 	return (&mr->ibmr);
1621 
1622 err2:
1623 	ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
1624 err1:
1625 	qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
1626 err0:
1627 	kfree(mr);
1628 
1629 	QL_DPRINT12(ha, "exit [%d]\n", rc);
1630 	return (ERR_PTR(rc));
1631 }
1632 
1633 int
1634 qlnxr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
1635 {
1636 	struct qlnxr_mr	*mr = get_qlnxr_mr(ib_mr);
1637 	struct qlnxr_dev *dev = get_qlnxr_dev((ib_mr->device));
1638 	int		rc = 0;
1639 	qlnx_host_t	*ha;
1640 
1641 	ha = dev->ha;
1642 
1643 	QL_DPRINT12(ha, "enter\n");
1644 
1645 	if ((mr->type != QLNXR_MR_DMA) && (mr->type != QLNXR_MR_FRMR))
1646 		qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
1647 
1648 	/* it could be user registered memory. */
1649 	if (mr->umem)
1650 		ib_umem_release(mr->umem);
1651 
1652 	kfree(mr->pages);
1653 
1654 	kfree(mr);
1655 
1656 	QL_DPRINT12(ha, "exit\n");
1657 	return rc;
1658 }
1659 
1660 static int
1661 qlnxr_copy_cq_uresp(struct qlnxr_dev *dev,
1662 	struct qlnxr_cq *cq, struct ib_udata *udata)
1663 {
1664 	struct qlnxr_create_cq_uresp	uresp;
1665 	int				rc;
1666 	qlnx_host_t			*ha;
1667 
1668 	ha = dev->ha;
1669 
1670 	QL_DPRINT12(ha, "enter\n");
1671 
1672 	memset(&uresp, 0, sizeof(uresp));
1673 
1674 	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
1675 	uresp.icid = cq->icid;
1676 
1677 	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1678 
1679 	if (rc) {
1680 		QL_DPRINT12(ha, "ib_copy_to_udata error cqid=0x%x[%d]\n",
1681 			cq->icid, rc);
1682 	}
1683 
1684 	QL_DPRINT12(ha, "exit [%d]\n", rc);
1685 	return rc;
1686 }
1687 
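/*
 * Advance the CQ consumer. When the last CQE in the chain is passed, the
 * expected toggle bit is flipped; this is how valid (new) CQEs are told
 * apart from stale ones after the ring wraps.
 */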
1688 static void
1689 consume_cqe(struct qlnxr_cq *cq)
1690 {
1691 
1692 	if (cq->latest_cqe == cq->toggle_cqe)
1693 		cq->pbl_toggle ^= RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK;
1694 
1695 	cq->latest_cqe = ecore_chain_consume(&cq->pbl);
1696 }
1697 
1698 static inline int
1699 qlnxr_align_cq_entries(int entries)
1700 {
1701 	u64 size, aligned_size;
1702 
1703 	/* We allocate an extra entry that we don't report to the FW.
1704 	 * Why?
1705 	 * The CQE size is 32 bytes but the FW writes in chunks of 64 bytes
	 * (for performance purposes). Allocating an extra entry and telling
	 * the FW we have fewer prevents overwriting the first entry in case
	 * of a wrap, i.e. when the FW writes the last entry and the
	 * application hasn't read the first one yet.
	 */
1711 	size = (entries + 1) * QLNXR_CQE_SIZE;
1712 
1713 	/* We align to PAGE_SIZE.
1714 	 * Why?
1715 	 * Since the CQ is going to be mapped and the mapping is anyhow in whole
1716 	 * kernel pages we benefit from the possibly extra CQEs.
1717 	 */
1718 	aligned_size = ALIGN(size, PAGE_SIZE);
1719 
1720 	/* note: for CQs created in user space the result of this function
1721 	 * should match the size mapped in user space
1722 	 */
1723 	return (aligned_size / QLNXR_CQE_SIZE);
1724 }
1725 
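/*
 * Pin a user-space buffer (ib_umem_get), size a PBL for it and, when
 * alloc_and_init is set, allocate the PBL pages and populate them with the
 * buffer's DMA addresses. Used for user CQs, SQs and RQs.
 */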
1726 static inline int
1727 qlnxr_init_user_queue(struct ib_ucontext *ib_ctx, struct qlnxr_dev *dev,
1728 	struct qlnxr_userq *q, u64 buf_addr, size_t buf_len,
1729 	int access, int dmasync, int alloc_and_init)
1730 {
1731 	int		page_cnt;
1732 	int		rc;
1733 	qlnx_host_t	*ha;
1734 
1735 	ha = dev->ha;
1736 
1737 	QL_DPRINT12(ha, "enter\n");
1738 
1739 	q->buf_addr = buf_addr;
1740 	q->buf_len = buf_len;
1741 
1742 	QL_DPRINT12(ha, "buf_addr : %llx, buf_len : %x, access : %x"
1743 	      " dmasync : %x\n", q->buf_addr, q->buf_len,
1744 		access, dmasync);
1745 
1746 	q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
1747 
1748 	if (IS_ERR(q->umem)) {
1749 		QL_DPRINT11(ha, "ib_umem_get failed [%lx]\n", PTR_ERR(q->umem));
1750 		return PTR_ERR(q->umem);
1751 	}
1752 
1753 	page_cnt = ib_umem_page_count(q->umem);
1754 	rc = qlnxr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt,
				  0 /* SQ and RQ don't support a dual-layer
				     * PBL. CQ may, but this is not yet
				     * implemented.
				     */);
1758 	if (rc) {
1759 		QL_DPRINT11(ha, "qlnxr_prepare_pbl_tbl failed [%d]\n", rc);
1760 		goto err;
1761 	}
1762 
1763 	if (alloc_and_init) {
1764 		q->pbl_tbl = qlnxr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
1765 
1766 		if (!q->pbl_tbl) {
1767 			QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl failed\n");
1768 			rc = -ENOMEM;
1769 			goto err;
1770 		}
1771 
1772 		qlnxr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
1773 	} else {
1774 		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
1775 
1776 		if (!q->pbl_tbl) {
1777 			QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl failed\n");
1778 			rc = -ENOMEM;
1779 			goto err;
1780 		}
1781 	}
1782 
1783 	QL_DPRINT12(ha, "exit\n");
1784 	return 0;
1785 
1786 err:
1787 	ib_umem_release(q->umem);
1788 	q->umem = NULL;
1789 
1790 	QL_DPRINT12(ha, "exit [%d]\n", rc);
1791 	return rc;
1792 }
1793 
1794 int
1795 qlnxr_create_cq(struct ib_cq *ibcq,
1796 		const struct ib_cq_init_attr *attr,
1797 		struct ib_udata *udata)
1798 {
1799 	struct qlnxr_ucontext			*ctx;
1800 	struct ecore_rdma_destroy_cq_out_params destroy_oparams;
1801 	struct ecore_rdma_destroy_cq_in_params	destroy_iparams;
1802 	struct qlnxr_dev			*dev;
1803 	struct ecore_rdma_create_cq_in_params	params;
1804 	struct qlnxr_create_cq_ureq		ureq;
1805 
1806 #if __FreeBSD_version >= 1100000
1807 	int					vector = attr->comp_vector;
1808 	int					entries = attr->cqe;
1809 #endif
1810 	struct qlnxr_cq				*cq = get_qlnxr_cq(ibcq);
1811 	int					chain_entries, rc, page_cnt;
1812 	u64					pbl_ptr;
1813 	u16					icid;
1814 	qlnx_host_t				*ha;
1815 
1816 	dev = get_qlnxr_dev(ibcq->device);
1817 	ha = dev->ha;
1818 
1819 	QL_DPRINT12(ha, "called from %s. entries = %d, "
1820 		"vector = %d\n",
1821 		(udata ? "User Lib" : "Kernel"), entries, vector);
1822 
1823         memset(&params, 0, sizeof(struct ecore_rdma_create_cq_in_params));
1824         memset(&destroy_iparams, 0, sizeof(struct ecore_rdma_destroy_cq_in_params));
1825         memset(&destroy_oparams, 0, sizeof(struct ecore_rdma_destroy_cq_out_params));
1826 
1827 	if (entries > QLNXR_MAX_CQES) {
1828 		QL_DPRINT11(ha,
1829 			"the number of entries %d is too high. "
1830 			"Must be equal or below %d.\n",
1831 			entries, QLNXR_MAX_CQES);
1832 		return -EINVAL;
1833 	}
1834 	chain_entries = qlnxr_align_cq_entries(entries);
1835 	chain_entries = min_t(int, chain_entries, QLNXR_MAX_CQES);
1836 
1837 	if (udata) {
1838 		ctx = rdma_udata_to_drv_context(
1839 		    udata, struct qlnxr_ucontext, ibucontext);
1840 
1841 		memset(&ureq, 0, sizeof(ureq));
1842 
1843 		if (ib_copy_from_udata(&ureq, udata,
1844 			min(sizeof(ureq), udata->inlen))) {
1845 			QL_DPRINT11(ha, "ib_copy_from_udata failed\n");
1846 			goto err0;
1847 		}
1848 
1849 		if (!ureq.len) {
1850 			QL_DPRINT11(ha, "ureq.len == 0\n");
1851 			goto err0;
1852 		}
1853 
1854 		cq->cq_type = QLNXR_CQ_TYPE_USER;
1855 
		rc = qlnxr_init_user_queue(&ctx->ibucontext, dev, &cq->q,
				     ureq.addr, ureq.len,
				     IB_ACCESS_LOCAL_WRITE, 1, 1);
		if (rc)
			goto err0;
1858 
1859 		pbl_ptr = cq->q.pbl_tbl->pa;
1860 		page_cnt = cq->q.pbl_info.num_pbes;
1861 		cq->ibcq.cqe = chain_entries;
1862 	} else {
1863 		ctx = NULL;
1864 
1865 		cq->cq_type = QLNXR_CQ_TYPE_KERNEL;
1866 
1867                 rc = ecore_chain_alloc(&dev->ha->cdev,
1868                            ECORE_CHAIN_USE_TO_CONSUME,
1869                            ECORE_CHAIN_MODE_PBL,
1870                            ECORE_CHAIN_CNT_TYPE_U32,
1871                            chain_entries,
1872                            sizeof(union roce_cqe),
1873                            &cq->pbl, NULL);
1874 
1875 		if (rc)
1876 			goto err1;
1877 
1878 		page_cnt = ecore_chain_get_page_cnt(&cq->pbl);
1879 		pbl_ptr = ecore_chain_get_pbl_phys(&cq->pbl);
1880 		cq->ibcq.cqe = cq->pbl.capacity;
1881 	}
1882 
1883         params.cq_handle_hi = upper_32_bits((uintptr_t)cq);
1884         params.cq_handle_lo = lower_32_bits((uintptr_t)cq);
1885         params.cnq_id = vector;
1886         params.cq_size = chain_entries - 1;
1887         params.pbl_num_pages = page_cnt;
1888         params.pbl_ptr = pbl_ptr;
1889         params.pbl_two_level = 0;
1890 
1891 	if (udata) {
1892         	params.dpi = ctx->dpi;
1893 	} else {
1894         	params.dpi = dev->dpi;
1895 	}
1896 
1897 	rc = ecore_rdma_create_cq(dev->rdma_ctx, &params, &icid);
1898 	if (rc)
1899 		goto err2;
1900 
1901 	cq->icid = icid;
1902 	cq->sig = QLNXR_CQ_MAGIC_NUMBER;
1903 	spin_lock_init(&cq->cq_lock);
1904 
1905 	if (udata) {
1906 		rc = qlnxr_copy_cq_uresp(dev, cq, udata);
1907 		if (rc)
1908 			goto err3;
1909 	} else {
1910 		/* Generate doorbell address.
1911 		 * Configure bits 3-9 with DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT.
1912 		 * TODO: consider moving to device scope as it is a function of
1913 		 *       the device.
1914 		 * TODO: add ifdef if plan to support 16 bit.
1915 		 */
1916 		cq->db_addr = dev->db_addr +
1917 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
1918 		cq->db.data.icid = cq->icid;
1919 		cq->db.data.params = DB_AGG_CMD_SET <<
1920 				     RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
1921 
1922 		/* point to the very last element, passing it we will toggle */
1923 		cq->toggle_cqe = ecore_chain_get_last_elem(&cq->pbl);
1924 		cq->pbl_toggle = RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK;
1925 
1926 		/* must be different from pbl_toggle */
1927 		cq->latest_cqe = NULL;
1928 		consume_cqe(cq);
1929 		cq->cq_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
1930 	}
1931 
1932 	QL_DPRINT12(ha, "exit icid = 0x%0x, addr = %p,"
1933 		" number of entries = 0x%x\n",
1934 		cq->icid, cq, params.cq_size);
1935 	QL_DPRINT12(ha,"cq_addr = %p\n", cq);
1936 	return (0);
1937 
1938 err3:
1939 	destroy_iparams.icid = cq->icid;
1940 	ecore_rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams, &destroy_oparams);
1941 err2:
1942 	if (udata)
1943 		qlnxr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1944 	else
1945 		ecore_chain_free(&dev->ha->cdev, &cq->pbl);
1946 err1:
1947 	if (udata)
1948 		ib_umem_release(cq->q.umem);
1949 err0:
1950 	QL_DPRINT12(ha, "exit error\n");
1951 
1952 	return (-EINVAL);
1953 }
1954 
1955 int qlnxr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
1956 {
1957 	int			status = 0;
1958 	struct qlnxr_dev	*dev = get_qlnxr_dev((ibcq->device));
1959 	qlnx_host_t		*ha;
1960 
1961 	ha = dev->ha;
1962 
1963 	QL_DPRINT12(ha, "enter/exit\n");
1964 
1965 	return status;
1966 }
1967 
1968 void
1969 qlnxr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1970 {
1971 	struct qlnxr_dev			*dev = get_qlnxr_dev((ibcq->device));
1972 	struct ecore_rdma_destroy_cq_out_params oparams;
1973 	struct ecore_rdma_destroy_cq_in_params	iparams;
1974 	struct qlnxr_cq				*cq = get_qlnxr_cq(ibcq);
1975 	int					rc = 0;
1976 	qlnx_host_t				*ha;
1977 
1978 	ha = dev->ha;
1979 
1980 	QL_DPRINT12(ha, "enter cq_id = %d\n", cq->icid);
1981 
1982 	cq->destroyed = 1;
1983 
1984 	/* TODO: Synchronize the IRQ of the CNQ this CQ belongs to, to validate
1985 	 * that all completions with notification have been dealt with. The rest
1986 	 * of the completions are not interesting.
1987 	 */
1988 
1989 	/* GSIs CQs are handled by driver, so they don't exist in the FW */
1990 
1991 	if (cq->cq_type != QLNXR_CQ_TYPE_GSI) {
1992 		iparams.icid = cq->icid;
1993 
1994 		rc = ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1995 
1996 		if (rc) {
1997 			QL_DPRINT12(ha, "ecore_rdma_destroy_cq failed cq_id = %d\n",
1998 				cq->icid);
1999 			return;
2000 		}
2001 
2002 		QL_DPRINT12(ha, "free cq->pbl cq_id = %d\n", cq->icid);
2003 		ecore_chain_free(&dev->ha->cdev, &cq->pbl);
2004 	}
2005 
2006 	if (udata) {
2007 		qlnxr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
2008 		ib_umem_release(cq->q.umem);
2009 	}
2010 
2011 	cq->sig = ~cq->sig;
2012 
2013 	QL_DPRINT12(ha, "exit cq_id = %d\n", cq->icid);
2014 }
2015 
2016 static int
2017 qlnxr_check_qp_attrs(struct ib_pd *ibpd,
2018 	struct qlnxr_dev *dev,
2019 	struct ib_qp_init_attr *attrs,
2020 	struct ib_udata *udata)
2021 {
2022 	struct ecore_rdma_device	*qattr;
2023 	qlnx_host_t			*ha;
2024 
2025 	qattr = ecore_rdma_query_device(dev->rdma_ctx);
2026 	ha = dev->ha;
2027 
2028 	QL_DPRINT12(ha, "enter\n");
2029 
2030 	QL_DPRINT12(ha, "attrs->sq_sig_type = %d\n", attrs->sq_sig_type);
2031 	QL_DPRINT12(ha, "attrs->qp_type = %d\n", attrs->qp_type);
2032 	QL_DPRINT12(ha, "attrs->create_flags = %d\n", attrs->create_flags);
2033 
2034 #if __FreeBSD_version < 1102000
2035 	QL_DPRINT12(ha, "attrs->qpg_type = %d\n", attrs->qpg_type);
2036 #endif
2037 
2038 	QL_DPRINT12(ha, "attrs->port_num = %d\n", attrs->port_num);
2039 	QL_DPRINT12(ha, "attrs->cap.max_send_wr = 0x%x\n", attrs->cap.max_send_wr);
2040 	QL_DPRINT12(ha, "attrs->cap.max_recv_wr = 0x%x\n", attrs->cap.max_recv_wr);
2041 	QL_DPRINT12(ha, "attrs->cap.max_send_sge = 0x%x\n", attrs->cap.max_send_sge);
2042 	QL_DPRINT12(ha, "attrs->cap.max_recv_sge = 0x%x\n", attrs->cap.max_recv_sge);
2043 	QL_DPRINT12(ha, "attrs->cap.max_inline_data = 0x%x\n",
2044 		attrs->cap.max_inline_data);
2045 
2046 #if __FreeBSD_version < 1102000
2047 	QL_DPRINT12(ha, "attrs->cap.qpg_tss_mask_sz = 0x%x\n",
2048 		attrs->cap.qpg_tss_mask_sz);
2049 #endif
2050 
2051 	QL_DPRINT12(ha, "\n\nqattr->vendor_id = 0x%x\n", qattr->vendor_id);
2052 	QL_DPRINT12(ha, "qattr->vendor_part_id = 0x%x\n", qattr->vendor_part_id);
2053 	QL_DPRINT12(ha, "qattr->hw_ver = 0x%x\n", qattr->hw_ver);
2054 	QL_DPRINT12(ha, "qattr->fw_ver = %p\n", (void *)qattr->fw_ver);
2055 	QL_DPRINT12(ha, "qattr->node_guid = %p\n", (void *)qattr->node_guid);
2056 	QL_DPRINT12(ha, "qattr->sys_image_guid = %p\n",
2057 		(void *)qattr->sys_image_guid);
2058 	QL_DPRINT12(ha, "qattr->max_cnq = 0x%x\n", qattr->max_cnq);
2059 	QL_DPRINT12(ha, "qattr->max_sge = 0x%x\n", qattr->max_sge);
2060 	QL_DPRINT12(ha, "qattr->max_srq_sge = 0x%x\n", qattr->max_srq_sge);
2061 	QL_DPRINT12(ha, "qattr->max_inline = 0x%x\n", qattr->max_inline);
2062 	QL_DPRINT12(ha, "qattr->max_wqe = 0x%x\n", qattr->max_wqe);
2063 	QL_DPRINT12(ha, "qattr->max_srq_wqe = 0x%x\n", qattr->max_srq_wqe);
2064 	QL_DPRINT12(ha, "qattr->max_qp_resp_rd_atomic_resc = 0x%x\n",
2065 		qattr->max_qp_resp_rd_atomic_resc);
2066 	QL_DPRINT12(ha, "qattr->max_qp_req_rd_atomic_resc = 0x%x\n",
2067 		qattr->max_qp_req_rd_atomic_resc);
2068 	QL_DPRINT12(ha, "qattr->max_dev_resp_rd_atomic_resc = 0x%x\n",
2069 		qattr->max_dev_resp_rd_atomic_resc);
2070 	QL_DPRINT12(ha, "qattr->max_cq = 0x%x\n", qattr->max_cq);
2071 	QL_DPRINT12(ha, "qattr->max_qp = 0x%x\n", qattr->max_qp);
2072 	QL_DPRINT12(ha, "qattr->max_srq = 0x%x\n", qattr->max_srq);
2073 	QL_DPRINT12(ha, "qattr->max_mr = 0x%x\n", qattr->max_mr);
2074 	QL_DPRINT12(ha, "qattr->max_mr_size = %p\n", (void *)qattr->max_mr_size);
2075 	QL_DPRINT12(ha, "qattr->max_cqe = 0x%x\n", qattr->max_cqe);
2076 	QL_DPRINT12(ha, "qattr->max_mw = 0x%x\n", qattr->max_mw);
2077 	QL_DPRINT12(ha, "qattr->max_fmr = 0x%x\n", qattr->max_fmr);
2078 	QL_DPRINT12(ha, "qattr->max_mr_mw_fmr_pbl = 0x%x\n",
2079 		qattr->max_mr_mw_fmr_pbl);
2080 	QL_DPRINT12(ha, "qattr->max_mr_mw_fmr_size = %p\n",
2081 		(void *)qattr->max_mr_mw_fmr_size);
2082 	QL_DPRINT12(ha, "qattr->max_pd = 0x%x\n", qattr->max_pd);
2083 	QL_DPRINT12(ha, "qattr->max_ah = 0x%x\n", qattr->max_ah);
2084 	QL_DPRINT12(ha, "qattr->max_pkey = 0x%x\n", qattr->max_pkey);
2085 	QL_DPRINT12(ha, "qattr->max_srq_wr = 0x%x\n", qattr->max_srq_wr);
2086 	QL_DPRINT12(ha, "qattr->max_stats_queues = 0x%x\n",
2087 		qattr->max_stats_queues);
2088 	//QL_DPRINT12(ha, "qattr->dev_caps = 0x%x\n", qattr->dev_caps);
2089 	QL_DPRINT12(ha, "qattr->page_size_caps = %p\n",
2090 		(void *)qattr->page_size_caps);
2091 	QL_DPRINT12(ha, "qattr->dev_ack_delay = 0x%x\n", qattr->dev_ack_delay);
2092 	QL_DPRINT12(ha, "qattr->reserved_lkey = 0x%x\n", qattr->reserved_lkey);
2093 	QL_DPRINT12(ha, "qattr->bad_pkey_counter = 0x%x\n",
2094 		qattr->bad_pkey_counter);
2095 
2096 	if ((attrs->qp_type == IB_QPT_GSI) && udata) {
2097 		QL_DPRINT12(ha, "unexpected udata when creating GSI QP\n");
2098 		return -EINVAL;
2099 	}
2100 
2101 	if (udata && !(ibpd->uobject && ibpd->uobject->context)) {
2102 		QL_DPRINT12(ha, "called from user without context\n");
2103 		return -EINVAL;
2104 	}
2105 
2106 	/* QP0... attrs->qp_type == IB_QPT_GSI */
2107 	if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
2108 		QL_DPRINT12(ha, "unsupported qp type=0x%x requested\n",
2109 			   attrs->qp_type);
2110 		return -EINVAL;
2111 	}
2112 	if (attrs->qp_type == IB_QPT_GSI && attrs->srq) {
2113 		QL_DPRINT12(ha, "cannot create GSI qp with SRQ\n");
2114 		return -EINVAL;
2115 	}
2116 	/* Skip the check for QP1 to support CM size of 128 */
2117 	if (attrs->cap.max_send_wr > qattr->max_wqe) {
2118 		QL_DPRINT12(ha, "cannot create a SQ with %d elements "
2119 			" (max_send_wr=0x%x)\n",
2120 			attrs->cap.max_send_wr, qattr->max_wqe);
2121 		return -EINVAL;
2122 	}
2123 	if (!attrs->srq && (attrs->cap.max_recv_wr > qattr->max_wqe)) {
2124 		QL_DPRINT12(ha, "cannot create a RQ with %d elements"
2125 			" (max_recv_wr=0x%x)\n",
2126 			attrs->cap.max_recv_wr, qattr->max_wqe);
2127 		return -EINVAL;
2128 	}
2129 	if (attrs->cap.max_inline_data > qattr->max_inline) {
2130 		QL_DPRINT12(ha,
2131 			"unsupported inline data size=0x%x "
2132 			"requested (max_inline=0x%x)\n",
2133 			attrs->cap.max_inline_data, qattr->max_inline);
2134 		return -EINVAL;
2135 	}
2136 	if (attrs->cap.max_send_sge > qattr->max_sge) {
2137 		QL_DPRINT12(ha,
2138 			"unsupported send_sge=0x%x "
2139 			"requested (max_send_sge=0x%x)\n",
2140 			attrs->cap.max_send_sge, qattr->max_sge);
2141 		return -EINVAL;
2142 	}
2143 	if (attrs->cap.max_recv_sge > qattr->max_sge) {
2144 		QL_DPRINT12(ha,
2145 			"unsupported recv_sge=0x%x requested "
2146 			" (max_recv_sge=0x%x)\n",
2147 			attrs->cap.max_recv_sge, qattr->max_sge);
2148 		return -EINVAL;
2149 	}
2150 	/* unprivileged user space cannot create special QP */
2151 	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
2152 		QL_DPRINT12(ha,
2153 			"userspace can't create special QPs of type=0x%x\n",
2154 			attrs->qp_type);
2155 		return -EINVAL;
2156 	}
2157 	/* allow creating only one GSI type of QP */
2158 	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
2159 		QL_DPRINT12(ha,
2160 			"create qp: GSI special QPs already created.\n");
2161 		return -EINVAL;
2162 	}
2163 
2164 	/* verify consumer QPs are not trying to use GSI QP's CQ */
2165 	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
2166 		struct qlnxr_cq *send_cq = get_qlnxr_cq(attrs->send_cq);
2167 		struct qlnxr_cq *recv_cq = get_qlnxr_cq(attrs->recv_cq);
2168 
2169 		if ((send_cq->cq_type == QLNXR_CQ_TYPE_GSI) ||
2170 		    (recv_cq->cq_type == QLNXR_CQ_TYPE_GSI)) {
2171 			QL_DPRINT11(ha, "consumer QP cannot use GSI CQs.\n");
2172 			return -EINVAL;
2173 		}
2174 	}
2175 	QL_DPRINT12(ha, "exit\n");
2176 	return 0;
2177 }
2178 
2179 static int
2180 qlnxr_copy_srq_uresp(struct qlnxr_dev *dev,
2181 	struct qlnxr_srq *srq,
2182 	struct ib_udata *udata)
2183 {
2184 	struct qlnxr_create_srq_uresp	uresp;
2185 	qlnx_host_t			*ha;
2186 	int				rc;
2187 
2188 	ha = dev->ha;
2189 
2190 	QL_DPRINT12(ha, "enter\n");
2191 
2192 	memset(&uresp, 0, sizeof(uresp));
2193 
2194 	uresp.srq_id = srq->srq_id;
2195 
2196 	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2197 
2198 	QL_DPRINT12(ha, "exit [%d]\n", rc);
2199 	return rc;
2200 }
2201 
2202 static void
2203 qlnxr_copy_rq_uresp(struct qlnxr_dev *dev,
2204 	struct qlnxr_create_qp_uresp *uresp,
2205 	struct qlnxr_qp *qp)
2206 {
2207 	qlnx_host_t	*ha;
2208 
2209 	ha = dev->ha;
2210 
2211 	/* Return if QP is associated with SRQ instead of RQ */
2212 	QL_DPRINT12(ha, "enter qp->srq = %p\n", qp->srq);
2213 
2214 	if (qp->srq)
2215 		return;
2216 
2217 	/* iWARP requires two doorbells per RQ. */
2218 	if (QLNX_IS_IWARP(dev)) {
2219 		uresp->rq_db_offset =
2220 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2221 		uresp->rq_db2_offset =
2222 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2223 
2224 		QL_DPRINT12(ha, "uresp->rq_db_offset = 0x%x "
2225 			"uresp->rq_db2_offset = 0x%x\n",
2226 			uresp->rq_db_offset, uresp->rq_db2_offset);
2227 	} else {
2228 		uresp->rq_db_offset =
2229 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
2230 	}
2231 	uresp->rq_icid = qp->icid;
2232 
2233 	QL_DPRINT12(ha, "exit\n");
2234 	return;
2235 }
2236 
2237 static void
2238 qlnxr_copy_sq_uresp(struct qlnxr_dev *dev,
2239 	struct qlnxr_create_qp_uresp *uresp,
2240 	struct qlnxr_qp *qp)
2241 {
2242 	qlnx_host_t	*ha;
2243 
2244 	ha = dev->ha;
2245 
2246 	QL_DPRINT12(ha, "enter\n");
2247 
2248 	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2249 
2250 	/* iWARP uses the same cid for rq and sq */
2251 	if (QLNX_IS_IWARP(dev)) {
2252 		uresp->sq_icid = qp->icid;
2253 		QL_DPRINT12(ha, "uresp->sq_icid = 0x%x\n", uresp->sq_icid);
2254 	} else
2255 		uresp->sq_icid = qp->icid + 1;
2256 
2257 	QL_DPRINT12(ha, "exit\n");
2258 	return;
2259 }
2260 
2261 static int
2262 qlnxr_copy_qp_uresp(struct qlnxr_dev *dev,
2263 	struct qlnxr_qp *qp,
2264 	struct ib_udata *udata)
2265 {
2266 	int				rc;
2267 	struct qlnxr_create_qp_uresp	uresp;
2268 	qlnx_host_t			*ha;
2269 
2270 	ha = dev->ha;
2271 
2272 	QL_DPRINT12(ha, "enter qp->icid =0x%x\n", qp->icid);
2273 
2274 	memset(&uresp, 0, sizeof(uresp));
2275 	qlnxr_copy_sq_uresp(dev, &uresp, qp);
2276 	qlnxr_copy_rq_uresp(dev, &uresp, qp);
2277 
2278 	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
2279 	uresp.qp_id = qp->qp_id;
2280 
2281 	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2282 
2283 	QL_DPRINT12(ha, "exit [%d]\n", rc);
2284 	return rc;
2285 }
2286 
2287 static void
2288 qlnxr_set_common_qp_params(struct qlnxr_dev *dev,
2289 	struct qlnxr_qp *qp,
2290 	struct qlnxr_pd *pd,
2291 	struct ib_qp_init_attr *attrs)
2292 {
2293 	qlnx_host_t			*ha;
2294 
2295 	ha = dev->ha;
2296 
2297 	QL_DPRINT12(ha, "enter\n");
2298 
2299 	spin_lock_init(&qp->q_lock);
2300 
2301 	atomic_set(&qp->refcnt, 1);
2302 	qp->pd = pd;
2303 	qp->sig = QLNXR_QP_MAGIC_NUMBER;
2304 	qp->qp_type = attrs->qp_type;
2305 	qp->max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2306 	qp->sq.max_sges = attrs->cap.max_send_sge;
2307 	qp->state = ECORE_ROCE_QP_STATE_RESET;
2308 	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
2309 	qp->sq_cq = get_qlnxr_cq(attrs->send_cq);
2310 	qp->rq_cq = get_qlnxr_cq(attrs->recv_cq);
2311 	qp->dev = dev;
2312 
2313 	if (!attrs->srq) {
2314 		/* QP is associated with RQ instead of SRQ */
2315 		qp->rq.max_sges = attrs->cap.max_recv_sge;
2316 		QL_DPRINT12(ha, "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
2317 			qp->rq.max_sges, qp->rq_cq->icid);
2318 	} else {
2319 		qp->srq = get_qlnxr_srq(attrs->srq);
2320 	}
2321 
2322 	QL_DPRINT12(ha,
2323 		"QP params:\tpd = %d, qp_type = %d, max_inline_data = %d,"
2324 		" state = %d, signaled = %d, use_srq=%d\n",
2325 		pd->pd_id, qp->qp_type, qp->max_inline_data,
2326 		qp->state, qp->signaled, ((attrs->srq) ? 1 : 0));
2327 	QL_DPRINT12(ha, "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
2328 		qp->sq.max_sges, qp->sq_cq->icid);
2329 	return;
2330 }
2331 
2332 static int
2333 qlnxr_check_srq_params(struct qlnxr_dev *dev,
2334 	struct ib_srq_init_attr *attrs)
2335 {
2336 	struct ecore_rdma_device *qattr;
2337 	qlnx_host_t		*ha;
2338 
2339 	ha = dev->ha;
2340 	qattr = ecore_rdma_query_device(dev->rdma_ctx);
2341 
2342 	QL_DPRINT12(ha, "enter\n");
2343 
2344 	if (attrs->attr.max_wr > qattr->max_srq_wqe) {
2345 		QL_DPRINT12(ha, "unsupported srq_wr=0x%x"
2346 			" requested (max_srq_wqe=0x%x)\n",
2347 			attrs->attr.max_wr, qattr->max_srq_wqe);
2348 		return -EINVAL;
2349 	}
2350 
2351 	if (attrs->attr.max_sge > qattr->max_sge) {
2352 		QL_DPRINT12(ha,
2353 			"unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
2354 			attrs->attr.max_sge, qattr->max_sge);
2355 		return -EINVAL;
2356 	}
2357 
2358 	if (attrs->attr.srq_limit > attrs->attr.max_wr) {
2359 		QL_DPRINT12(ha,
2360 		       "unsupported srq_limit=0x%x requested"
2361 			" (max_wr=0x%x)\n",
2362 			attrs->attr.srq_limit, attrs->attr.max_wr);
2363 		return -EINVAL;
2364 	}
2365 
2366 	QL_DPRINT12(ha, "exit\n");
2367 	return 0;
2368 }
2369 
2370 static void
2371 qlnxr_free_srq_user_params(struct qlnxr_srq *srq)
2372 {
2373 	struct qlnxr_dev	*dev = srq->dev;
2374 	qlnx_host_t		*ha;
2375 
2376 	ha = dev->ha;
2377 
2378 	QL_DPRINT12(ha, "enter\n");
2379 
2380 	qlnxr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
2381 	ib_umem_release(srq->usrq.umem);
2382 	ib_umem_release(srq->prod_umem);
2383 
2384 	QL_DPRINT12(ha, "exit\n");
2385 	return;
2386 }
2387 
2388 static void
2389 qlnxr_free_srq_kernel_params(struct qlnxr_srq *srq)
2390 {
2391 	struct qlnxr_srq_hwq_info *hw_srq  = &srq->hw_srq;
2392 	struct qlnxr_dev	*dev = srq->dev;
2393 	qlnx_host_t		*ha;
2394 
2395 	ha = dev->ha;
2396 
2397 	QL_DPRINT12(ha, "enter\n");
2398 
2399 	ecore_chain_free(dev->cdev, &hw_srq->pbl);
2400 
2401 	qlnx_dma_free_coherent(&dev->cdev,
2402 		hw_srq->virt_prod_pair_addr,
2403 		hw_srq->phy_prod_pair_addr,
2404 		sizeof(struct rdma_srq_producers));
2405 
2406 	QL_DPRINT12(ha, "exit\n");
2407 
2408 	return;
2409 }
2410 
2411 static int
2412 qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
2413 	struct qlnxr_srq *srq,
2414 	struct qlnxr_create_srq_ureq *ureq,
2415 	int access, int dmasync)
2416 {
2417 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
2418 	struct ib_umem_chunk	*chunk;
2419 #endif
2420 	struct scatterlist	*sg;
2421 	int			rc;
2422 	struct qlnxr_dev	*dev = srq->dev;
2423 	qlnx_host_t		*ha;
2424 
2425 	ha = dev->ha;
2426 
2427 	QL_DPRINT12(ha, "enter\n");
2428 
2429 	rc = qlnxr_init_user_queue(ib_ctx, srq->dev, &srq->usrq, ureq->srq_addr,
2430 				  ureq->srq_len, access, dmasync, 1);
2431 	if (rc)
2432 		return rc;
2433 
2434 	srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
2435 				     sizeof(struct rdma_srq_producers),
2436 				     access, dmasync);
2437 	if (IS_ERR(srq->prod_umem)) {
2438 		qlnxr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
2439 		ib_umem_release(srq->usrq.umem);
2440 
2441 		QL_DPRINT12(ha, "ib_umem_get failed for producer [%p]\n",
2442 			PTR_ERR(srq->prod_umem));
2443 
2444 		return PTR_ERR(srq->prod_umem);
2445 	}
2446 
2447 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
2448 	chunk = container_of((&srq->prod_umem->chunk_list)->next,
2449 			     typeof(*chunk), list);
2450 	sg = &chunk->page_list[0];
2451 #else
2452 	sg = srq->prod_umem->sg_head.sgl;
2453 #endif
2454 	srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
2455 
2456 	QL_DPRINT12(ha, "exit\n");
2457 	return 0;
2458 }
2459 
2460 static int
2461 qlnxr_alloc_srq_kernel_params(struct qlnxr_srq *srq,
2462 	struct qlnxr_dev *dev,
2463 	struct ib_srq_init_attr *init_attr)
2464 {
2465 	struct qlnxr_srq_hwq_info	*hw_srq  = &srq->hw_srq;
2466 	dma_addr_t			phy_prod_pair_addr;
2467 	u32				num_elems, max_wr;
2468 	void				*va;
2469 	int				rc;
2470 	qlnx_host_t			*ha;
2471 
2472 	ha = dev->ha;
2473 
2474 	QL_DPRINT12(ha, "enter\n");
2475 
2476 	va = qlnx_dma_alloc_coherent(&dev->cdev,
2477 			&phy_prod_pair_addr,
2478 			sizeof(struct rdma_srq_producers));
2479 	if (!va) {
2480 		QL_DPRINT11(ha, "qlnx_dma_alloc_coherent failed for producer\n");
2481 		return -ENOMEM;
2482 	}
2483 
2484 	hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
2485 	hw_srq->virt_prod_pair_addr = va;
2486 
2487 	max_wr = init_attr->attr.max_wr;
2488 
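	/* Size the chain for the worst case: a single SRQ WR may span up to
	 * RDMA_MAX_SRQ_WQE_SIZE chain elements.
	 */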
2489 	num_elems = max_wr * RDMA_MAX_SRQ_WQE_SIZE;
2490 
2491         rc = ecore_chain_alloc(dev->cdev,
2492                    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2493                    ECORE_CHAIN_MODE_PBL,
2494                    ECORE_CHAIN_CNT_TYPE_U32,
2495                    num_elems,
2496                    ECORE_RDMA_SRQ_WQE_ELEM_SIZE,
2497                    &hw_srq->pbl, NULL);
2498 
2499 	if (rc) {
2500 		QL_DPRINT11(ha, "ecore_chain_alloc failed [%d]\n", rc);
2501 		goto err0;
2502 	}
2503 
2504 	hw_srq->max_wr = max_wr;
2505 	hw_srq->num_elems = num_elems;
2506 	hw_srq->max_sges = RDMA_MAX_SGE_PER_SRQ;
2507 
2508 	QL_DPRINT12(ha, "exit\n");
2509 	return 0;
2510 
2511 err0:
2512 	qlnx_dma_free_coherent(&dev->cdev, va, phy_prod_pair_addr,
2513 		sizeof(struct rdma_srq_producers));
2514 
2515 	QL_DPRINT12(ha, "exit [%d]\n", rc);
2516 	return rc;
2517 }
2518 
2519 static inline void
2520 qlnxr_init_common_qp_in_params(struct qlnxr_dev *dev,
2521 	struct qlnxr_pd *pd,
2522 	struct qlnxr_qp *qp,
2523 	struct ib_qp_init_attr *attrs,
2524 	bool fmr_and_reserved_lkey,
2525 	struct ecore_rdma_create_qp_in_params *params)
2526 {
2527 	qlnx_host_t	*ha;
2528 
2529 	ha = dev->ha;
2530 
2531 	QL_DPRINT12(ha, "enter\n");
2532 
2533 	/* QP handle to be written in an async event */
2534 	params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
2535 	params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
2536 
2537 	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
2538 	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
2539 	params->pd = pd->pd_id;
2540 	params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
2541 	params->sq_cq_id = get_qlnxr_cq(attrs->send_cq)->icid;
2542 	params->stats_queue = 0;
2543 
2544 	params->rq_cq_id = get_qlnxr_cq(attrs->recv_cq)->icid;
2545 
2546 	if (qp->srq) {
2547 		/* QP is associated with SRQ instead of RQ */
2548 		params->srq_id = qp->srq->srq_id;
2549 		params->use_srq = true;
2550 		QL_DPRINT11(ha, "exit srq_id = 0x%x use_srq = 0x%x\n",
2551 			params->srq_id, params->use_srq);
2552 		return;
2553 	}
2554 
2555 	params->srq_id = 0;
2556 	params->use_srq = false;
2557 
2558 	QL_DPRINT12(ha, "exit\n");
2559 	return;
2560 }
2561 
2562 static inline void
2563 qlnxr_qp_user_print( struct qlnxr_dev *dev,
2564 	struct qlnxr_qp *qp)
2565 {
2566 	QL_DPRINT12((dev->ha), "qp=%p. sq_addr=0x%llx, sq_len=%zd, "
2567 		"rq_addr=0x%llx, rq_len=%zd\n",
2568 		qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
2569 		qp->urq.buf_len);
2570 	return;
2571 }
2572 
2573 static int
2574 qlnxr_idr_add(struct qlnxr_dev *dev, void *ptr, u32 id)
2575 {
2576 	u32		newid;
2577 	int		rc;
2578 	qlnx_host_t	*ha;
2579 
2580 	ha = dev->ha;
2581 
2582 	QL_DPRINT12(ha, "enter\n");
2583 
2584 	if (!QLNX_IS_IWARP(dev))
2585 		return 0;
2586 
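	/* idr_get_new_above() can fail transiently with -EAGAIN once the
	 * memory preloaded by idr_pre_get() is consumed, so preload and
	 * retry until an id at or above the requested value is assigned.
	 */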
2587 	do {
2588 		if (!idr_pre_get(&dev->qpidr, GFP_KERNEL)) {
2589 			QL_DPRINT11(ha, "idr_pre_get failed\n");
2590 			return -ENOMEM;
2591 		}
2592 
2593 		mtx_lock(&dev->idr_lock);
2594 
2595 		rc = idr_get_new_above(&dev->qpidr, ptr, id, &newid);
2596 
2597 		mtx_unlock(&dev->idr_lock);
2598 
2599 	} while (rc == -EAGAIN);
2600 
2601 	QL_DPRINT12(ha, "exit [%d]\n", rc);
2602 
2603 	return rc;
2604 }
2605 
2606 static void
2607 qlnxr_idr_remove(struct qlnxr_dev *dev, u32 id)
2608 {
2609 	qlnx_host_t	*ha;
2610 
2611 	ha = dev->ha;
2612 
2613 	QL_DPRINT12(ha, "enter\n");
2614 
2615 	if (!QLNX_IS_IWARP(dev))
2616 		return;
2617 
2618 	mtx_lock(&dev->idr_lock);
2619 	idr_remove(&dev->qpidr, id);
2620 	mtx_unlock(&dev->idr_lock);
2621 
2622 	QL_DPRINT12(ha, "exit \n");
2623 
2624 	return;
2625 }
2626 
2627 static inline void
2628 qlnxr_iwarp_populate_user_qp(struct qlnxr_dev *dev,
2629 	struct qlnxr_qp *qp,
2630 	struct ecore_rdma_create_qp_out_params *out_params)
2631 {
2632 	qlnx_host_t	*ha;
2633 
2634 	ha = dev->ha;
2635 
2636 	QL_DPRINT12(ha, "enter\n");
2637 
2638 	qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
2639 	qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
2640 
2641 	qlnxr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
2642 			   &qp->usq.pbl_info);
2643 
2644 	if (qp->srq) {
2645 		QL_DPRINT11(ha, "qp->srq = %p\n", qp->srq);
2646 		return;
2647 	}
2648 
2649 	qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
2650 	qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
2651 
2652 	qlnxr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
2653 			   &qp->urq.pbl_info);
2654 
2655 	QL_DPRINT12(ha, "exit\n");
2656 	return;
2657 }
2658 
2659 static int
2660 qlnxr_create_user_qp(struct qlnxr_dev *dev,
2661 	struct qlnxr_qp *qp,
2662 	struct ib_pd *ibpd,
2663 	struct ib_udata *udata,
2664 	struct ib_qp_init_attr *attrs)
2665 {
2666 	struct ecore_rdma_destroy_qp_out_params d_out_params;
2667 	struct ecore_rdma_create_qp_in_params in_params;
2668 	struct ecore_rdma_create_qp_out_params out_params;
2669 	struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
2670 	struct ib_ucontext *ib_ctx = NULL;
2671 	struct qlnxr_ucontext *ctx = NULL;
2672 	struct qlnxr_create_qp_ureq ureq;
2673 	int alloc_and_init = QLNX_IS_ROCE(dev);
2674 	int rc = -EINVAL;
2675 	qlnx_host_t	*ha;
2676 
2677 	ha = dev->ha;
2678 
2679 	QL_DPRINT12(ha, "enter\n");
2680 
2681 	ib_ctx = ibpd->uobject->context;
2682 	ctx = get_qlnxr_ucontext(ib_ctx);
2683 
2684 	memset(&ureq, 0, sizeof(ureq));
2685 	rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
2686 
2687 	if (rc) {
2688 		QL_DPRINT11(ha, "ib_copy_from_udata failed [%d]\n", rc);
2689 		return rc;
2690 	}
2691 
2692 	/* SQ - read access only (0), dma sync not required (0) */
2693 	rc = qlnxr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
2694 				  ureq.sq_len, 0, 0,
2695 				  alloc_and_init);
2696 	if (rc) {
2697 		QL_DPRINT11(ha, "qlnxr_init_user_queue failed [%d]\n", rc);
2698 		return rc;
2699 	}
2700 
2701 	if (!qp->srq) {
2702 		/* RQ - read access only (0), dma sync not required (0) */
2703 		rc = qlnxr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
2704 					  ureq.rq_len, 0, 0,
2705 					  alloc_and_init);
2706 
2707 		if (rc) {
2708 			QL_DPRINT11(ha, "qlnxr_init_user_queue failed [%d]\n", rc);
2709 			return rc;
2710 		}
2711 	}
2712 
2713 	memset(&in_params, 0, sizeof(in_params));
2714 	qlnxr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
2715 	in_params.qp_handle_lo = ureq.qp_handle_lo;
2716 	in_params.qp_handle_hi = ureq.qp_handle_hi;
2717 	in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
2718 	in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
2719 
2720 	if (!qp->srq) {
2721 		in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
2722 		in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
2723 	}
2724 
2725 	qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, &in_params, &out_params);
2726 
2727 	if (!qp->ecore_qp) {
2728 		rc = -ENOMEM;
2729 		QL_DPRINT11(ha, "ecore_rdma_create_qp failed\n");
2730 		goto err1;
2731 	}
2732 
2733 	if (QLNX_IS_IWARP(dev))
2734 		qlnxr_iwarp_populate_user_qp(dev, qp, &out_params);
2735 
2736 	qp->qp_id = out_params.qp_id;
2737 	qp->icid = out_params.icid;
2738 
2739 	rc = qlnxr_copy_qp_uresp(dev, qp, udata);
2740 
2741 	if (rc) {
2742 		QL_DPRINT11(ha, "qlnxr_copy_qp_uresp failed\n");
2743 		goto err;
2744 	}
2745 
2746 	qlnxr_qp_user_print(dev, qp);
2747 
2748 	QL_DPRINT12(ha, "exit\n");
2749 	return 0;
2750 err:
2751 	rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp, &d_out_params);
2752 
2753 	if (rc)
2754 		QL_DPRINT12(ha, "fatal fault\n");
2755 
2756 err1:
2757 	qlnxr_cleanup_user(dev, qp);
2758 
2759 	QL_DPRINT12(ha, "exit[%d]\n", rc);
2760 	return rc;
2761 }
2762 
2763 static void
2764 qlnxr_set_roce_db_info(struct qlnxr_dev *dev,
2765 	struct qlnxr_qp *qp)
2766 {
2767 	qlnx_host_t	*ha;
2768 
2769 	ha = dev->ha;
2770 
2771 	QL_DPRINT12(ha, "enter qp = %p qp->srq %p\n", qp, qp->srq);
2772 
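	/* For RoCE the SQ doorbell uses icid + 1 and the RQ doorbell uses
	 * icid, matching the icids reported to user space by
	 * qlnxr_copy_sq_uresp() and qlnxr_copy_rq_uresp().
	 */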
2773 	qp->sq.db = dev->db_addr +
2774 		DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2775 	qp->sq.db_data.data.icid = qp->icid + 1;
2776 
2777 	if (!qp->srq) {
2778 		qp->rq.db = dev->db_addr +
2779 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
2780 		qp->rq.db_data.data.icid = qp->icid;
2781 	}
2782 
2783 	QL_DPRINT12(ha, "exit\n");
2784 	return;
2785 }
2786 
2787 static void
2788 qlnxr_set_iwarp_db_info(struct qlnxr_dev *dev,
2789 	struct qlnxr_qp *qp)
2790 
2791 {
2792 	qlnx_host_t	*ha;
2793 
2794 	ha = dev->ha;
2795 
2796 	QL_DPRINT12(ha, "enter qp = %p qp->srq %p\n", qp, qp->srq);
2797 
2798 	qp->sq.db = dev->db_addr +
2799 		DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2800 	qp->sq.db_data.data.icid = qp->icid;
2801 
2802 	if (!qp->srq) {
2803 		qp->rq.db = dev->db_addr +
2804 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2805 		qp->rq.db_data.data.icid = qp->icid;
2806 
2807 		qp->rq.iwarp_db2 = dev->db_addr +
2808 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2809 		qp->rq.iwarp_db2_data.data.icid = qp->icid;
2810 		qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
2811 	}
2812 
2813 	QL_DPRINT12(ha,
2814 		"qp->sq.db = %p qp->sq.db_data.data.icid =0x%x\n"
2815 		"\t\t\tqp->rq.db = %p qp->rq.db_data.data.icid =0x%x\n"
2816 		"\t\t\tqp->rq.iwarp_db2 = %p qp->rq.iwarp_db2.data.icid =0x%x"
2817 		" qp->rq.iwarp_db2.data.prod_val =0x%x\n",
2818 		qp->sq.db, qp->sq.db_data.data.icid,
2819 		qp->rq.db, qp->rq.db_data.data.icid,
2820 		qp->rq.iwarp_db2, qp->rq.iwarp_db2_data.data.icid,
2821 		qp->rq.iwarp_db2_data.data.value);
2822 
2823 	QL_DPRINT12(ha, "exit\n");
2824 	return;
2825 }
2826 
2827 static int
2828 qlnxr_roce_create_kernel_qp(struct qlnxr_dev *dev,
2829 	struct qlnxr_qp *qp,
2830 	struct ecore_rdma_create_qp_in_params *in_params,
2831 	u32 n_sq_elems,
2832 	u32 n_rq_elems)
2833 {
2834 	struct ecore_rdma_create_qp_out_params out_params;
2835 	int		rc;
2836 	qlnx_host_t	*ha;
2837 
2838 	ha = dev->ha;
2839 
2840 	QL_DPRINT12(ha, "enter\n");
2841 
2842         rc = ecore_chain_alloc(
2843                 dev->cdev,
2844                 ECORE_CHAIN_USE_TO_PRODUCE,
2845                 ECORE_CHAIN_MODE_PBL,
2846                 ECORE_CHAIN_CNT_TYPE_U32,
2847                 n_sq_elems,
2848                 QLNXR_SQE_ELEMENT_SIZE,
2849                 &qp->sq.pbl,
2850                 NULL);
2851 
2852 	if (rc) {
2853 		QL_DPRINT11(ha, "ecore_chain_alloc qp->sq.pbl failed[%d]\n", rc);
2854 		return rc;
2855 	}
2856 
2857 	in_params->sq_num_pages = ecore_chain_get_page_cnt(&qp->sq.pbl);
2858 	in_params->sq_pbl_ptr = ecore_chain_get_pbl_phys(&qp->sq.pbl);
2859 
2860 	if (!qp->srq) {
2861                 rc = ecore_chain_alloc(
2862                         dev->cdev,
2863                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2864                         ECORE_CHAIN_MODE_PBL,
2865                         ECORE_CHAIN_CNT_TYPE_U32,
2866                         n_rq_elems,
2867                         QLNXR_RQE_ELEMENT_SIZE,
2868                         &qp->rq.pbl,
2869                         NULL);
2870 
2871 		if (rc) {
2872 			QL_DPRINT11(ha,
2873 				"ecore_chain_alloc qp->rq.pbl failed[%d]\n", rc);
2874 			return rc;
2875 		}
2876 
2877 		in_params->rq_num_pages = ecore_chain_get_page_cnt(&qp->rq.pbl);
2878 		in_params->rq_pbl_ptr = ecore_chain_get_pbl_phys(&qp->rq.pbl);
2879 	}
2880 
2881 	qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, in_params, &out_params);
2882 
2883 	if (!qp->ecore_qp) {
2884 		QL_DPRINT11(ha, "qp->ecore_qp == NULL\n");
2885 		return -EINVAL;
2886 	}
2887 
2888 	qp->qp_id = out_params.qp_id;
2889 	qp->icid = out_params.icid;
2890 
2891 	qlnxr_set_roce_db_info(dev, qp);
2892 
2893 	QL_DPRINT12(ha, "exit\n");
2894 	return 0;
2895 }
2896 
2897 static int
2898 qlnxr_iwarp_create_kernel_qp(struct qlnxr_dev *dev,
2899 	struct qlnxr_qp *qp,
2900 	struct ecore_rdma_create_qp_in_params *in_params,
2901 	u32 n_sq_elems,
2902 	u32 n_rq_elems)
2903 {
2904 	struct ecore_rdma_destroy_qp_out_params d_out_params;
2905 	struct ecore_rdma_create_qp_out_params out_params;
2906 	struct ecore_chain_ext_pbl ext_pbl;
2907 	int rc;
2908 	qlnx_host_t	*ha;
2909 
2910 	ha = dev->ha;
2911 
2912 	QL_DPRINT12(ha, "enter\n");
2913 
2914 	in_params->sq_num_pages = ECORE_CHAIN_PAGE_CNT(n_sq_elems,
2915 						     QLNXR_SQE_ELEMENT_SIZE,
2916 						     ECORE_CHAIN_MODE_PBL);
2917 	in_params->rq_num_pages = ECORE_CHAIN_PAGE_CNT(n_rq_elems,
2918 						     QLNXR_RQE_ELEMENT_SIZE,
2919 						     ECORE_CHAIN_MODE_PBL);
2920 
2921 	QL_DPRINT12(ha, "n_sq_elems = 0x%x"
2922 		" n_rq_elems = 0x%x in_params\n"
2923 		"\t\t\tqp_handle_lo\t\t= 0x%08x\n"
2924 		"\t\t\tqp_handle_hi\t\t= 0x%08x\n"
2925 		"\t\t\tqp_handle_async_lo\t\t= 0x%08x\n"
2926 		"\t\t\tqp_handle_async_hi\t\t= 0x%08x\n"
2927 		"\t\t\tuse_srq\t\t\t= 0x%x\n"
2928 		"\t\t\tsignal_all\t\t= 0x%x\n"
2929 		"\t\t\tfmr_and_reserved_lkey\t= 0x%x\n"
2930 		"\t\t\tpd\t\t\t= 0x%x\n"
2931 		"\t\t\tdpi\t\t\t= 0x%x\n"
2932 		"\t\t\tsq_cq_id\t\t\t= 0x%x\n"
2933 		"\t\t\tsq_num_pages\t\t= 0x%x\n"
2934 		"\t\t\tsq_pbl_ptr\t\t= %p\n"
2935 		"\t\t\tmax_sq_sges\t\t= 0x%x\n"
2936 		"\t\t\trq_cq_id\t\t\t= 0x%x\n"
2937 		"\t\t\trq_num_pages\t\t= 0x%x\n"
2938 		"\t\t\trq_pbl_ptr\t\t= %p\n"
2939 		"\t\t\tsrq_id\t\t\t= 0x%x\n"
2940 		"\t\t\tstats_queue\t\t= 0x%x\n",
2941 		n_sq_elems, n_rq_elems,
2942 		in_params->qp_handle_lo,
2943 		in_params->qp_handle_hi,
2944 		in_params->qp_handle_async_lo,
2945 		in_params->qp_handle_async_hi,
2946 		in_params->use_srq,
2947 		in_params->signal_all,
2948 		in_params->fmr_and_reserved_lkey,
2949 		in_params->pd,
2950 		in_params->dpi,
2951 		in_params->sq_cq_id,
2952 		in_params->sq_num_pages,
2953 		(void *)in_params->sq_pbl_ptr,
2954 		in_params->max_sq_sges,
2955 		in_params->rq_cq_id,
2956 		in_params->rq_num_pages,
2957 		(void *)in_params->rq_pbl_ptr,
2958 		in_params->srq_id,
2959 		in_params->stats_queue );
2960 
2961 	memset(&out_params, 0, sizeof (struct ecore_rdma_create_qp_out_params));
2962 	memset(&ext_pbl, 0, sizeof (struct ecore_chain_ext_pbl));
2963 
2964 	qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, in_params, &out_params);
2965 
2966 	if (!qp->ecore_qp) {
2967 		QL_DPRINT11(ha, "ecore_rdma_create_qp failed\n");
2968 		return -EINVAL;
2969 	}
2970 
2971 	/* Now we allocate the chain */
2972 	ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
2973 	ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
2974 
2975 	QL_DPRINT12(ha, "ext_pbl.p_pbl_virt = %p "
2976 		"ext_pbl.p_pbl_phys = %p\n",
2977 		ext_pbl.p_pbl_virt, ext_pbl.p_pbl_phys);
2978 
2979         rc = ecore_chain_alloc(
2980                 dev->cdev,
2981                 ECORE_CHAIN_USE_TO_PRODUCE,
2982                 ECORE_CHAIN_MODE_PBL,
2983                 ECORE_CHAIN_CNT_TYPE_U32,
2984                 n_sq_elems,
2985                 QLNXR_SQE_ELEMENT_SIZE,
2986                 &qp->sq.pbl,
2987                 &ext_pbl);
2988 
2989 	if (rc) {
2990 		QL_DPRINT11(ha,
2991 			"ecore_chain_alloc qp->sq.pbl failed rc = %d\n", rc);
2992 		goto err;
2993 	}
2994 
2995 	ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
2996 	ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
2997 
2998 	QL_DPRINT12(ha, "ext_pbl.p_pbl_virt = %p "
2999 		"ext_pbl.p_pbl_phys = %p\n",
3000 		ext_pbl.p_pbl_virt, ext_pbl.p_pbl_phys);
3001 
3002 	if (!qp->srq) {
3003                 rc = ecore_chain_alloc(
3004                         dev->cdev,
3005                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
3006                         ECORE_CHAIN_MODE_PBL,
3007                         ECORE_CHAIN_CNT_TYPE_U32,
3008                         n_rq_elems,
3009                         QLNXR_RQE_ELEMENT_SIZE,
3010                         &qp->rq.pbl,
3011                         &ext_pbl);
3012 
3013 		if (rc) {
3014 			QL_DPRINT11(ha, "ecore_chain_alloc qp->rq.pbl"
3015 				" failed rc = %d\n", rc);
3016 			goto err;
3017 		}
3018 	}
3019 
3020 	QL_DPRINT12(ha, "qp_id = 0x%x icid =0x%x\n",
3021 		out_params.qp_id, out_params.icid);
3022 
3023 	qp->qp_id = out_params.qp_id;
3024 	qp->icid = out_params.icid;
3025 
3026 	qlnxr_set_iwarp_db_info(dev, qp);
3027 
3028 	QL_DPRINT12(ha, "exit\n");
3029 	return 0;
3030 
3031 err:
3032 	ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp, &d_out_params);
3033 
3034 	QL_DPRINT12(ha, "exit rc = %d\n", rc);
3035 	return rc;
3036 }
3037 
3038 static int
3039 qlnxr_create_kernel_qp(struct qlnxr_dev *dev,
3040 	struct qlnxr_qp *qp,
3041 	struct ib_pd *ibpd,
3042 	struct ib_qp_init_attr *attrs)
3043 {
3044 	struct ecore_rdma_create_qp_in_params in_params;
3045 	struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
3046 	int rc = -EINVAL;
3047 	u32 n_rq_elems;
3048 	u32 n_sq_elems;
3049 	u32 n_sq_entries;
3050 	struct ecore_rdma_device *qattr = ecore_rdma_query_device(dev->rdma_ctx);
3051 	qlnx_host_t	*ha;
3052 
3053 	ha = dev->ha;
3054 
3055 	QL_DPRINT12(ha, "enter\n");
3056 
3057 	memset(&in_params, 0, sizeof(in_params));
3058 
3059 	/* A single work request may take up to MAX_SQ_WQE_SIZE elements in
3060 	 * the ring. The ring should allow at least a single WR, even if the
3061 	 * user requested none, due to allocation issues.
3062 	 * We should add an extra WR since the prod and cons indices of
3063 	 * wqe_wr_id are managed in such a way that the WQ is considered full
3064 	 * when (prod+1)%max_wr==cons. We currently don't do that because we
3065 	 * double the number of entries due to an iSER issue that pushes far
3066 	 * more WRs than indicated. If we declined its ib_post_send() we would
3067 	 * get error prints in dmesg that we'd like to avoid.
3068 	 */
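	/* Illustrative sizing (values assumed, not taken from the code): a
	 * request of max_send_wr = 256 with wq_multiplier = 2 yields
	 * sq.max_wr = 512, still capped by qattr->max_wqe below.
	 */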
3069 	qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
3070 			      qattr->max_wqe);
3071 
3072 	qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
3073 			GFP_KERNEL);
3074 	if (!qp->wqe_wr_id) {
3075 		QL_DPRINT11(ha, "failed SQ shadow memory allocation\n");
3076 		return -ENOMEM;
3077 	}
3078 
3079 	/* QP handle to be written in CQE */
3080 	in_params.qp_handle_lo = lower_32_bits((uintptr_t)qp);
3081 	in_params.qp_handle_hi = upper_32_bits((uintptr_t)qp);
3082 
3083 	/* A single work request may take up to MAX_RQ_WQE_SIZE elements in
3084 	 * the ring. The ring should allow at least a single WR, even if the
3085 	 * user requested none, due to allocation issues.
3086 	 */
3087 	qp->rq.max_wr = (u16)max_t(u32, attrs->cap.max_recv_wr, 1);
3088 
3089 	/* Allocate driver internal RQ array */
3090 	if (!qp->srq) {
3091 		qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
3092 					GFP_KERNEL);
3093 		if (!qp->rqe_wr_id) {
3094 			QL_DPRINT11(ha, "failed RQ shadow memory allocation\n");
3095 			kfree(qp->wqe_wr_id);
3096 			return -ENOMEM;
3097 		}
3098 	}
3099 
3100 	//qlnxr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
3101 
3102         in_params.qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
3103         in_params.qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
3104 
3105         in_params.signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
3106         in_params.fmr_and_reserved_lkey = true;
3107         in_params.pd = pd->pd_id;
3108         in_params.dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
3109         in_params.sq_cq_id = get_qlnxr_cq(attrs->send_cq)->icid;
3110         in_params.stats_queue = 0;
3111 
3112         in_params.rq_cq_id = get_qlnxr_cq(attrs->recv_cq)->icid;
3113 
3114         if (qp->srq) {
3115                 /* QP is associated with SRQ instead of RQ */
3116                 in_params.srq_id = qp->srq->srq_id;
3117                 in_params.use_srq = true;
3118                 QL_DPRINT11(ha, "exit srq_id = 0x%x use_srq = 0x%x\n",
3119                         in_params.srq_id, in_params.use_srq);
3120         } else {
3121         	in_params.srq_id = 0;
3122 		in_params.use_srq = false;
3123 	}
3124 
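	/* Convert WR counts into chain elements: a single WQE may occupy up
	 * to QLNXR_MAX_SQE_ELEMENTS_PER_SQE (SQ) or
	 * QLNXR_MAX_RQE_ELEMENTS_PER_RQE (RQ) elements.
	 */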
3125 	n_sq_entries = attrs->cap.max_send_wr;
3126 	n_sq_entries = min_t(u32, n_sq_entries, qattr->max_wqe);
3127 	n_sq_entries = max_t(u32, n_sq_entries, 1);
3128 	n_sq_elems = n_sq_entries * QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
3129 
3130 	n_rq_elems = qp->rq.max_wr * QLNXR_MAX_RQE_ELEMENTS_PER_RQE;
3131 
3132 	if (QLNX_IS_ROCE(dev)) {
3133 		rc = qlnxr_roce_create_kernel_qp(dev, qp, &in_params,
3134 						n_sq_elems, n_rq_elems);
3135 	} else {
3136 		rc = qlnxr_iwarp_create_kernel_qp(dev, qp, &in_params,
3137 						 n_sq_elems, n_rq_elems);
3138 	}
3139 
3140 	if (rc)
3141 		qlnxr_cleanup_kernel(dev, qp);
3142 
3143 	QL_DPRINT12(ha, "exit [%d]\n", rc);
3144 	return rc;
3145 }
3146 
3147 struct ib_qp *
3148 qlnxr_create_qp(struct ib_pd *ibpd,
3149 		struct ib_qp_init_attr *attrs,
3150 		struct ib_udata *udata)
3151 {
3152 	struct qlnxr_dev *dev = get_qlnxr_dev(ibpd->device);
3153 	struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
3154 	struct qlnxr_qp *qp;
3155 	int rc = 0;
3156 	qlnx_host_t	*ha;
3157 
3158 	ha = dev->ha;
3159 
3160 	QL_DPRINT12(ha, "enter\n");
3161 
3162 	rc = qlnxr_check_qp_attrs(ibpd, dev, attrs, udata);
3163 	if (rc) {
3164 		QL_DPRINT11(ha, "qlnxr_check_qp_attrs failed [%d]\n", rc);
3165 		return ERR_PTR(rc);
3166 	}
3167 
3168 	QL_DPRINT12(ha, "called from %s, event_handle=%p,"
3169 		" eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
3170 		(udata ? "user library" : "kernel"),
3171 		attrs->event_handler, pd,
3172 		get_qlnxr_cq(attrs->send_cq),
3173 		get_qlnxr_cq(attrs->send_cq)->icid,
3174 		get_qlnxr_cq(attrs->recv_cq),
3175 		get_qlnxr_cq(attrs->recv_cq)->icid);
3176 
3177 	qp = qlnx_zalloc(sizeof(struct qlnxr_qp));
3178 
3179 	if (!qp) {
3180 		QL_DPRINT11(ha, "kzalloc(qp) failed\n");
3181 		return ERR_PTR(-ENOMEM);
3182 	}
3183 
3184 	qlnxr_set_common_qp_params(dev, qp, pd, attrs);
3185 
3186 	if (attrs->qp_type == IB_QPT_GSI) {
3187 		QL_DPRINT11(ha, "calling qlnxr_create_gsi_qp\n");
3188 		return qlnxr_create_gsi_qp(dev, attrs, qp);
3189 	}
3190 
3191 	if (udata) {
3192 		rc = qlnxr_create_user_qp(dev, qp, ibpd, udata, attrs);
3193 
3194 		if (rc) {
3195 			QL_DPRINT11(ha, "qlnxr_create_user_qp failed\n");
3196 			goto err;
3197 		}
3198 	} else {
3199 		rc = qlnxr_create_kernel_qp(dev, qp, ibpd, attrs);
3200 
3201 		if (rc) {
3202 			QL_DPRINT11(ha, "qlnxr_create_kernel_qp failed\n");
3203 			goto err;
3204 		}
3205 	}
3206 
3207 	qp->ibqp.qp_num = qp->qp_id;
3208 
3209 	rc = qlnxr_idr_add(dev, qp, qp->qp_id);
3210 
3211 	if (rc) {
3212 		QL_DPRINT11(ha, "qlnxr_idr_add failed\n");
3213 		goto err;
3214 	}
3215 
3216 	QL_DPRINT12(ha, "exit [%p]\n", &qp->ibqp);
3217 
3218 	return &qp->ibqp;
3219 err:
3220 	QL_DPRINT12(ha, "failed exit\n");
3221 	return ERR_PTR(-EFAULT);
3222 }
3223 
3224 static enum ib_qp_state
3225 qlnxr_get_ibqp_state(enum ecore_roce_qp_state qp_state)
3226 {
3227 	enum ib_qp_state state = IB_QPS_ERR;
3228 
3229 	switch (qp_state) {
3230 	case ECORE_ROCE_QP_STATE_RESET:
3231 		state = IB_QPS_RESET;
3232 		break;
3233 
3234 	case ECORE_ROCE_QP_STATE_INIT:
3235 		state = IB_QPS_INIT;
3236 		break;
3237 
3238 	case ECORE_ROCE_QP_STATE_RTR:
3239 		state = IB_QPS_RTR;
3240 		break;
3241 
3242 	case ECORE_ROCE_QP_STATE_RTS:
3243 		state = IB_QPS_RTS;
3244 		break;
3245 
3246 	case ECORE_ROCE_QP_STATE_SQD:
3247 		state = IB_QPS_SQD;
3248 		break;
3249 
3250 	case ECORE_ROCE_QP_STATE_ERR:
3251 		state = IB_QPS_ERR;
3252 		break;
3253 
3254 	case ECORE_ROCE_QP_STATE_SQE:
3255 		state = IB_QPS_SQE;
3256 		break;
3257 	}
3258 	return state;
3259 }
3260 
3261 static enum ecore_roce_qp_state
3262 qlnxr_get_state_from_ibqp( enum ib_qp_state qp_state)
3263 {
3264 	enum ecore_roce_qp_state ecore_qp_state;
3265 
3266 	ecore_qp_state = ECORE_ROCE_QP_STATE_ERR;
3267 
3268 	switch (qp_state) {
3269 	case IB_QPS_RESET:
3270 		ecore_qp_state =  ECORE_ROCE_QP_STATE_RESET;
3271 		break;
3272 
3273 	case IB_QPS_INIT:
3274 		ecore_qp_state =  ECORE_ROCE_QP_STATE_INIT;
3275 		break;
3276 
3277 	case IB_QPS_RTR:
3278 		ecore_qp_state =  ECORE_ROCE_QP_STATE_RTR;
3279 		break;
3280 
3281 	case IB_QPS_RTS:
3282 		ecore_qp_state =  ECORE_ROCE_QP_STATE_RTS;
3283 		break;
3284 
3285 	case IB_QPS_SQD:
3286 		ecore_qp_state =  ECORE_ROCE_QP_STATE_SQD;
3287 		break;
3288 
3289 	case IB_QPS_ERR:
3290 		ecore_qp_state =  ECORE_ROCE_QP_STATE_ERR;
3291 		break;
3292 
3293 	default:
3294 		ecore_qp_state =  ECORE_ROCE_QP_STATE_ERR;
3295 		break;
3296 	}
3297 
3298 	return (ecore_qp_state);
3299 }
3300 
3301 static void
3302 qlnxr_reset_qp_hwq_info(struct qlnxr_qp_hwq_info *qph)
3303 {
3304 	ecore_chain_reset(&qph->pbl);
3305 	qph->prod = qph->cons = 0;
3306 	qph->wqe_cons = 0;
3307 	qph->db_data.data.value = cpu_to_le16(0);
3308 
3309 	return;
3310 }
3311 
3312 static int
3313 qlnxr_update_qp_state(struct qlnxr_dev *dev,
3314 	struct qlnxr_qp *qp,
3315 	enum ecore_roce_qp_state new_state)
3316 {
3317 	int		status = 0;
3318 	uint32_t	reg_addr;
3319 	struct ecore_dev *cdev;
3320 	qlnx_host_t	*ha;
3321 
3322 	ha = dev->ha;
3323 	cdev = &ha->cdev;
3324 
3325 	QL_DPRINT12(ha, "enter qp = %p new_state = 0x%x qp->state = 0x%x\n",
3326 		qp, new_state, qp->state);
3327 
3328 	if (new_state == qp->state) {
3329 		return 0;
3330 	}
3331 
3332 	switch (qp->state) {
3333 	case ECORE_ROCE_QP_STATE_RESET:
3334 		switch (new_state) {
3335 		case ECORE_ROCE_QP_STATE_INIT:
3336 			qp->prev_wqe_size = 0;
3337 			qlnxr_reset_qp_hwq_info(&qp->sq);
3338 			if (!(qp->srq))
3339 				qlnxr_reset_qp_hwq_info(&qp->rq);
3340 			break;
3341 		default:
3342 			status = -EINVAL;
3343 			break;
3344 		};
3345 		break;
3346 	case ECORE_ROCE_QP_STATE_INIT:
3347 		/* INIT->XXX */
3348 		switch (new_state) {
3349 		case ECORE_ROCE_QP_STATE_RTR:
3350 		/* Update doorbell (in case post_recv was done before move to RTR) */
3351 			if (qp->srq)
3352 				break;
3353 			wmb();
3354 			//writel(qp->rq.db_data.raw, qp->rq.db);
3355 			//if (QLNX_IS_IWARP(dev))
3356 			//	writel(qp->rq.iwarp_db2_data.raw,
3357 			//	       qp->rq.iwarp_db2);
3358 
3359 			reg_addr = (uint32_t)((uint8_t *)qp->rq.db -
3360 					(uint8_t *)cdev->doorbells);
3361 
3362 			bus_write_4(ha->pci_dbells, reg_addr, qp->rq.db_data.raw);
3363 			bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
3364 
3365 			if (QLNX_IS_IWARP(dev)) {
3366 				reg_addr = (uint32_t)((uint8_t *)qp->rq.iwarp_db2 -
3367 					(uint8_t *)cdev->doorbells);
3368 				bus_write_4(ha->pci_dbells, reg_addr,\
3369 					qp->rq.iwarp_db2_data.raw);
3370 				bus_barrier(ha->pci_dbells,  0, 0,\
3371 					BUS_SPACE_BARRIER_READ);
3372 			}
3373 
3374 
3375 			mmiowb();
3376 			break;
3377 		case ECORE_ROCE_QP_STATE_ERR:
3378 			/* TBD:flush qps... */
3379 			break;
3380 		default:
3381 			/* invalid state change. */
3382 			status = -EINVAL;
3383 			break;
3384 		};
3385 		break;
3386 	case ECORE_ROCE_QP_STATE_RTR:
3387 		/* RTR->XXX */
3388 		switch (new_state) {
3389 		case ECORE_ROCE_QP_STATE_RTS:
3390 			break;
3391 		case ECORE_ROCE_QP_STATE_ERR:
3392 			break;
3393 		default:
3394 			/* invalid state change. */
3395 			status = -EINVAL;
3396 			break;
3397 		};
3398 		break;
3399 	case ECORE_ROCE_QP_STATE_RTS:
3400 		/* RTS->XXX */
3401 		switch (new_state) {
3402 		case ECORE_ROCE_QP_STATE_SQD:
3403 			break;
3404 		case ECORE_ROCE_QP_STATE_ERR:
3405 			break;
3406 		default:
3407 			/* invalid state change. */
3408 			status = -EINVAL;
3409 			break;
3410 		};
3411 		break;
3412 	case ECORE_ROCE_QP_STATE_SQD:
3413 		/* SQD->XXX */
3414 		switch (new_state) {
3415 		case ECORE_ROCE_QP_STATE_RTS:
3416 		case ECORE_ROCE_QP_STATE_ERR:
3417 			break;
3418 		default:
3419 			/* invalid state change. */
3420 			status = -EINVAL;
3421 			break;
3422 		};
3423 		break;
3424 	case ECORE_ROCE_QP_STATE_ERR:
3425 		/* ERR->XXX */
3426 		switch (new_state) {
3427 		case ECORE_ROCE_QP_STATE_RESET:
3428 			if ((qp->rq.prod != qp->rq.cons) ||
3429 			    (qp->sq.prod != qp->sq.cons)) {
3430 				QL_DPRINT11(ha,
3431 					"Error->Reset with rq/sq "
3432 					"not empty rq.prod=0x%x rq.cons=0x%x"
3433 					" sq.prod=0x%x sq.cons=0x%x\n",
3434 					qp->rq.prod, qp->rq.cons,
3435 					qp->sq.prod, qp->sq.cons);
3436 				status = -EINVAL;
3437 			}
3438 			break;
3439 		default:
3440 			status = -EINVAL;
3441 			break;
3442 		};
3443 		break;
3444 	default:
3445 		status = -EINVAL;
3446 		break;
3447 	};
3448 
3449 	QL_DPRINT12(ha, "exit\n");
3450 	return status;
3451 }
3452 
3453 int
3454 qlnxr_modify_qp(struct ib_qp	*ibqp,
3455 	struct ib_qp_attr	*attr,
3456 	int			attr_mask,
3457 	struct ib_udata		*udata)
3458 {
3459 	int rc = 0;
3460 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
3461 	struct qlnxr_dev *dev = get_qlnxr_dev(&qp->dev->ibdev);
3462 	struct ecore_rdma_modify_qp_in_params qp_params = { 0 };
3463 	enum ib_qp_state old_qp_state, new_qp_state;
3464 	struct ecore_rdma_device *qattr = ecore_rdma_query_device(dev->rdma_ctx);
3465 	qlnx_host_t	*ha;
3466 
3467 	ha = dev->ha;
3468 
3469 	QL_DPRINT12(ha,
3470 		"enter qp = %p attr_mask = 0x%x, state = %d udata = %p\n",
3471 		qp, attr_mask, attr->qp_state, udata);
3472 
3473 	old_qp_state = qlnxr_get_ibqp_state(qp->state);
3474 	if (attr_mask & IB_QP_STATE)
3475 		new_qp_state = attr->qp_state;
3476 	else
3477 		new_qp_state = old_qp_state;
3478 
3479 	if (QLNX_IS_ROCE(dev)) {
3480 		if (!ib_modify_qp_is_ok(old_qp_state,
3481 					new_qp_state,
3482 					ibqp->qp_type,
3483 					attr_mask )) {
3484 			QL_DPRINT12(ha,
3485 				"invalid attribute mask=0x%x"
3486 				" specified for qpn=0x%x of type=0x%x \n"
3487 				" old_qp_state=0x%x, new_qp_state=0x%x\n",
3488 				attr_mask, qp->qp_id, ibqp->qp_type,
3489 				old_qp_state, new_qp_state);
3490 			rc = -EINVAL;
3491 			goto err;
3492 		}
3493 	}
3494 	/* translate the masks... */
3495 	if (attr_mask & IB_QP_STATE) {
3496 		SET_FIELD(qp_params.modify_flags,
3497 			  ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
3498 		qp_params.new_state = qlnxr_get_state_from_ibqp(attr->qp_state);
3499 	}
3500 
3501 	// TBD consider changing ecore to be a flag as well...
3502 	if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
3503 		qp_params.sqd_async = true;
3504 
3505 	if (attr_mask & IB_QP_PKEY_INDEX) {
3506 		SET_FIELD(qp_params.modify_flags,
3507 			  ECORE_ROCE_MODIFY_QP_VALID_PKEY,
3508 			  1);
3509 		if (attr->pkey_index >= QLNXR_ROCE_PKEY_TABLE_LEN) {
3510 			rc = -EINVAL;
3511 			goto err;
3512 		}
3513 
3514 		qp_params.pkey = QLNXR_ROCE_PKEY_DEFAULT;
3515 	}
3516 
3517 	if (attr_mask & IB_QP_QKEY) {
3518 		qp->qkey = attr->qkey;
3519 	}
3520 
3521 	/* tbd consider splitting in ecore.. */
3522 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
3523 		SET_FIELD(qp_params.modify_flags,
3524 			  ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
3525 		qp_params.incoming_rdma_read_en =
3526 			attr->qp_access_flags & IB_ACCESS_REMOTE_READ;
3527 		qp_params.incoming_rdma_write_en =
3528 			attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE;
3529 		qp_params.incoming_atomic_en =
3530 			attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC;
3531 	}
3532 
3533 	if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
3534 		if (attr_mask & IB_QP_PATH_MTU) {
3535 			if (attr->path_mtu < IB_MTU_256 ||
3536 			    attr->path_mtu > IB_MTU_4096) {
3537 				QL_DPRINT12(ha,
3538 					"Only MTU sizes of 256, 512, 1024,"
3539 					" 2048 and 4096 are supported "
3540 					" attr->path_mtu = [%d]\n",
3541 					attr->path_mtu);
3542 
3543 				rc = -EINVAL;
3544 				goto err;
3545 			}
3546 			qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
3547 				      ib_mtu_enum_to_int(
3548 						iboe_get_mtu(dev->ha->ifp->if_mtu)));
3549 		}
3550 
3551 		if (qp->mtu == 0) {
3552 			qp->mtu = ib_mtu_enum_to_int(
3553 					iboe_get_mtu(dev->ha->ifp->if_mtu));
3554 			QL_DPRINT12(ha, "fixing zeroed MTU to qp->mtu = %d\n",
3555 				qp->mtu);
3556 		}
3557 
3558 		SET_FIELD(qp_params.modify_flags,
3559 			  ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR,
3560 			  1);
3561 
3562 		qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
3563 		qp_params.flow_label = attr->ah_attr.grh.flow_label;
3564 		qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
3565 
3566 		qp->sgid_idx = attr->ah_attr.grh.sgid_index;
3567 
3568 		get_gid_info(ibqp, attr, attr_mask, dev, qp, &qp_params);
3569 
3570 		rc = qlnxr_get_dmac(dev, &attr->ah_attr, qp_params.remote_mac_addr);
3571 		if (rc)
3572 			return rc;
3573 
3574 		qp_params.use_local_mac = true;
3575 		memcpy(qp_params.local_mac_addr, dev->ha->primary_mac, ETH_ALEN);
3576 
3577 		QL_DPRINT12(ha, "dgid=0x%x:0x%x:0x%x:0x%x\n",
3578 		       qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
3579 		       qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
3580 		QL_DPRINT12(ha, "sgid=0x%x:0x%x:0x%x:0x%x\n",
3581 		       qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
3582 		       qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
3583 		QL_DPRINT12(ha,
3584 			"remote_mac=[0x%x:0x%x:0x%x:0x%x:0x%x:0x%x]\n",
3585 			qp_params.remote_mac_addr[0],
3586 			qp_params.remote_mac_addr[1],
3587 			qp_params.remote_mac_addr[2],
3588 			qp_params.remote_mac_addr[3],
3589 			qp_params.remote_mac_addr[4],
3590 			qp_params.remote_mac_addr[5]);
3591 
3592 		qp_params.mtu = qp->mtu;
3593 	}
3594 
3595 	if (qp_params.mtu == 0) {
3596 		/* stay with current MTU */
3597 		if (qp->mtu) {
3598 			qp_params.mtu = qp->mtu;
3599 		} else {
3600 			qp_params.mtu = ib_mtu_enum_to_int(
3601 						iboe_get_mtu(dev->ha->ifp->if_mtu));
3602 		}
3603 	}
3604 
3605 	if (attr_mask & IB_QP_TIMEOUT) {
3606 		SET_FIELD(qp_params.modify_flags, \
3607 			ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
3608 
3609 		qp_params.ack_timeout = attr->timeout;
3610 		if (attr->timeout) {
3611 			u32 temp;
3612 
3613 			/* 12.7.34 LOCAL ACK TIMEOUT
3614 			 * Value representing the transport (ACK) timeout for
3615 			 * use by the remote, expressed as
3616 			 * 4.096 us * 2^(Local ACK Timeout).
3617 			 */
3618 			/* We use 1UL since the intermediate value may
3619 			 * overflow 32 bits.
3620 			 */
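			/* Worked example (illustrative): attr->timeout == 14
			 * gives 4096ns << 14 = 67,108,864ns; dividing twice
			 * by 1000 yields ~67 msec for the FW.
			 */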
3621 			temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
3622 			qp_params.ack_timeout = temp; /* FW requires [msec] */
3623 		}
3624 		else
3625 			qp_params.ack_timeout = 0; /* infinite */
3626 	}
3627 	if (attr_mask & IB_QP_RETRY_CNT) {
3628 		SET_FIELD(qp_params.modify_flags,\
3629 			 ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
3630 		qp_params.retry_cnt = attr->retry_cnt;
3631 	}
3632 
3633 	if (attr_mask & IB_QP_RNR_RETRY) {
3634 		SET_FIELD(qp_params.modify_flags,
3635 			  ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT,
3636 			  1);
3637 		qp_params.rnr_retry_cnt = attr->rnr_retry;
3638 	}
3639 
3640 	if (attr_mask & IB_QP_RQ_PSN) {
3641 		SET_FIELD(qp_params.modify_flags,
3642 			  ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN,
3643 			  1);
3644 		qp_params.rq_psn = attr->rq_psn;
3645 		qp->rq_psn = attr->rq_psn;
3646 	}
3647 
3648 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
3649 		if (attr->max_rd_atomic > qattr->max_qp_req_rd_atomic_resc) {
3650 			rc = -EINVAL;
3651 			QL_DPRINT12(ha,
3652 				"unsupported  max_rd_atomic=%d, supported=%d\n",
3653 				attr->max_rd_atomic,
3654 				qattr->max_qp_req_rd_atomic_resc);
3655 			goto err;
3656 		}
3657 
3658 		SET_FIELD(qp_params.modify_flags,
3659 			  ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ,
3660 			  1);
3661 		qp_params.max_rd_atomic_req = attr->max_rd_atomic;
3662 	}
3663 
3664 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
3665 		SET_FIELD(qp_params.modify_flags,
3666 			  ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER,
3667 			  1);
3668 		qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
3669 	}
3670 
3671 	if (attr_mask & IB_QP_SQ_PSN) {
3672 		SET_FIELD(qp_params.modify_flags,
3673 			  ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN,
3674 			  1);
3675 		qp_params.sq_psn = attr->sq_psn;
3676 		qp->sq_psn = attr->sq_psn;
3677 	}
3678 
3679 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
3680 		if (attr->max_dest_rd_atomic >
3681 		    qattr->max_qp_resp_rd_atomic_resc) {
3682 			QL_DPRINT12(ha,
3683 				"unsupported max_dest_rd_atomic=%d, "
3684 				"supported=%d\n",
3685 				attr->max_dest_rd_atomic,
3686 				qattr->max_qp_resp_rd_atomic_resc);
3687 
3688 			rc = -EINVAL;
3689 			goto err;
3690 		}
3691 
3692 		SET_FIELD(qp_params.modify_flags,
3693 			  ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP,
3694 			  1);
3695 		qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
3696 	}
3697 
3698  	if (attr_mask & IB_QP_DEST_QPN) {
3699 		SET_FIELD(qp_params.modify_flags,
3700 			  ECORE_ROCE_MODIFY_QP_VALID_DEST_QP,
3701 			  1);
3702 
3703 		qp_params.dest_qp = attr->dest_qp_num;
3704 		qp->dest_qp_num = attr->dest_qp_num;
3705 	}
3706 
3707 	/*
3708 	 * Update the QP state before the actual ramrod to prevent a race with
3709 	 * the fast path. Modifying the QP state to error causes the device to
3710 	 * flush the CQEs, and polling those flushed CQEs would be flagged as a
3711 	 * potential issue if the QP were not already in the error state.
3712 	 */
3713 	if ((attr_mask & IB_QP_STATE) && (qp->qp_type != IB_QPT_GSI) &&
3714 		(!udata) && (qp_params.new_state == ECORE_ROCE_QP_STATE_ERR))
3715 		qp->state = ECORE_ROCE_QP_STATE_ERR;
3716 
3717 	if (qp->qp_type != IB_QPT_GSI)
3718 		rc = ecore_rdma_modify_qp(dev->rdma_ctx, qp->ecore_qp, &qp_params);
3719 
3720 	if (attr_mask & IB_QP_STATE) {
3721 		if ((qp->qp_type != IB_QPT_GSI) && (!udata))
3722 			rc = qlnxr_update_qp_state(dev, qp, qp_params.new_state);
3723 		qp->state = qp_params.new_state;
3724 	}
3725 
3726 err:
3727 	QL_DPRINT12(ha, "exit\n");
3728 	return rc;
3729 }
3730 
3731 static int
3732 qlnxr_to_ib_qp_acc_flags(struct ecore_rdma_query_qp_out_params *params)
3733 {
3734 	int ib_qp_acc_flags = 0;
3735 
3736 	if (params->incoming_rdma_write_en)
3737 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
3738 	if (params->incoming_rdma_read_en)
3739 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
3740 	if (params->incoming_atomic_en)
3741 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
3742 	if (true) /* FIXME -> local write ?? */
3743 		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
3744 
3745 	return ib_qp_acc_flags;
3746 }
3747 
3748 static enum ib_mtu
3749 qlnxr_mtu_int_to_enum(u16 mtu)
3750 {
3751 	enum ib_mtu ib_mtu_size;
3752 
3753 	switch (mtu) {
3754 	case 256:
3755 		ib_mtu_size = IB_MTU_256;
3756 		break;
3757 
3758 	case 512:
3759 		ib_mtu_size = IB_MTU_512;
3760 		break;
3761 
3762 	case 1024:
3763 		ib_mtu_size = IB_MTU_1024;
3764 		break;
3765 
3766 	case 2048:
3767 		ib_mtu_size = IB_MTU_2048;
3768 		break;
3769 
3770 	case 4096:
3771 		ib_mtu_size = IB_MTU_4096;
3772 		break;
3773 
3774 	default:
3775 		ib_mtu_size = IB_MTU_1024;
3776 		break;
3777 	}
3778 	return (ib_mtu_size);
3779 }
3780 
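/*
 * Query the QP through the ecore layer and translate the result into the
 * ib_qp_attr/ib_qp_init_attr structures expected by the verbs consumer.
 */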
3781 int
3782 qlnxr_query_qp(struct ib_qp *ibqp,
3783 	struct ib_qp_attr *qp_attr,
3784 	int attr_mask,
3785 	struct ib_qp_init_attr *qp_init_attr)
3786 {
3787 	int rc = 0;
3788 	struct ecore_rdma_query_qp_out_params params;
3789 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
3790 	struct qlnxr_dev *dev = qp->dev;
3791 	qlnx_host_t	*ha;
3792 
3793 	ha = dev->ha;
3794 
3795 	QL_DPRINT12(ha, "enter\n");
3796 
3797 	memset(&params, 0, sizeof(params));
3798 
3799 	rc = ecore_rdma_query_qp(dev->rdma_ctx, qp->ecore_qp, &params);
3800 	if (rc)
3801 		goto err;
3802 
3803 	memset(qp_attr, 0, sizeof(*qp_attr));
3804 	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3805 
3806 	qp_attr->qp_state = qlnxr_get_ibqp_state(params.state);
3807 	qp_attr->cur_qp_state = qlnxr_get_ibqp_state(params.state);
3808 
3809 	/* In some cases in iWARP qelr will ask for the state only */
3810 	/* In some cases in iWARP the driver itself will query only the state */
3811 		QL_DPRINT11(ha, "only state requested\n");
3812 		return 0;
3813 	}
3814 
3815 	qp_attr->path_mtu = qlnxr_mtu_int_to_enum(params.mtu);
3816 	qp_attr->path_mig_state = IB_MIG_MIGRATED;
3817 	qp_attr->rq_psn = params.rq_psn;
3818 	qp_attr->sq_psn = params.sq_psn;
3819 	qp_attr->dest_qp_num = params.dest_qp;
3820 
3821 	qp_attr->qp_access_flags = qlnxr_to_ib_qp_acc_flags(&params);
3822 
3823 	QL_DPRINT12(ha, "qp_state = 0x%x cur_qp_state = 0x%x "
3824 		"path_mtu = %d qp_access_flags = 0x%x\n",
3825 		qp_attr->qp_state, qp_attr->cur_qp_state, qp_attr->path_mtu,
3826 		qp_attr->qp_access_flags);
3827 
3828 	qp_attr->cap.max_send_wr = qp->sq.max_wr;
3829 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
3830 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
3831 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
3832 	qp_attr->cap.max_inline_data = qp->max_inline_data;
3833 	qp_init_attr->cap = qp_attr->cap;
3834 
3835 	memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
3836 	       sizeof(qp_attr->ah_attr.grh.dgid.raw));
3837 
3838 	qp_attr->ah_attr.grh.flow_label = params.flow_label;
3839 	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
3840 	qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
3841 	qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
3842 
3843 	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
3844 	qp_attr->ah_attr.port_num = 1; /* FIXME -> check this */
3845 	qp_attr->ah_attr.sl = 0;/* FIXME -> check this */
3846 	qp_attr->timeout = params.timeout;
3847 	qp_attr->rnr_retry = params.rnr_retry;
3848 	qp_attr->retry_cnt = params.retry_cnt;
3849 	qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
3850 	qp_attr->pkey_index = params.pkey_index;
3851 	qp_attr->port_num = 1; /* FIXME -> check this */
3852 	qp_attr->ah_attr.src_path_bits = 0;
3853 	qp_attr->ah_attr.static_rate = 0;
3854 	qp_attr->alt_pkey_index = 0;
3855 	qp_attr->alt_port_num = 0;
3856 	qp_attr->alt_timeout = 0;
3857 	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
3858 
3859 	qp_attr->sq_draining = (params.state == ECORE_ROCE_QP_STATE_SQD) ? 1 : 0;
3860 	qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
3861 	qp_attr->max_rd_atomic = params.max_rd_atomic;
3862 	qp_attr->en_sqd_async_notify = (params.sqd_async)? 1 : 0;
3863 
3864 	QL_DPRINT12(ha, "max_inline_data=%d\n",
3865 		qp_attr->cap.max_inline_data);
3866 
3867 err:
3868 	QL_DPRINT12(ha, "exit\n");
3869 	return rc;
3870 }
3871 
3872 static void
3873 qlnxr_cleanup_user(struct qlnxr_dev *dev, struct qlnxr_qp *qp)
3874 {
3875 	qlnx_host_t	*ha;
3876 
3877 	ha = dev->ha;
3878 
3879 	QL_DPRINT12(ha, "enter\n");
3880 
3881 	if (qp->usq.umem)
3882 		ib_umem_release(qp->usq.umem);
3883 
3884 	qp->usq.umem = NULL;
3885 
3886 	if (qp->urq.umem)
3887 		ib_umem_release(qp->urq.umem);
3888 
3889 	qp->urq.umem = NULL;
3890 
3891 	QL_DPRINT12(ha, "exit\n");
3892 	return;
3893 }
3894 
3895 static void
3896 qlnxr_cleanup_kernel(struct qlnxr_dev *dev, struct qlnxr_qp *qp)
3897 {
3898 	qlnx_host_t	*ha;
3899 
3900 	ha = dev->ha;
3901 
3902 	QL_DPRINT12(ha, "enter\n");
3903 
3904 	if (qlnxr_qp_has_sq(qp)) {
3905 		QL_DPRINT12(ha, "freeing SQ\n");
3906 		ha->qlnxr_debug = 1;
3907 //		ecore_chain_free(dev->cdev, &qp->sq.pbl);
3908 		ha->qlnxr_debug = 0;
3909 		kfree(qp->wqe_wr_id);
3910 	}
3911 
3912 	if (qlnxr_qp_has_rq(qp)) {
3913 		QL_DPRINT12(ha, "freeing RQ\n");
3914 		ha->qlnxr_debug = 1;
3915 	//	ecore_chain_free(dev->cdev, &qp->rq.pbl);
3916 		ha->qlnxr_debug = 0;
3917 		kfree(qp->rqe_wr_id);
3918 	}
3919 
3920 	QL_DPRINT12(ha, "exit\n");
3921 	return;
3922 }
3923 
3924 static int
3925 qlnxr_free_qp_resources(struct qlnxr_dev *dev,
3926     struct qlnxr_qp *qp, struct ib_udata *udata)
3927 {
3928 	int		rc = 0;
3929 	qlnx_host_t	*ha;
3930 	struct ecore_rdma_destroy_qp_out_params d_out_params;
3931 
3932 	ha = dev->ha;
3933 
3934 	QL_DPRINT12(ha, "enter\n");
3935 
3936 #if 0
3937 	if (qp->qp_type != IB_QPT_GSI) {
3938 		rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp,
3939 				&d_out_params);
3940 		if (rc)
3941 			return rc;
3942 	}
3943 
3944 	if (udata)
3945 		qlnxr_cleanup_user(dev, qp);
3946 	else
3947 		qlnxr_cleanup_kernel(dev, qp);
3948 #endif
3949 
3950 	if (udata)
3951 		qlnxr_cleanup_user(dev, qp);
3952 	else
3953 		qlnxr_cleanup_kernel(dev, qp);
3954 
3955 	if (qp->qp_type != IB_QPT_GSI) {
3956 		rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp,
3957 				&d_out_params);
3958 		if (rc)
3959 			return rc;
3960 	}
3961 
3962 	QL_DPRINT12(ha, "exit\n");
3963 	return 0;
3964 }
3965 
3966 int
3967 qlnxr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
3968 {
3969 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
3970 	struct qlnxr_dev *dev = qp->dev;
3971 	int rc = 0;
3972 	struct ib_qp_attr attr;
3973 	int attr_mask = 0;
3974 	qlnx_host_t	*ha;
3975 
3976 	ha = dev->ha;
3977 
3978 	QL_DPRINT12(ha, "enter qp = %p, qp_type=%d\n", qp, qp->qp_type);
3979 
3980 	qp->destroyed = 1;
3981 
3982 	if (QLNX_IS_ROCE(dev) && (qp->state != ECORE_ROCE_QP_STATE_RESET) &&
3983 	    (qp->state != ECORE_ROCE_QP_STATE_ERR) &&
3984 	    (qp->state != ECORE_ROCE_QP_STATE_INIT)) {
3985 		attr.qp_state = IB_QPS_ERR;
3986 		attr_mask |= IB_QP_STATE;
3987 
3988 		/* change the QP state to ERROR */
3989 		qlnxr_modify_qp(ibqp, &attr, attr_mask, NULL);
3990 	}
3991 
3992 	if (qp->qp_type == IB_QPT_GSI)
3993 		qlnxr_destroy_gsi_qp(dev);
3994 
3995 	qp->sig = ~qp->sig;
3996 
3997 	qlnxr_free_qp_resources(dev, qp, udata);
3998 
3999 	if (atomic_dec_and_test(&qp->refcnt)) {
4000 		/* TODO: only for iWARP? */
4001 		qlnxr_idr_remove(dev, qp->qp_id);
4002 	}
4003 
4004 	QL_DPRINT12(ha, "exit\n");
4005 	return rc;
4006 }
4007 
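/*
 * The work queue is considered full when advancing the producer by one
 * would make it collide with the consumer; one slot is deliberately left
 * unused so a full queue can be told apart from an empty one.
 */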
4008 static inline int
4009 qlnxr_wq_is_full(struct qlnxr_qp_hwq_info *wq)
4010 {
4011 	return (((wq->prod + 1) % wq->max_wr) == wq->cons);
4012 }
4013 
4014 static int
4015 sge_data_len(struct ib_sge *sg_list, int num_sge)
4016 {
4017 	int i, len = 0;
4018 	for (i = 0; i < num_sge; i++)
4019 		len += sg_list[i].length;
4020 	return len;
4021 }
4022 
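/*
 * Byte-swap every 64-bit word of a WQE segment. Note that the nested
 * cpu_to_be64(cpu_to_le64()) below amounts to an unconditional byte swap
 * on either host endianness, which is presumably the layout the firmware
 * expects for inline data.
 */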
4023 static void
4024 swap_wqe_data64(u64 *p)
4025 {
4026 	int i;
4027 
4028 	for (i = 0; i < QLNXR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
4029 		*p = cpu_to_be64(cpu_to_le64(*p));
4030 }
4031 
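/*
 * Copy the payload of all SGEs of an IB_SEND_INLINE work request directly
 * into consecutive SQ chain elements instead of posting SGEs. The supplied
 * inline flag bit is set and the total number of bytes copied is returned
 * (0 if the data exceeds ROCE_REQ_MAX_INLINE_DATA_SIZE).
 */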
4032 static u32
4033 qlnxr_prepare_sq_inline_data(struct qlnxr_dev *dev,
4034 	struct qlnxr_qp		*qp,
4035 	u8			*wqe_size,
4036 	const struct ib_send_wr	*wr,
4037 	const struct ib_send_wr	**bad_wr,
4038 	u8			*bits,
4039 	u8			bit)
4040 {
4041 	int i, seg_siz;
4042 	char *seg_prt, *wqe;
4043 	u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
4044 	qlnx_host_t	*ha;
4045 
4046 	ha = dev->ha;
4047 
4048 	QL_DPRINT12(ha, "enter[%d]\n", data_size);
4049 
4050 	if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
4051 		QL_DPRINT12(ha,
4052 			"Too much inline data in WR:[%d, %d]\n",
4053 			data_size, ROCE_REQ_MAX_INLINE_DATA_SIZE);
4054 		*bad_wr = wr;
4055 		return 0;
4056 	}
4057 
4058 	if (!data_size)
4059 		return data_size;
4060 
4061 	/* set the bit */
4062 	*bits |= bit;
4063 
4064 	seg_prt = wqe = NULL;
4065 	seg_siz = 0;
4066 
4067 	/* copy data inline */
4068 	for (i = 0; i < wr->num_sge; i++) {
4069 		u32 len = wr->sg_list[i].length;
4070 		void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
4071 
4072 		while (len > 0) {
4073 			u32 cur;
4074 
4075 			/* new segment required */
4076 			if (!seg_siz) {
4077 				wqe = (char *)ecore_chain_produce(&qp->sq.pbl);
4078 				seg_prt = wqe;
4079 				seg_siz = sizeof(struct rdma_sq_common_wqe);
4080 				(*wqe_size)++;
4081 			}
4082 
4083 			/* calculate currently allowed length */
4084 			cur = MIN(len, seg_siz);
4085 
4086 			memcpy(seg_prt, src, cur);
4087 
4088 			/* update segment variables */
4089 			seg_prt += cur;
4090 			seg_siz -= cur;
4091 			/* update sge variables */
4092 			src += cur;
4093 			len -= cur;
4094 
4095 			/* swap fully-completed segments */
4096 			if (!seg_siz)
4097 				swap_wqe_data64((u64 *)wqe);
4098 		}
4099 	}
4100 
4101 	/* swap last not completed segment */
4102 	if (seg_siz)
4103 		swap_wqe_data64((u64 *)wqe);
4104 
4105 	QL_DPRINT12(ha, "exit\n");
4106 	return data_size;
4107 }
4108 
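/*
 * Post one rdma_sq_sge chain element per SGE of the work request and
 * return the total payload length in bytes.
 */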
4109 static u32
4110 qlnxr_prepare_sq_sges(struct qlnxr_dev *dev, struct qlnxr_qp *qp,
4111 	u8 *wqe_size, const struct ib_send_wr *wr)
4112 {
4113 	int i;
4114 	u32 data_size = 0;
4115 	qlnx_host_t	*ha;
4116 
4117 	ha = dev->ha;
4118 
4119 	QL_DPRINT12(ha, "enter wr->num_sge = %d \n", wr->num_sge);
4120 
4121 	for (i = 0; i < wr->num_sge; i++) {
4122 		struct rdma_sq_sge *sge = ecore_chain_produce(&qp->sq.pbl);
4123 
4124 		TYPEPTR_ADDR_SET(sge, addr, wr->sg_list[i].addr);
4125 		sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
4126 		sge->length = cpu_to_le32(wr->sg_list[i].length);
4127 		data_size += wr->sg_list[i].length;
4128 	}
4129 
4130 	if (wqe_size)
4131 		*wqe_size += wr->num_sge;
4132 
4133 	QL_DPRINT12(ha, "exit data_size = %d\n", data_size);
4134 	return data_size;
4135 }
4136 
4137 static u32
4138 qlnxr_prepare_sq_rdma_data(struct qlnxr_dev *dev,
4139 	struct qlnxr_qp *qp,
4140 	struct rdma_sq_rdma_wqe_1st *rwqe,
4141 	struct rdma_sq_rdma_wqe_2nd *rwqe2,
4142 	const struct ib_send_wr *wr,
4143 	const struct ib_send_wr **bad_wr)
4144 {
4145 	qlnx_host_t	*ha;
4146 	u32             ret = 0;
4147 
4148 	ha = dev->ha;
4149 
4150 	QL_DPRINT12(ha, "enter\n");
4151 
4152 	rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
4153 	TYPEPTR_ADDR_SET(rwqe2, remote_va, rdma_wr(wr)->remote_addr);
4154 
4155 	if (wr->send_flags & IB_SEND_INLINE) {
4156 		u8 flags = 0;
4157 		SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
4158 		return qlnxr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size,
4159 				wr, bad_wr, &rwqe->flags, flags);
4160 	}
4161 
4162 	ret = qlnxr_prepare_sq_sges(dev, qp, &rwqe->wqe_size, wr);
4163 
4164 	QL_DPRINT12(ha, "exit ret = 0x%x\n", ret);
4165 
4166 	return (ret);
4167 }
4168 
4169 static u32
4170 qlnxr_prepare_sq_send_data(struct qlnxr_dev *dev,
4171 	struct qlnxr_qp *qp,
4172 	struct rdma_sq_send_wqe *swqe,
4173 	struct rdma_sq_send_wqe *swqe2,
4174 	const struct ib_send_wr *wr,
4175 	const struct ib_send_wr **bad_wr)
4176 {
4177 	qlnx_host_t	*ha;
4178 	u32             ret = 0;
4179 
4180 	ha = dev->ha;
4181 
4182 	QL_DPRINT12(ha, "enter\n");
4183 
4184 	memset(swqe2, 0, sizeof(*swqe2));
4185 
4186 	if (wr->send_flags & IB_SEND_INLINE) {
4187 		u8 flags = 0;
4188 		SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
4189 		return qlnxr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size,
4190 				wr, bad_wr, &swqe->flags, flags);
4191 	}
4192 
4193 	ret = qlnxr_prepare_sq_sges(dev, qp, &swqe->wqe_size, wr);
4194 
4195 	QL_DPRINT12(ha, "exit ret = 0x%x\n", ret);
4196 
4197 	return (ret);
4198 }
4199 
4200 static void
4201 qlnx_handle_completed_mrs(struct qlnxr_dev *dev, struct mr_info *info)
4202 {
4203 	qlnx_host_t	*ha;
4204 
4205 	ha = dev->ha;
4206 
4207 	int work = info->completed - info->completed_handled - 1;
4208 
4209 	QL_DPRINT12(ha, "enter [%d]\n", work);
4210 
4211 	while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
4212 		struct qlnxr_pbl *pbl;
4213 
4214 		/* Free every page list that can be freed (i.e. all the ones
4215 		 * that were invalidated), under the assumption that if an FMR
4216 		 * completed successfully, then any invalidate operation posted
4217 		 * before it has also completed.
4218 		 */
4219 		pbl = list_first_entry(&info->inuse_pbl_list,
4220 				       struct qlnxr_pbl,
4221 				       list_entry);
4222 		list_del(&pbl->list_entry);
4223 		list_add_tail(&pbl->list_entry, &info->free_pbl_list);
4224 		info->completed_handled++;
4225 	}
4226 
4227 	QL_DPRINT12(ha, "exit\n");
4228 	return;
4229 }
4230 
4231 #if __FreeBSD_version >= 1102000
4232 
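/*
 * Build the two-element fast-register WQE for an IB_WR_REG_MR work
 * request: the first element carries the MR's iova and key, the second its
 * access flags, page size and PBL address.
 */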
4233 static int qlnxr_prepare_reg(struct qlnxr_qp *qp,
4234 		struct rdma_sq_fmr_wqe_1st *fwqe1,
4235 		const struct ib_reg_wr *wr)
4236 {
4237 	struct qlnxr_mr *mr = get_qlnxr_mr(wr->mr);
4238 	struct rdma_sq_fmr_wqe_2nd *fwqe2;
4239 
4240 	fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)ecore_chain_produce(&qp->sq.pbl);
4241 	fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
4242 	fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
4243 	fwqe1->l_key = wr->key;
4244 
4245 	fwqe2->access_ctrl = 0;
4246 
4247 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
4248 		!!(wr->access & IB_ACCESS_REMOTE_READ));
4249 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
4250 		!!(wr->access & IB_ACCESS_REMOTE_WRITE));
4251 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
4252 		!!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
4253 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
4254 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
4255 		!!(wr->access & IB_ACCESS_LOCAL_WRITE));
4256 	fwqe2->fmr_ctrl = 0;
4257 
4258 	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
4259 		ilog2(mr->ibmr.page_size) - 12);
4260 
4261 	fwqe2->length_hi = 0; /* TODO - figure out why length is only 32bit.. */
4262 	fwqe2->length_lo = mr->ibmr.length;
4263 	fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
4264 	fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
4265 
4266 	qp->wqe_wr_id[qp->sq.prod].mr = mr;
4267 
4268 	return 0;
4269 }
4270 
4271 #else
4272 
4273 static void
4274 build_frmr_pbes(struct qlnxr_dev *dev, const struct ib_send_wr *wr,
4275 	struct mr_info *info)
4276 {
4277 	int i;
4278 	u64 buf_addr = 0;
4279 	int num_pbes, total_num_pbes = 0;
4280 	struct regpair *pbe;
4281 	struct qlnxr_pbl *pbl_tbl = info->pbl_table;
4282 	struct qlnxr_pbl_info *pbl_info = &info->pbl_info;
4283 	qlnx_host_t	*ha;
4284 
4285 	ha = dev->ha;
4286 
4287 	QL_DPRINT12(ha, "enter\n");
4288 
4289 	pbe = (struct regpair *)pbl_tbl->va;
4290 	num_pbes = 0;
4291 
4292 	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
4293 		buf_addr = wr->wr.fast_reg.page_list->page_list[i];
4294 		pbe->lo = cpu_to_le32((u32)buf_addr);
4295 		pbe->hi = cpu_to_le32((u32)upper_32_bits(buf_addr));
4296 
4297 		num_pbes += 1;
4298 		pbe++;
4299 		total_num_pbes++;
4300 
4301 		if (total_num_pbes == pbl_info->num_pbes)
4302 			return;
4303 
4304 		/* If the current PBL has no room left for more PBEs,
4305 		 * move on to the next PBL.
4306 		 */
4307 		if (num_pbes ==
4308 		    (pbl_info->pbl_size / sizeof(u64))) {
4309 			pbl_tbl++;
4310 			pbe = (struct regpair *)pbl_tbl->va;
4311 			num_pbes = 0;
4312 		}
4313 	}
4314 	QL_DPRINT12(ha, "exit\n");
4315 
4316 	return;
4317 }
4318 
4319 static int
4320 qlnxr_prepare_safe_pbl(struct qlnxr_dev *dev, struct mr_info *info)
4321 {
4322 	int rc = 0;
4323 	qlnx_host_t	*ha;
4324 
4325 	ha = dev->ha;
4326 
4327 	QL_DPRINT12(ha, "enter\n");
4328 
4329 	if (info->completed == 0) {
4330 		//DP_VERBOSE(dev, QLNXR_MSG_MR, "First FMR\n");
4331 		/* first fmr */
4332 		return 0;
4333 	}
4334 
4335 	qlnx_handle_completed_mrs(dev, info);
4336 
4337 	list_add_tail(&info->pbl_table->list_entry, &info->inuse_pbl_list);
4338 
4339 	if (list_empty(&info->free_pbl_list)) {
4340 		info->pbl_table = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info,
4341 							  GFP_ATOMIC);
4342 	} else {
4343 		info->pbl_table = list_first_entry(&info->free_pbl_list,
4344 					struct qlnxr_pbl,
4345 					list_entry);
4346 		list_del(&info->pbl_table->list_entry);
4347 	}
4348 
4349 	if (!info->pbl_table)
4350 		rc = -ENOMEM;
4351 
4352 	QL_DPRINT12(ha, "exit\n");
4353 	return rc;
4354 }
4355 
4356 static inline int
4357 qlnxr_prepare_fmr(struct qlnxr_qp *qp,
4358 	struct rdma_sq_fmr_wqe_1st *fwqe1,
4359 	const struct ib_send_wr *wr)
4360 {
4361 	struct qlnxr_dev *dev = qp->dev;
4362 	u64 fbo;
4363 	struct qlnxr_fast_reg_page_list *frmr_list =
4364 		get_qlnxr_frmr_list(wr->wr.fast_reg.page_list);
4365 	struct rdma_sq_fmr_wqe *fwqe2 =
4366 		(struct rdma_sq_fmr_wqe *)ecore_chain_produce(&qp->sq.pbl);
4367 	int rc = 0;
4368 	qlnx_host_t	*ha;
4369 
4370 	ha = dev->ha;
4371 
4372 	QL_DPRINT12(ha, "enter\n");
4373 
4374 	if (wr->wr.fast_reg.page_list_len == 0)
4375 		BUG();
4376 
4377 	rc = qlnxr_prepare_safe_pbl(dev, &frmr_list->info);
4378 	if (rc)
4379 		return rc;
4380 
4381 	fwqe1->addr.hi = upper_32_bits(wr->wr.fast_reg.iova_start);
4382 	fwqe1->addr.lo = lower_32_bits(wr->wr.fast_reg.iova_start);
4383 	fwqe1->l_key = wr->wr.fast_reg.rkey;
4384 
4385 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_REMOTE_READ,
4386 		   !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ));
4387 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_REMOTE_WRITE,
4388 		   !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE));
4389 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_ENABLE_ATOMIC,
4390 		   !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_ATOMIC));
4391 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_LOCAL_READ, 1);
4392 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_LOCAL_WRITE,
4393 		   !!(wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE));
4394 
4395 	fwqe2->fmr_ctrl = 0;
4396 
4397 	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
4398 		   ilog2(1 << wr->wr.fast_reg.page_shift) - 12);
4399 	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_ZERO_BASED, 0);
4400 
4401 	fwqe2->length_hi = 0; /* Todo - figure this out... why length is only 32bit.. */
4402 	fwqe2->length_lo = wr->wr.fast_reg.length;
4403 	fwqe2->pbl_addr.hi = upper_32_bits(frmr_list->info.pbl_table->pa);
4404 	fwqe2->pbl_addr.lo = lower_32_bits(frmr_list->info.pbl_table->pa);
4405 
4406 	/* produce another wqe for fwqe3 */
4407 	ecore_chain_produce(&qp->sq.pbl);
4408 
4409 	fbo = wr->wr.fast_reg.iova_start -
4410 	    (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
4411 
4412 	QL_DPRINT12(ha, "wr.fast_reg.iova_start = %p rkey=%x addr=%x:%x"
4413 		" length = %x pbl_addr %x:%x\n",
4414 		wr->wr.fast_reg.iova_start, wr->wr.fast_reg.rkey,
4415 		fwqe1->addr.hi, fwqe1->addr.lo, fwqe2->length_lo,
4416 		fwqe2->pbl_addr.hi, fwqe2->pbl_addr.lo);
4417 
4418 	build_frmr_pbes(dev, wr, &frmr_list->info);
4419 
4420 	qp->wqe_wr_id[qp->sq.prod].frmr = frmr_list;
4421 
4422 	QL_DPRINT12(ha, "exit\n");
4423 	return 0;
4424 }
4425 
4426 #endif /* #if __FreeBSD_version >= 1102000 */
4427 
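/*
 * Translate the verbs opcode of a posted send WR into the work completion
 * opcode that will later be reported for it; unknown opcodes default to
 * IB_WC_SEND.
 */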
4428 static enum ib_wc_opcode
4429 qlnxr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
4430 {
4431 	switch (opcode) {
4432 	case IB_WR_RDMA_WRITE:
4433 	case IB_WR_RDMA_WRITE_WITH_IMM:
4434 		return IB_WC_RDMA_WRITE;
4435 	case IB_WR_SEND_WITH_IMM:
4436 	case IB_WR_SEND:
4437 	case IB_WR_SEND_WITH_INV:
4438 		return IB_WC_SEND;
4439 	case IB_WR_RDMA_READ:
4440 		return IB_WC_RDMA_READ;
4441 	case IB_WR_ATOMIC_CMP_AND_SWP:
4442 		return IB_WC_COMP_SWAP;
4443 	case IB_WR_ATOMIC_FETCH_AND_ADD:
4444 		return IB_WC_FETCH_ADD;
4445 
4446 #if __FreeBSD_version >= 1102000
4447 	case IB_WR_REG_MR:
4448 		return IB_WC_REG_MR;
4449 #else
4450 	case IB_WR_FAST_REG_MR:
4451 		return IB_WC_FAST_REG_MR;
4452 #endif /* #if __FreeBSD_version >= 1102000 */
4453 
4454 	case IB_WR_LOCAL_INV:
4455 		return IB_WC_LOCAL_INV;
4456 	default:
4457 		return IB_WC_SEND;
4458 	}
4459 }
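/*
 * Check that the send queue can accept another WR: the WR must not carry
 * more SGEs than the SQ supports, the SQ must not be full, and enough PBL
 * elements must remain for a maximum-sized SQE. Each failure reason is
 * logged only once per QP, tracked via qp->err_bitmap.
 */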
4460 static inline bool
4461 qlnxr_can_post_send(struct qlnxr_qp *qp, const struct ib_send_wr *wr)
4462 {
4463 	int wq_is_full, err_wr, pbl_is_full;
4464 	struct qlnxr_dev *dev = qp->dev;
4465 	qlnx_host_t	*ha;
4466 
4467 	ha = dev->ha;
4468 
4469 	QL_DPRINT12(ha, "enter[qp, wr] = [%p,%p]\n", qp, wr);
4470 
4471 	/* prevent SQ overflow and/or processing of a bad WR */
4472 	err_wr = wr->num_sge > qp->sq.max_sges;
4473 	wq_is_full = qlnxr_wq_is_full(&qp->sq);
4474 	pbl_is_full = ecore_chain_get_elem_left_u32(&qp->sq.pbl) <
4475 		      QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
4476 	if (wq_is_full || err_wr || pbl_is_full) {
4477 		if (wq_is_full &&
4478 		    !(qp->err_bitmap & QLNXR_QP_ERR_SQ_FULL)) {
4479 			qp->err_bitmap |= QLNXR_QP_ERR_SQ_FULL;
4480 
4481 			QL_DPRINT12(ha,
4482 				"error: WQ is full. Post send on QP failed"
4483 				" (this error appears only once) "
4484 				"[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4485 				qp, wr, qp->err_bitmap);
4486 		}
4487 
4488 		if (err_wr &&
4489 		    !(qp->err_bitmap & QLNXR_QP_ERR_BAD_SR)) {
4490 			qp->err_bitmap |= QLNXR_QP_ERR_BAD_SR;
4491 
4492 			QL_DPRINT12(ha,
4493 				"error: WR is bad. Post send on QP failed"
4494 				" (this error appears only once) "
4495 				"[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4496 				qp, wr, qp->err_bitmap);
4497 		}
4498 
4499 		if (pbl_is_full &&
4500 		    !(qp->err_bitmap & QLNXR_QP_ERR_SQ_PBL_FULL)) {
4501 			qp->err_bitmap |= QLNXR_QP_ERR_SQ_PBL_FULL;
4502 
4503 			QL_DPRINT12(ha,
4504 				"error: WQ PBL is full. Post send on QP failed"
4505 				" (this error appears only once) "
4506 				"[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4507 				qp, wr, qp->err_bitmap);
4508 		}
4509 		return false;
4510 	}
4511 	QL_DPRINT12(ha, "exit[qp, wr] = [%p,%p]\n", qp, wr);
4512 	return true;
4513 }
4514 
4515 int
4516 qlnxr_post_send(struct ib_qp *ibqp,
4517 	const struct ib_send_wr *wr,
4518 	const struct ib_send_wr **bad_wr)
4519 {
4520 	struct qlnxr_dev	*dev = get_qlnxr_dev(ibqp->device);
4521 	struct qlnxr_qp		*qp = get_qlnxr_qp(ibqp);
4522 	unsigned long 		flags;
4523 	int 			status = 0, rc = 0;
4524 	bool			comp;
4525 	qlnx_host_t		*ha;
4526 	uint32_t		reg_addr;
4527 
4528 	*bad_wr = NULL;
4529 	ha = dev->ha;
4530 
4531 	QL_DPRINT12(ha, "enter[ibqp, wr, bad_wr] = [%p, %p, %p]\n",
4532 		ibqp, wr, bad_wr);
4533 
4534 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
4535 		return -EINVAL;
4536 
4537 	if (qp->qp_type == IB_QPT_GSI)
4538 		return qlnxr_gsi_post_send(ibqp, wr, bad_wr);
4539 
4540 	spin_lock_irqsave(&qp->q_lock, flags);
4541 
4542 	if (QLNX_IS_ROCE(dev) && (qp->state != ECORE_ROCE_QP_STATE_RTS) &&
4543 	    (qp->state != ECORE_ROCE_QP_STATE_ERR) &&
4544 	    (qp->state != ECORE_ROCE_QP_STATE_SQD)) {
4545 		spin_unlock_irqrestore(&qp->q_lock, flags);
4546 		*bad_wr = wr;
4547 		QL_DPRINT11(ha, "QP in wrong state! QP icid=0x%x state %d\n",
4548 			qp->icid, qp->state);
4549 		return -EINVAL;
4550 	}
4551 
4552 	if (!wr) {
4553 		QL_DPRINT11(ha, "Got an empty post send???\n");
4554 	}
4555 
4556 	while (wr) {
4557 		struct rdma_sq_common_wqe	*wqe;
4558 		struct rdma_sq_send_wqe		*swqe;
4559 		struct rdma_sq_send_wqe		*swqe2;
4560 		struct rdma_sq_rdma_wqe_1st	*rwqe;
4561 		struct rdma_sq_rdma_wqe_2nd	*rwqe2;
4562 		struct rdma_sq_local_inv_wqe	*iwqe;
4563 		struct rdma_sq_atomic_wqe	*awqe1;
4564 		struct rdma_sq_atomic_wqe	*awqe2;
4565 		struct rdma_sq_atomic_wqe	*awqe3;
4566 		struct rdma_sq_fmr_wqe_1st	*fwqe1;
4567 
4568 		if (!qlnxr_can_post_send(qp, wr)) {
4569 			status = -ENOMEM;
4570 			*bad_wr = wr;
4571 			break;
4572 		}
4573 
4574 		wqe = ecore_chain_produce(&qp->sq.pbl);
4575 
4576 		qp->wqe_wr_id[qp->sq.prod].signaled =
4577 			!!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
4578 
4579 		/* common fields */
4580 		wqe->flags = 0;
4581 		wqe->flags |= (RDMA_SQ_SEND_WQE_COMP_FLG_MASK <<
4582 				RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT);
4583 
4584 		SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG, \
4585 			!!(wr->send_flags & IB_SEND_SOLICITED));
4586 
4587 		comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) ||
4588 				(qp->signaled);
4589 
4590 		SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
4591 		SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,  \
4592 			!!(wr->send_flags & IB_SEND_FENCE));
4593 
4594 		wqe->prev_wqe_size = qp->prev_wqe_size;
4595 
4596 		qp->wqe_wr_id[qp->sq.prod].opcode = qlnxr_ib_to_wc_opcode(wr->opcode);
4597 
4598 		switch (wr->opcode) {
4599 		case IB_WR_SEND_WITH_IMM:
4600 
4601 			wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
4602 			swqe = (struct rdma_sq_send_wqe *)wqe;
4603 			swqe->wqe_size = 2;
4604 			swqe2 = (struct rdma_sq_send_wqe *)
4605 					ecore_chain_produce(&qp->sq.pbl);
4606 			swqe->inv_key_or_imm_data =
4607 				cpu_to_le32(wr->ex.imm_data);
4608 			swqe->length = cpu_to_le32(
4609 						qlnxr_prepare_sq_send_data(dev,
4610 							qp, swqe, swqe2, wr,
4611 							bad_wr));
4612 
4613 			qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4614 			qp->prev_wqe_size = swqe->wqe_size;
4615 			qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4616 
4617 			QL_DPRINT12(ha, "SEND w/ IMM length = %d imm data=%x\n",
4618 				swqe->length, wr->ex.imm_data);
4619 
4620 			break;
4621 
4622 		case IB_WR_SEND:
4623 
4624 			wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
4625 			swqe = (struct rdma_sq_send_wqe *)wqe;
4626 
4627 			swqe->wqe_size = 2;
4628 			swqe2 = (struct rdma_sq_send_wqe *)
4629 					ecore_chain_produce(&qp->sq.pbl);
4630 			swqe->length = cpu_to_le32(
4631 						qlnxr_prepare_sq_send_data(dev,
4632 							qp, swqe, swqe2, wr,
4633 							bad_wr));
4634 			qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4635 			qp->prev_wqe_size = swqe->wqe_size;
4636 			qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4637 
4638 			QL_DPRINT12(ha, "SEND w/o IMM length = %d\n",
4639 				swqe->length);
4640 
4641 			break;
4642 
4643 		case IB_WR_SEND_WITH_INV:
4644 
4645 			wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
4646 			swqe = (struct rdma_sq_send_wqe *)wqe;
4647 			swqe2 = (struct rdma_sq_send_wqe *)
4648 					ecore_chain_produce(&qp->sq.pbl);
4649 			swqe->wqe_size = 2;
4650 			swqe->inv_key_or_imm_data =
4651 				cpu_to_le32(wr->ex.invalidate_rkey);
4652 			swqe->length = cpu_to_le32(qlnxr_prepare_sq_send_data(dev,
4653 						qp, swqe, swqe2, wr, bad_wr));
4654 			qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4655 			qp->prev_wqe_size = swqe->wqe_size;
4656 			qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4657 
4658 			QL_DPRINT12(ha, "SEND w INVALIDATE length = %d\n",
4659 				swqe->length);
4660 			break;
4661 
4662 		case IB_WR_RDMA_WRITE_WITH_IMM:
4663 
4664 			wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
4665 			rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4666 
4667 			rwqe->wqe_size = 2;
4668 			rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
4669 			rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4670 					ecore_chain_produce(&qp->sq.pbl);
4671 			rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4672 						qp, rwqe, rwqe2, wr, bad_wr));
4673 			qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4674 			qp->prev_wqe_size = rwqe->wqe_size;
4675 			qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4676 
4677 			QL_DPRINT12(ha,
4678 				"RDMA WRITE w/ IMM length = %d imm data=%x\n",
4679 				rwqe->length, rwqe->imm_data);
4680 
4681 			break;
4682 
4683 		case IB_WR_RDMA_WRITE:
4684 
4685 			wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
4686 			rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4687 
4688 			rwqe->wqe_size = 2;
4689 			rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4690 					ecore_chain_produce(&qp->sq.pbl);
4691 			rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4692 						qp, rwqe, rwqe2, wr, bad_wr));
4693 			qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4694 			qp->prev_wqe_size = rwqe->wqe_size;
4695 			qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4696 
4697 			QL_DPRINT12(ha,
4698 				"RDMA WRITE w/o IMM length = %d\n",
4699 				rwqe->length);
4700 
4701 			break;
4702 
4703 		case IB_WR_RDMA_READ_WITH_INV:
4704 
4705 			QL_DPRINT12(ha,
4706 				"RDMA READ WITH INVALIDATE not supported\n");
4707 
4708 			*bad_wr = wr;
4709 			rc = -EINVAL;
4710 
4711 			break;
4712 
4713 		case IB_WR_RDMA_READ:
4714 
4715 			wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
4716 			rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4717 
4718 			rwqe->wqe_size = 2;
4719 			rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4720 					ecore_chain_produce(&qp->sq.pbl);
4721 			rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4722 						qp, rwqe, rwqe2, wr, bad_wr));
4723 
4724 			qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4725 			qp->prev_wqe_size = rwqe->wqe_size;
4726 			qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4727 
4728 			QL_DPRINT12(ha, "RDMA READ length = %d\n",
4729 				rwqe->length);
4730 
4731 			break;
4732 
4733 		case IB_WR_ATOMIC_CMP_AND_SWP:
4734 		case IB_WR_ATOMIC_FETCH_AND_ADD:
4735 
4736 			QL_DPRINT12(ha,
4737 				"ATOMIC operation = %s\n",
4738 				((wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) ?
4739 					"IB_WR_ATOMIC_CMP_AND_SWP" :
4740 					"IB_WR_ATOMIC_FETCH_AND_ADD"));
4741 
4742 			awqe1 = (struct rdma_sq_atomic_wqe *)wqe;
4743 			awqe1->prev_wqe_size = 4;
4744 
4745 			awqe2 = (struct rdma_sq_atomic_wqe *)
4746 					ecore_chain_produce(&qp->sq.pbl);
4747 
4748 			TYPEPTR_ADDR_SET(awqe2, remote_va, \
4749 				atomic_wr(wr)->remote_addr);
4750 
4751 			awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
4752 
4753 			awqe3 = (struct rdma_sq_atomic_wqe *)
4754 					ecore_chain_produce(&qp->sq.pbl);
4755 
4756 			if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
4757 				wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
4758 				TYPEPTR_ADDR_SET(awqe3, swap_data,
4759 						 atomic_wr(wr)->compare_add);
4760 			} else {
4761 				wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
4762 				TYPEPTR_ADDR_SET(awqe3, swap_data,
4763 						 atomic_wr(wr)->swap);
4764 				TYPEPTR_ADDR_SET(awqe3, cmp_data,
4765 						 atomic_wr(wr)->compare_add);
4766 			}
4767 
4768 			qlnxr_prepare_sq_sges(dev, qp, NULL, wr);
4769 
4770 			qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->prev_wqe_size;
4771 			qp->prev_wqe_size = awqe1->prev_wqe_size;
4772 
4773 			break;
4774 
4775 		case IB_WR_LOCAL_INV:
4776 
4777 			QL_DPRINT12(ha,
4778 				"INVALIDATE length (IB_WR_LOCAL_INV)\n");
4779 
4780 			iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
4781 			iwqe->prev_wqe_size = 1;
4782 
4783 			iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
4784 			iwqe->inv_l_key = wr->ex.invalidate_rkey;
4785 			qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->prev_wqe_size;
4786 			qp->prev_wqe_size = iwqe->prev_wqe_size;
4787 
4788 			break;
4789 
4790 #if __FreeBSD_version >= 1102000
4791 
4792 		case IB_WR_REG_MR:
4793 
4794 			QL_DPRINT12(ha, "IB_WR_REG_MR\n");
4795 
4796 			wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
4797 			fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
4798 			fwqe1->wqe_size = 2;
4799 
4800 			rc = qlnxr_prepare_reg(qp, fwqe1, reg_wr(wr));
4801 			if (rc) {
4802 				QL_DPRINT11(ha, "IB_WR_REG_MR failed rc=%d\n", rc);
4803 				*bad_wr = wr;
4804 				break;
4805 			}
4806 
4807 			qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
4808 			qp->prev_wqe_size = fwqe1->wqe_size;
4809 
4810 			break;
4811 #else
4812 		case IB_WR_FAST_REG_MR:
4813 
4814 			QL_DPRINT12(ha, "FAST_MR (IB_WR_FAST_REG_MR)\n");
4815 
4816 			wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
4817 			fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
4818 			fwqe1->prev_wqe_size = 3;
4819 
4820 			rc = qlnxr_prepare_fmr(qp, fwqe1, wr);
4821 
4822 			if (rc) {
4823 				QL_DPRINT12(ha,
4824 					"FAST_MR (IB_WR_FAST_REG_MR) failed"
4825 					" rc = %d\n", rc);
4826 				*bad_wr = wr;
4827 				break;
4828 			}
4829 
4830 			qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->prev_wqe_size;
4831 			qp->prev_wqe_size = fwqe1->prev_wqe_size;
4832 
4833 			break;
4834 #endif /* #if __FreeBSD_version >= 1102000 */
4835 
4836 		default:
4837 
4838 			QL_DPRINT12(ha, "Invalid Opcode 0x%x!\n", wr->opcode);
4839 
4840 			rc = -EINVAL;
4841 			*bad_wr = wr;
4842 			break;
4843 		}
4844 
4845 		if (*bad_wr) {
4846 			/*
4847 			 * restore prod to its position before this WR was processed
4848 			 */
4849 			ecore_chain_set_prod(&qp->sq.pbl,
4850 			     le16_to_cpu(qp->sq.db_data.data.value),
4851 			     wqe);
4852 			/* restore prev_wqe_size */
4853 			qp->prev_wqe_size = wqe->prev_wqe_size;
4854 			status = rc;
4855 
4856 			QL_DPRINT12(ha, "failed *bad_wr = %p\n", *bad_wr);
4857 			break; /* out of the loop */
4858 		}
4859 
4860 		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
4861 
4862 		qlnxr_inc_sw_prod(&qp->sq);
4863 
4864 		qp->sq.db_data.data.value++;
4865 
4866 		wr = wr->next;
4867 	}
4868 
4869 	/* Trigger doorbell
4870 	 * If the very first WR failed, the doorbell will be rung in vain.
4871 	 * However, this is harmless as long as the producer value is
4872 	 * unchanged; for performance reasons we avoid checking for this
4873 	 * redundant doorbell.
4874 	 */
4875 	wmb();
4876 	//writel(qp->sq.db_data.raw, qp->sq.db);
4877 
4878 	reg_addr = (uint32_t)((uint8_t *)qp->sq.db - (uint8_t *)ha->cdev.doorbells);
4879         bus_write_4(ha->pci_dbells, reg_addr, qp->sq.db_data.raw);
4880         bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
4881 
4882 	mmiowb();
4883 
4884 	spin_unlock_irqrestore(&qp->q_lock, flags);
4885 
4886 	QL_DPRINT12(ha, "exit[ibqp, wr, bad_wr] = [%p, %p, %p]\n",
4887 		ibqp, wr, bad_wr);
4888 
4889 	return status;
4890 }
4891 
4892 static u32
4893 qlnxr_srq_elem_left(struct qlnxr_srq_hwq_info *hw_srq)
4894 {
4895 	u32 used;
4896 
4897 	/* Calculate number of elements used based on producer
4898 	 * count and consumer count and subtract it from max
4899 	 * work request supported so that we get elements left.
4900 	 */
4901 	used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
4902 
4903 	return hw_srq->max_wr - used;
4904 }
4905 
4906 int
4907 qlnxr_post_recv(struct ib_qp *ibqp,
4908 	const struct ib_recv_wr *wr,
4909 	const struct ib_recv_wr **bad_wr)
4910 {
4911  	struct qlnxr_qp		*qp = get_qlnxr_qp(ibqp);
4912 	struct qlnxr_dev	*dev = qp->dev;
4913 	unsigned long		flags;
4914 	int			status = 0;
4915 	qlnx_host_t		*ha;
4916 	uint32_t		reg_addr;
4917 
4918 	ha = dev->ha;
4919 
4920 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
4921 		return -EINVAL;
4922 
4923 	QL_DPRINT12(ha, "enter\n");
4924 
4925 	if (qp->qp_type == IB_QPT_GSI) {
4926 		QL_DPRINT12(ha, "(qp->qp_type = IB_QPT_GSI)\n");
4927 		return qlnxr_gsi_post_recv(ibqp, wr, bad_wr);
4928 	}
4929 
4930 	if (qp->srq) {
4931 		QL_DPRINT11(ha, "qp->srq [%p]"
4932 			" QP is associated with SRQ, cannot post RQ buffers\n",
4933 			qp->srq);
4934 		return -EINVAL;
4935 	}
4936 
4937 	spin_lock_irqsave(&qp->q_lock, flags);
4938 
4939 	if (qp->state == ECORE_ROCE_QP_STATE_RESET) {
4940 		spin_unlock_irqrestore(&qp->q_lock, flags);
4941 		*bad_wr = wr;
4942 
4943 		QL_DPRINT11(ha, "qp->qp_type = ECORE_ROCE_QP_STATE_RESET\n");
4944 
4945 		return -EINVAL;
4946 	}
4947 
4948 	while (wr) {
4949 		int i;
4950 
4951 		if ((ecore_chain_get_elem_left_u32(&qp->rq.pbl) <
4952 			QLNXR_MAX_RQE_ELEMENTS_PER_RQE) ||
4953 			(wr->num_sge > qp->rq.max_sges)) {
4954 			status = -ENOMEM;
4955 			*bad_wr = wr;
4956 			break;
4957 		}
4958 		for (i = 0; i < wr->num_sge; i++) {
4959 			u32 flags = 0;
4960 			struct rdma_rq_sge *rqe = ecore_chain_produce(&qp->rq.pbl);
4961 
4962 			/* first one must include the number of SGE in the list */
4963 			if (!i)
4964 				SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, wr->num_sge);
4965 
4966 			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, wr->sg_list[i].lkey);
4967 
4968 			RQ_SGE_SET(rqe, wr->sg_list[i].addr, \
4969 				wr->sg_list[i].length, flags);
4970 		}
4971 		/* Special case of no SGEs. The FW requires between 1 and 4 SGEs,
4972 		 * so in this case we post a single SGE with length zero. This is
4973 		 * needed because an RDMA write with immediate consumes an RQ entry. */
4974 		if (!wr->num_sge) {
4975 			u32 flags = 0;
4976 			struct rdma_rq_sge *rqe = ecore_chain_produce(&qp->rq.pbl);
4977 
4978 			/* first one must include the number of SGE in the list */
4979 			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
4980 			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
4981 
4982 			//RQ_SGE_SET(rqe, 0, 0, flags);
4983 			rqe->addr.hi = 0;
4984 			rqe->addr.lo = 0;
4985 
4986 			rqe->length = 0;
4987 			rqe->flags = cpu_to_le32(flags);
4988 
4989 			i = 1;
4990 		}
4991 
4992 		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
4993 		qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
4994 
4995 		qlnxr_inc_sw_prod(&qp->rq);
4996 
4997 		wmb();
4998 
4999 		qp->rq.db_data.data.value++;
5000 
5001 	//	writel(qp->rq.db_data.raw, qp->rq.db);
5002 		mmiowb();
5003 	//	if (QLNX_IS_IWARP(dev)) {
5004 	//		writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
5005 	//		mmiowb(); /* for second doorbell */
5006 	//	}
5007 
5008 		reg_addr = (uint32_t)((uint8_t *)qp->rq.db -
5009 				(uint8_t *)ha->cdev.doorbells);
5010 
5011 		bus_write_4(ha->pci_dbells, reg_addr, qp->rq.db_data.raw);
5012 		bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
5013 
5014 		if (QLNX_IS_IWARP(dev)) {
5015 			reg_addr = (uint32_t)((uint8_t *)qp->rq.iwarp_db2 -
5016 						(uint8_t *)ha->cdev.doorbells);
5017 			bus_write_4(ha->pci_dbells, reg_addr, \
5018 				qp->rq.iwarp_db2_data.raw);
5019 			bus_barrier(ha->pci_dbells,  0, 0, \
5020 				BUS_SPACE_BARRIER_READ);
5021 		}
5022 
5023 		wr = wr->next;
5024 	}
5025 
5026 	spin_unlock_irqrestore(&qp->q_lock, flags);
5027 
5028 	QL_DPRINT12(ha, "exit status = 0x%x\n", status);
5029 
5030 	return status;
5031 }
5032 
5033 /* For FMR we need to increment the completed-FMR counter used by the FMR
5034  * algorithm to decide whether a PBL can be freed.
5035  * This must be done whether or not the work request was signaled. For that
5036  * reason this function is called from the condition that checks whether a WR
5037  * should be skipped, so that we do not miss it (this FMR operation may not
5038  * have been signaled).
5039  */
5040 static inline void
5041 qlnxr_chk_if_fmr(struct qlnxr_qp *qp)
5042 {
5043 #if __FreeBSD_version >= 1102000
5044 
5045 	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
5046 		qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
5047 #else
5048 	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_FAST_REG_MR)
5049 		qp->wqe_wr_id[qp->sq.cons].frmr->info.completed++;
5050 
5051 #endif /* #if __FreeBSD_version >= 1102000 */
5052 }
5053 
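/*
 * Walk the SQ from the current software consumer up to the hardware
 * consumer 'hw_cons', generating at most 'num_entries' work completions.
 * Unsignaled WRs are skipped unless 'force' is set (used when flushing),
 * and the SQ chain elements of every processed WR are consumed either way.
 */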
5054 static int
5055 process_req(struct qlnxr_dev *dev,
5056 	struct qlnxr_qp *qp,
5057 	struct qlnxr_cq *cq,
5058 	int num_entries,
5059 	struct ib_wc *wc,
5060 	u16 hw_cons,
5061 	enum ib_wc_status status,
5062 	int force)
5063 {
5064 	u16		cnt = 0;
5065 	qlnx_host_t	*ha = dev->ha;
5066 
5067 	QL_DPRINT12(ha, "enter\n");
5068 
5069 	while (num_entries && qp->sq.wqe_cons != hw_cons) {
5070 		if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
5071 			qlnxr_chk_if_fmr(qp);
5072 			/* skip WC */
5073 			goto next_cqe;
5074 		}
5075 
5076 		/* fill WC */
5077 		wc->status = status;
5078 		wc->vendor_err = 0;
5079 		wc->wc_flags = 0;
5080 		wc->src_qp = qp->id;
5081 		wc->qp = &qp->ibqp;
5082 
5083 		// common section
5084 		wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
5085 		wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
5086 
5087 		switch (wc->opcode) {
5088 		case IB_WC_RDMA_WRITE:
5089 
5090 			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
5091 
5092 			QL_DPRINT12(ha,
5093 				"opcode = IB_WC_RDMA_WRITE bytes = %d\n",
5094 				qp->wqe_wr_id[qp->sq.cons].bytes_len);
5095 			break;
5096 
5097 		case IB_WC_COMP_SWAP:
5098 		case IB_WC_FETCH_ADD:
5099 			wc->byte_len = 8;
5100 			break;
5101 
5102 #if __FreeBSD_version >= 1102000
5103 		case IB_WC_REG_MR:
5104 			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
5105 			break;
5106 #else
5107 		case IB_WC_FAST_REG_MR:
5108 			qp->wqe_wr_id[qp->sq.cons].frmr->info.completed++;
5109 			break;
5110 #endif /* #if __FreeBSD_version >= 1102000 */
5111 
5112 		case IB_WC_RDMA_READ:
5113 		case IB_WC_SEND:
5114 
5115 			QL_DPRINT12(ha, "opcode = 0x%x \n", wc->opcode);
5116 			break;
5117 		default:
5118 			;//DP_ERR("TBD ERROR");
5119 		}
5120 
5121 		num_entries--;
5122 		wc++;
5123 		cnt++;
5124 next_cqe:
5125 		while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
5126 			ecore_chain_consume(&qp->sq.pbl);
5127 		qlnxr_inc_sw_cons(&qp->sq);
5128 	}
5129 
5130 	QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5131 	return cnt;
5132 }
5133 
5134 static int
5135 qlnxr_poll_cq_req(struct qlnxr_dev *dev,
5136 	struct qlnxr_qp *qp,
5137 	struct qlnxr_cq *cq,
5138 	int num_entries,
5139 	struct ib_wc *wc,
5140 	struct rdma_cqe_requester *req)
5141 {
5142 	int		cnt = 0;
5143 	qlnx_host_t	*ha = dev->ha;
5144 
5145 	QL_DPRINT12(ha, "enter req->status = 0x%x\n", req->status);
5146 
5147 	switch (req->status) {
5148 	case RDMA_CQE_REQ_STS_OK:
5149 
5150 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
5151 			IB_WC_SUCCESS, 0);
5152 		break;
5153 
5154 	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
5155 
5156 		if (qp->state != ECORE_ROCE_QP_STATE_ERR)
5157 			cnt = process_req(dev, qp, cq, num_entries, wc,
5158 					  req->sq_cons, IB_WC_WR_FLUSH_ERR, 1);
5159 		break;
5160 
5161 	default: /* other errors case */
5162 
5163 		/* process all WQEs before the consumer */
5164 		qp->state = ECORE_ROCE_QP_STATE_ERR;
5165 		cnt = process_req(dev, qp, cq, num_entries, wc,
5166 				req->sq_cons - 1, IB_WC_SUCCESS, 0);
5167 		wc += cnt;
5168 		/* if we have extra WC fill it with actual error info */
5169 
5170 		if (cnt < num_entries) {
5171 			enum ib_wc_status wc_status;
5172 
5173 			switch (req->status) {
5174 			case 	RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
5175 				wc_status = IB_WC_BAD_RESP_ERR;
5176 				break;
5177 			case 	RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
5178 				wc_status = IB_WC_LOC_LEN_ERR;
5179 				break;
5180 			case    RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
5181 				wc_status = IB_WC_LOC_QP_OP_ERR;
5182 				break;
5183 			case    RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
5184 				wc_status = IB_WC_LOC_PROT_ERR;
5185 				break;
5186 			case    RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
5187 				wc_status = IB_WC_MW_BIND_ERR;
5188 				break;
5189 			case    RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
5190 				wc_status = IB_WC_REM_INV_REQ_ERR;
5191 				break;
5192 			case    RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
5193 				wc_status = IB_WC_REM_ACCESS_ERR;
5194 				break;
5195 			case    RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
5196 				wc_status = IB_WC_REM_OP_ERR;
5197 				break;
5198 			case    RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
5199 				wc_status = IB_WC_RNR_RETRY_EXC_ERR;
5200 				break;
5201 			case    RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
5202 				wc_status = IB_WC_RETRY_EXC_ERR;
5203 				break;
5204 			default:
5205 				wc_status = IB_WC_GENERAL_ERR;
5206 			}
5207 
5208 			cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
5209 					wc_status, 1 /* force use of WC */);
5210 		}
5211 	}
5212 
5213 	QL_DPRINT12(ha, "exit cnt = %d\n", cnt);
5214 	return cnt;
5215 }
5216 
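/*
 * Fill a single receive work completion from a responder CQE, mapping the
 * hardware status to an IB WC status and, on success, picking up any
 * immediate data or invalidated rkey reported by the firmware.
 */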
5217 static void
5218 __process_resp_one(struct qlnxr_dev *dev,
5219 	struct qlnxr_qp *qp,
5220 	struct qlnxr_cq *cq,
5221 	struct ib_wc *wc,
5222 	struct rdma_cqe_responder *resp,
5223 	u64 wr_id)
5224 {
5225 	enum ib_wc_status	wc_status = IB_WC_SUCCESS;
5226 #if __FreeBSD_version < 1102000
5227 	u8			flags;
5228 #endif
5229 	qlnx_host_t		*ha = dev->ha;
5230 
5231 	QL_DPRINT12(ha, "enter qp = %p resp->status = 0x%x\n",
5232 		qp, resp->status);
5233 
5234 	wc->opcode = IB_WC_RECV;
5235 	wc->wc_flags = 0;
5236 
5237 	switch (resp->status) {
5238 	case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
5239 		wc_status = IB_WC_LOC_ACCESS_ERR;
5240 		break;
5241 
5242 	case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
5243 		wc_status = IB_WC_LOC_LEN_ERR;
5244 		break;
5245 
5246 	case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
5247 		wc_status = IB_WC_LOC_QP_OP_ERR;
5248 		break;
5249 
5250 	case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
5251 		wc_status = IB_WC_LOC_PROT_ERR;
5252 		break;
5253 
5254 	case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
5255 		wc_status = IB_WC_MW_BIND_ERR;
5256 		break;
5257 
5258 	case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
5259 		wc_status = IB_WC_REM_INV_RD_REQ_ERR;
5260 		break;
5261 
5262 	case RDMA_CQE_RESP_STS_OK:
5263 
5264 #if __FreeBSD_version >= 1102000
5265 		if (resp->flags & QLNXR_RESP_IMM) {
5266 			wc->ex.imm_data =
5267 				le32_to_cpu(resp->imm_data_or_inv_r_Key);
5268 			wc->wc_flags |= IB_WC_WITH_IMM;
5269 
5270 			if (resp->flags & QLNXR_RESP_RDMA)
5271 				wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
5272 
5273 			if (resp->flags & QLNXR_RESP_INV) {
5274 				QL_DPRINT11(ha,
5275 					"Invalid flags QLNXR_RESP_INV [0x%x]"
5276 					"qp = %p qp->id = 0x%x cq = %p"
5277 					" cq->icid = 0x%x\n",
5278 					resp->flags, qp, qp->id, cq, cq->icid );
5279 			}
5280 		} else if (resp->flags & QLNXR_RESP_INV) {
5281 			wc->ex.imm_data =
5282 				le32_to_cpu(resp->imm_data_or_inv_r_Key);
5283 			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
5284 
5285 			if (resp->flags & QLNXR_RESP_RDMA) {
5286 				QL_DPRINT11(ha,
5287 					"Invalid flags QLNXR_RESP_RDMA [0x%x]"
5288 					"qp = %p qp->id = 0x%x cq = %p"
5289 					" cq->icid = 0x%x\n",
5290 					resp->flags, qp, qp->id, cq, cq->icid );
5291 			}
5292 		} else if (resp->flags & QLNXR_RESP_RDMA) {
5293 			QL_DPRINT11(ha, "Invalid flags QLNXR_RESP_RDMA [0x%x]"
5294 				"qp = %p qp->id = 0x%x cq = %p cq->icid = 0x%x\n",
5295 				resp->flags, qp, qp->id, cq, cq->icid );
5296 		}
5297 #else
5298 		wc_status = IB_WC_SUCCESS;
5299 		wc->byte_len = le32_to_cpu(resp->length);
5300 
5301 		flags = resp->flags & QLNXR_RESP_RDMA_IMM;
5302 
5303 		switch (flags) {
5304 		case QLNXR_RESP_RDMA_IMM:
5305 			/* update opcode */
5306 			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
5307 			/* fall through to set the immediate data */
5308 		case QLNXR_RESP_IMM:
5309 			wc->ex.imm_data =
5310 				le32_to_cpu(resp->imm_data_or_inv_r_Key);
5311 			wc->wc_flags |= IB_WC_WITH_IMM;
5312 			break;
5313 		case QLNXR_RESP_RDMA:
5314 			QL_DPRINT11(ha, "Invalid flags QLNXR_RESP_RDMA [0x%x]"
5315 				"qp = %p qp->id = 0x%x cq = %p cq->icid = 0x%x\n",
5316 				resp->flags, qp, qp->id, cq, cq->icid );
5317 			break;
5318 		default:
5319 			/* valid configuration, but nothing to do here */
5320 			;
5321 		}
5322 #endif /* #if __FreeBSD_version >= 1102000 */
5323 
5324 		break;
5325 	default:
5326 		wc_status = IB_WC_GENERAL_ERR;
5327 	}
5328 
5329 	/* fill WC */
5330 	wc->status = wc_status;
5331 	wc->vendor_err = 0;
5332 	wc->src_qp = qp->id;
5333 	wc->qp = &qp->ibqp;
5334 	wc->wr_id = wr_id;
5335 
5336 	QL_DPRINT12(ha, "exit status = 0x%x\n", wc_status);
5337 
5338 	return;
5339 }
5340 
5341 static int
5342 process_resp_one_srq(struct qlnxr_dev *dev,
5343 	struct qlnxr_qp *qp,
5344 	struct qlnxr_cq *cq,
5345 	struct ib_wc *wc,
5346 	struct rdma_cqe_responder *resp)
5347 {
5348 	struct qlnxr_srq	*srq = qp->srq;
5349 	u64			wr_id;
5350 	qlnx_host_t		*ha = dev->ha;
5351 
5352 	QL_DPRINT12(ha, "enter\n");
5353 
5354 	wr_id = HILO_U64(resp->srq_wr_id.hi, resp->srq_wr_id.lo);
5355 
5356 	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
5357 		wc->status = IB_WC_WR_FLUSH_ERR;
5358 		wc->vendor_err = 0;
5359 		wc->wr_id = wr_id;
5360 		wc->byte_len = 0;
5361 		wc->src_qp = qp->id;
5362 		wc->qp = &qp->ibqp;
5363 		wc->wr_id = wr_id;
5364 	} else {
5365 		__process_resp_one(dev, qp, cq, wc, resp, wr_id);
5366 	}
5367 
5368 	/* The PBL is maintained at WR granularity, so increment the
5369 	 * WR consumer count after consuming the WR.
5370 	 */
5371 	srq->hw_srq.wr_cons_cnt++;
5372 
5373 	QL_DPRINT12(ha, "exit\n");
5374 	return 1;
5375 }
5376 
5377 static int
5378 process_resp_one(struct qlnxr_dev *dev,
5379 	struct qlnxr_qp *qp,
5380 	struct qlnxr_cq *cq,
5381 	struct ib_wc *wc,
5382 	struct rdma_cqe_responder *resp)
5383 {
5384 	qlnx_host_t	*ha = dev->ha;
5385 	u64		wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
5386 
5387 	QL_DPRINT12(ha, "enter\n");
5388 
5389 	__process_resp_one(dev, qp, cq, wc, resp, wr_id);
5390 
5391 	while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
5392 		ecore_chain_consume(&qp->rq.pbl);
5393 	qlnxr_inc_sw_cons(&qp->rq);
5394 
5395 	QL_DPRINT12(ha, "exit\n");
5396 	return 1;
5397 }
5398 
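/*
 * Generate IB_WC_WR_FLUSH_ERR completions for the outstanding RQ entries
 * up to the hardware consumer, consuming the matching RQ chain elements.
 */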
5399 static int
5400 process_resp_flush(struct qlnxr_qp *qp,
5401 	int num_entries,
5402 	struct ib_wc *wc,
5403 	u16 hw_cons)
5404 {
5405 	u16		cnt = 0;
5406 	qlnx_host_t	*ha = qp->dev->ha;
5407 
5408 	QL_DPRINT12(ha, "enter\n");
5409 
5410 	while (num_entries && qp->rq.wqe_cons != hw_cons) {
5411 		/* fill WC */
5412 		wc->status = IB_WC_WR_FLUSH_ERR;
5413 		wc->vendor_err = 0;
5414 		wc->wc_flags = 0;
5415 		wc->src_qp = qp->id;
5416 		wc->byte_len = 0;
5417 		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
5418 		wc->qp = &qp->ibqp;
5419 		num_entries--;
5420 		wc++;
5421 		cnt++;
5422 		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
5423 			ecore_chain_consume(&qp->rq.pbl);
5424 		qlnxr_inc_sw_cons(&qp->rq);
5425 	}
5426 
5427 	QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5428 	return cnt;
5429 }
5430 
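/*
 * Consume the responder CQE only once the software RQ consumer has caught
 * up with the RQ consumer index carried in the CQE; until then the same
 * CQE is revisited so any remaining flushed WRs can still be reported.
 */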
5431 static void
5432 try_consume_resp_cqe(struct qlnxr_cq *cq,
5433 	struct qlnxr_qp *qp,
5434 	struct rdma_cqe_responder *resp,
5435 	int *update)
5436 {
5437 	if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
5438 		consume_cqe(cq);
5439 		*update |= 1;
5440 	}
5441 }
5442 
5443 static int
5444 qlnxr_poll_cq_resp_srq(struct qlnxr_dev *dev,
5445 	struct qlnxr_qp *qp,
5446 	struct qlnxr_cq *cq,
5447 	int num_entries,
5448 	struct ib_wc *wc,
5449 	struct rdma_cqe_responder *resp,
5450 	int *update)
5451 {
5452 	int		cnt;
5453 	qlnx_host_t	*ha = dev->ha;
5454 
5455 	QL_DPRINT12(ha, "enter\n");
5456 
5457 	cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
5458 	consume_cqe(cq);
5459 	*update |= 1;
5460 
5461 	QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5462 	return cnt;
5463 }
5464 
5465 static int
5466 qlnxr_poll_cq_resp(struct qlnxr_dev *dev,
5467 	struct qlnxr_qp *qp,
5468 	struct qlnxr_cq *cq,
5469 	int num_entries,
5470 	struct ib_wc *wc,
5471 	struct rdma_cqe_responder *resp,
5472 	int *update)
5473 {
5474 	int		cnt;
5475 	qlnx_host_t	*ha = dev->ha;
5476 
5477 	QL_DPRINT12(ha, "enter\n");
5478 
5479 	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
5480 		cnt = process_resp_flush(qp, num_entries, wc,
5481 				resp->rq_cons);
5482 		try_consume_resp_cqe(cq, qp, resp, update);
5483 	} else {
5484 		cnt = process_resp_one(dev, qp, cq, wc, resp);
5485 		consume_cqe(cq);
5486 		*update |= 1;
5487 	}
5488 
5489 	QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5490 	return cnt;
5491 }
5492 
5493 static void
5494 try_consume_req_cqe(struct qlnxr_cq *cq, struct qlnxr_qp *qp,
5495 	struct rdma_cqe_requester *req, int *update)
5496 {
5497 	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
5498 		consume_cqe(cq);
5499 		*update |= 1;
5500 	}
5501 }
5502 
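/*
 * Ring the CQ doorbell with the given consumer index and arm flags. The
 * doorbell is a single 64-bit write into the device doorbell BAR, issued
 * here through bus_write_8() rather than writeq().
 */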
5503 static void
5504 doorbell_cq(struct qlnxr_dev *dev, struct qlnxr_cq *cq, u32 cons, u8 flags)
5505 {
5506 	uint64_t	reg_addr;
5507 	qlnx_host_t	*ha = dev->ha;
5508 
5509 	QL_DPRINT12(ha, "enter\n");
5510 
5511 	wmb();
5512 	cq->db.data.agg_flags = flags;
5513 	cq->db.data.value = cpu_to_le32(cons);
5514 
5515 	reg_addr = (uint64_t)((uint8_t *)cq->db_addr -
5516 				(uint8_t *)(ha->cdev.doorbells));
5517 
5518 	bus_write_8(ha->pci_dbells, reg_addr, cq->db.raw);
5519 	bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
5520 
5521 	QL_DPRINT12(ha, "exit\n");
5522 	return;
5523 
5524 //#ifdef __LP64__
5525 //	writeq(cq->db.raw, cq->db_addr);
5526 //#else
5527 	/* Note that since the FW allows 64 bit write only, in 32bit systems
5528 	 * the value of db_addr must be low enough. This is currently not
5529 	 * enforced.
5530 	 */
5531 //	writel(cq->db.raw & 0xffffffff, cq->db_addr);
5532 //	mmiowb();
5533 //#endif
5534 }
5535 
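/*
 * A CQE is valid when its toggle bit matches the CQ's current PBL toggle;
 * the bit written by the firmware presumably flips each time the CQE chain
 * wraps, so a mismatch means the entry has not been written on this pass.
 */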
5536 static int
5537 is_valid_cqe(struct qlnxr_cq *cq, union rdma_cqe *cqe)
5538 {
5539 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
5540 	return (resp_cqe->flags & RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK) ==
5541 			cq->pbl_toggle;
5542 }
5543 
5544 int
5545 qlnxr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
5546 {
5547 	struct qlnxr_cq	*cq = get_qlnxr_cq(ibcq);
5548 	struct qlnxr_dev *dev = get_qlnxr_dev((ibcq->device));
5549 	int		done = 0;
5550 	union rdma_cqe	*cqe = cq->latest_cqe;
5551 	int 		update = 0;
5552 	u32		old_cons, new_cons;
5553 	unsigned long	flags;
5554 	qlnx_host_t	*ha = dev->ha;
5555 
5556 	QL_DPRINT12(ha, "enter\n");
5557 
5558 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
5559 		return -EINVAL;
5560 
5561 	if (cq->destroyed) {
5562 		QL_DPRINT11(ha, "called after destroy for cq %p (icid=%d)\n",
5563 			cq, cq->icid);
5564 		return 0;
5565 	}
5566 
5567 	if (cq->cq_type == QLNXR_CQ_TYPE_GSI)
5568 		return qlnxr_gsi_poll_cq(ibcq, num_entries, wc);
5569 
5570 	spin_lock_irqsave(&cq->cq_lock, flags);
5571 
5572 	old_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
5573 
5574 	while (num_entries && is_valid_cqe(cq, cqe)) {
5575 		int cnt = 0;
5576 		struct qlnxr_qp *qp;
5577 		struct rdma_cqe_requester *resp_cqe;
5578 		enum rdma_cqe_type cqe_type;
5579 
5580 		/* prevent speculative reads of any field of CQE */
5581 		rmb();
5582 
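		/* The CQE carries the owning QP as a pointer split across
		 * qp_handle.hi/lo, presumably programmed by the driver at QP
		 * creation time; reassemble it to find the QP.
		 */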
5583 		resp_cqe = &cqe->req;
5584 		qp = (struct qlnxr_qp *)(uintptr_t)HILO_U64(resp_cqe->qp_handle.hi,
5585 						resp_cqe->qp_handle.lo);
5586 
5587 		if (!qp) {
5588 			QL_DPRINT11(ha, "qp = NULL\n");
5589 			break;
5590 		}
5591 
5592 		wc->qp = &qp->ibqp;
5593 
5594 		cqe_type = GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
5595 
5596 		switch (cqe_type) {
5597 		case RDMA_CQE_TYPE_REQUESTER:
5598 			cnt = qlnxr_poll_cq_req(dev, qp, cq, num_entries,
5599 					wc, &cqe->req);
5600 			try_consume_req_cqe(cq, qp, &cqe->req, &update);
5601 			break;
5602 		case RDMA_CQE_TYPE_RESPONDER_RQ:
5603 			cnt = qlnxr_poll_cq_resp(dev, qp, cq, num_entries,
5604 					wc, &cqe->resp, &update);
5605 			break;
5606 		case RDMA_CQE_TYPE_RESPONDER_SRQ:
5607 			cnt = qlnxr_poll_cq_resp_srq(dev, qp, cq, num_entries,
5608 					wc, &cqe->resp, &update);
5609 			break;
5610 		case RDMA_CQE_TYPE_INVALID:
5611 		default:
5612 			QL_DPRINT11(ha, "cqe type [0x%x] invalid\n", cqe_type);
5613 			break;
5614 		}
5615 		num_entries -= cnt;
5616 		wc += cnt;
5617 		done += cnt;
5618 
5619 		cqe = cq->latest_cqe;
5620 	}
5621 	new_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
5622 
5623 	cq->cq_cons += new_cons - old_cons;
5624 
5625 	if (update) {
5626 		/* The doorbell notifies about the latest VALID entry,
5627 		 * but the chain already points to the next INVALID one.
5628 		 */
5629 		doorbell_cq(dev, cq, cq->cq_cons - 1, cq->arm_flags);
5630 		QL_DPRINT12(ha, "cq = %p cons = 0x%x "
5631 			"arm_flags = 0x%x db.icid = 0x%x\n", cq,
5632 			(cq->cq_cons - 1), cq->arm_flags, cq->db.data.icid);
5633 	}
5634 
5635 	spin_unlock_irqrestore(&cq->cq_lock, flags);
5636 
5637 	QL_DPRINT12(ha, "exit\n");
5638 
5639 	return done;
5640 }
5641 
5642 int
5643 qlnxr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
5644 {
5645         struct qlnxr_cq *cq = get_qlnxr_cq(ibcq);
5646         unsigned long sflags;
5647         struct qlnxr_dev *dev;
5648 	qlnx_host_t	*ha;
5649 
5650 	dev = get_qlnxr_dev((ibcq->device));
5651 	ha = dev->ha;
5652 
5653 	QL_DPRINT12(ha, "enter ibcq = %p flags = 0x%x "
5654 		"cq = %p cons = 0x%x cq_type = 0x%x\n", ibcq,
5655 		flags, cq, cq->cq_cons, cq->cq_type);
5656 
5657 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
5658 		return -EINVAL;
5659 
5660 	if (cq->destroyed) {
5661 		QL_DPRINT11(ha, "cq was already destroyed cq = %p icid=%d\n",
5662 			cq, cq->icid);
5663 		return -EINVAL;
5664 	}
5665 
5666         if (cq->cq_type == QLNXR_CQ_TYPE_GSI) {
5667                 return 0;
5668         }
5669 
5670         spin_lock_irqsave(&cq->cq_lock, sflags);
5671 
5672         cq->arm_flags = 0;
5673 
5674         if (flags & IB_CQ_SOLICITED) {
5675                 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
5676         }
5677         if (flags & IB_CQ_NEXT_COMP) {
5678                 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
5679         }
5680 
5681         doorbell_cq(dev, cq, (cq->cq_cons - 1), cq->arm_flags);
5682 
5683         spin_unlock_irqrestore(&cq->cq_lock, sflags);
5684 
5685 	QL_DPRINT12(ha, "exit ibcq = %p flags = 0x%x\n", ibcq, flags);
5686         return 0;
5687 }
5688 
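/*
 * Allocate a fast-register MR: set up a PBL that can hold up to
 * max_page_list_len page entries, reserve a TID from the ecore layer and
 * register it as a physical (fast) MR. The actual page addresses are
 * supplied later, when the MR is posted with a registration WR.
 */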
5689 static struct qlnxr_mr *
5690 __qlnxr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
5691 {
5692 	struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
5693 	struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
5694 	struct qlnxr_mr *mr;
5695 	int		rc = -ENOMEM;
5696 	qlnx_host_t	*ha;
5697 
5698 	ha = dev->ha;
5699 
5700 	QL_DPRINT12(ha, "enter ibpd = %p pd = %p "
5701 		" pd_id = %d max_page_list_len = %d\n",
5702 		ibpd, pd, pd->pd_id, max_page_list_len);
5703 
5704 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
5705 	if (!mr) {
5706 		QL_DPRINT11(ha, "kzalloc(mr) failed\n");
5707 		return ERR_PTR(rc);
5708 	}
5709 
5710 	mr->dev = dev;
5711 	mr->type = QLNXR_MR_FRMR;
5712 
5713 	rc = qlnxr_init_mr_info(dev, &mr->info, max_page_list_len,
5714 				  1 /* allow dual layer pbl */);
5715 	if (rc) {
5716 		QL_DPRINT11(ha, "qlnxr_init_mr_info failed\n");
5717 		goto err0;
5718 	}
5719 
5720 	rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
5721 	if (rc) {
5722 		QL_DPRINT11(ha, "ecore_rdma_alloc_tid failed\n");
5723 		goto err0;
5724 	}
5725 
5726 	/* index only, 18 bit long, lkey = itid << 8 | key */
5727 	mr->hw_mr.tid_type = ECORE_RDMA_TID_FMR;
5728 	mr->hw_mr.key = 0;
5729 	mr->hw_mr.pd = pd->pd_id;
5730 	mr->hw_mr.local_read = 1;
5731 	mr->hw_mr.local_write = 0;
5732 	mr->hw_mr.remote_read = 0;
5733 	mr->hw_mr.remote_write = 0;
5734 	mr->hw_mr.remote_atomic = 0;
5735 	mr->hw_mr.mw_bind = false; /* TBD MW BIND */
5736 	mr->hw_mr.pbl_ptr = 0; /* Will be supplied during post */
5737 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
5738 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
5739 	mr->hw_mr.fbo = 0;
5740 	mr->hw_mr.length = 0;
5741 	mr->hw_mr.vaddr = 0;
5742 	mr->hw_mr.zbva = false; /* TBD: figure out when this should be true */
5743 	mr->hw_mr.phy_mr = true; /* fast MR: true, regular registration: false */
5744 	mr->hw_mr.dma_mr = false;
5745 
5746 	rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
5747 	if (rc) {
5748 		QL_DPRINT11(ha, "ecore_rdma_register_tid failed\n");
5749 		goto err1;
5750 	}
5751 
5752 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
5753 	mr->ibmr.rkey = mr->ibmr.lkey;
5754 
5755 	QL_DPRINT12(ha, "exit mr = %p mr->ibmr.lkey = 0x%x\n",
5756 		mr, mr->ibmr.lkey);
5757 
5758 	return mr;
5759 
5760 err1:
5761 	ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
5762 err0:
5763 	kfree(mr);
5764 
5765 	QL_DPRINT12(ha, "exit\n");
5766 
5767 	return ERR_PTR(rc);
5768 }
5769 
5770 #if __FreeBSD_version >= 1102000
5771 
5772 struct ib_mr *
5773 qlnxr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
5774     u32 max_num_sg, struct ib_udata *udata)
5775 {
5776 	struct qlnxr_dev *dev;
5777 	struct qlnxr_mr *mr;
5778 	qlnx_host_t     *ha;
5779 
5780 	dev = get_qlnxr_dev(ibpd->device);
5781 	ha = dev->ha;
5782 
5783 	QL_DPRINT12(ha, "enter\n");
5784 
5785 	if (mr_type != IB_MR_TYPE_MEM_REG)
5786 		return ERR_PTR(-EINVAL);
5787 
5788 	mr = __qlnxr_alloc_mr(ibpd, max_num_sg);
5789 
5790 	if (IS_ERR(mr))
5791 		return ERR_PTR(-EINVAL);
5792 
5793 	QL_DPRINT12(ha, "exit mr = %p &mr->ibmr = %p\n", mr, &mr->ibmr);
5794 
5795 	return &mr->ibmr;
5796 }
5797 
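/*
 * qlnxr_set_page - ib_sg_to_pages() callback: store one page address into
 * the next free PBL entry (little-endian lo/hi pair) and advance mr->npages.
 * Fails with -ENOMEM once all PBL entries are in use.
 */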
5798 static int
5799 qlnxr_set_page(struct ib_mr *ibmr, u64 addr)
5800 {
5801 	struct qlnxr_mr *mr = get_qlnxr_mr(ibmr);
5802 	struct qlnxr_pbl *pbl_table;
5803 	struct regpair *pbe;
5804 	struct qlnxr_dev *dev;
5805 	qlnx_host_t     *ha;
5806 	u32 pbes_in_page;
5807 
5808 	dev = mr->dev;
5809 	ha = dev->ha;
5810 
5811 	if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
5812 		QL_DPRINT12(ha, "failed, mr->npages = %d\n", mr->npages);
5813 		return -ENOMEM;
5814 	}
5815 
5816 	QL_DPRINT12(ha, "mr->npages %d addr = %p enter\n", mr->npages,
5817 		((void *)addr));
5818 
5819 	pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
5820 	pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
5821 	pbe = (struct regpair *)pbl_table->va;
5822 	pbe +=  mr->npages % pbes_in_page;
5823 	pbe->lo = cpu_to_le32((u32)addr);
5824 	pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
5825 
5826 	mr->npages++;
5827 
5828 	QL_DPRINT12(ha, "mr->npages %d addr = %p exit \n", mr->npages,
5829 		((void *)addr));
5830 	return 0;
5831 }
5832 
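/*
 * qlnxr_map_mr_sg - map a scatterlist onto the MR's PBL by walking the SG
 * entries with ib_sg_to_pages(), using qlnxr_set_page() above for each page.
 */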
5833 int
5834 qlnxr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
5835 	int sg_nents, unsigned int *sg_offset)
5836 {
5837 	int             ret;
5838 	struct qlnxr_mr *mr = get_qlnxr_mr(ibmr);
5839 	qlnx_host_t     *ha;
5840 
5841 	if (mr == NULL)
5842 		return (-1);
5843 
5844 	if (mr->dev == NULL)
5845 		return (-1);
5846 
5847 	ha = mr->dev->ha;
5848 
5849 	QL_DPRINT12(ha, "enter\n");
5850 
5851 	mr->npages = 0;
5852 	qlnx_handle_completed_mrs(mr->dev, &mr->info);
5853 
5854 	ret = ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qlnxr_set_page);
5855 
5856 	QL_DPRINT12(ha, "exit ret = %d\n", ret);
5857 
5858 	return (ret);
5859 }
5860 
5861 #else
5862 
5863 struct ib_mr *
5864 qlnxr_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
5865 {
5866 	struct qlnxr_dev *dev;
5867 	struct qlnxr_mr *mr;
5868 	qlnx_host_t	*ha;
5869 	struct ib_mr *ibmr = NULL;
5870 
5871 	dev = get_qlnxr_dev((ibpd->device));
5872 	ha = dev->ha;
5873 
5874 	QL_DPRINT12(ha, "enter\n");
5875 
5876 	mr = __qlnxr_alloc_mr(ibpd, max_page_list_len);
5877 
5878 	if (IS_ERR(mr)) {
5879 		ibmr = ERR_PTR(-EINVAL);
5880 	} else {
5881 		ibmr = &mr->ibmr;
5882 	}
5883 
5884 	QL_DPRINT12(ha, "exit %p\n", ibmr);
5885 	return (ibmr);
5886 }
5887 
5888 void
5889 qlnxr_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
5890 {
5891 	struct qlnxr_fast_reg_page_list *frmr_list;
5892 
5893 	frmr_list = get_qlnxr_frmr_list(page_list);
5894 
5895 	free_mr_info(frmr_list->dev, &frmr_list->info);
5896 
5897 	kfree(frmr_list->ibfrpl.page_list);
5898 	kfree(frmr_list);
5899 
5900 	return;
5901 }
5902 
5903 struct ib_fast_reg_page_list *
5904 qlnxr_alloc_frmr_page_list(struct ib_device *ibdev, int page_list_len)
5905 {
5906 	struct qlnxr_fast_reg_page_list *frmr_list = NULL;
5907 	struct qlnxr_dev		*dev;
5908 	int				size = page_list_len * sizeof(u64);
5909 	int				rc = -ENOMEM;
5910 	qlnx_host_t			*ha;
5911 
5912 	dev = get_qlnxr_dev(ibdev);
5913 	ha = dev->ha;
5914 
5915 	QL_DPRINT12(ha, "enter\n");
5916 
5917 	frmr_list = kzalloc(sizeof(*frmr_list), GFP_KERNEL);
5918 	if (!frmr_list) {
5919 		QL_DPRINT11(ha, "kzalloc(frmr_list) failed\n");
5920 		goto err;
5921 	}
5922 
5923 	frmr_list->dev = dev;
5924 	frmr_list->ibfrpl.page_list = kzalloc(size, GFP_KERNEL);
5925 	if (!frmr_list->ibfrpl.page_list) {
5926 		QL_DPRINT11(ha, "kzalloc(frmr_list->ibfrpl.page_list) failed\n");
5927 		goto err0;
5928 	}
5929 
5930 	rc = qlnxr_init_mr_info(dev, &frmr_list->info, page_list_len,
5931 			  1 /* allow dual layer pbl */);
5932 	if (rc)
5933 		goto err1;
5934 
5935 	QL_DPRINT12(ha, "exit %p\n", &frmr_list->ibfrpl);
5936 
5937 	return &frmr_list->ibfrpl;
5938 
5939 err1:
5940 	kfree(frmr_list->ibfrpl.page_list);
5941 err0:
5942 	kfree(frmr_list);
5943 err:
5944 	QL_DPRINT12(ha, "exit with error\n");
5945 
5946 	return ERR_PTR(rc);
5947 }
5948 
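/*
 * qlnxr_validate_phys_buf_list - sanity check a physical buffer list: all
 * buffers must be non-empty and of equal size.  Returns the total length of
 * the region in *total_size.
 */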
5949 static int
5950 qlnxr_validate_phys_buf_list(qlnx_host_t *ha, struct ib_phys_buf *buf_list,
5951 	int buf_cnt, uint64_t *total_size)
5952 {
5953 	u64 size = 0;
5954 
5955 	*total_size = 0;
5956 
5957 	if (!buf_cnt || buf_list == NULL) {
5958 		QL_DPRINT11(ha,
5959 			"failed buf_list = %p buf_cnt = %d\n", buf_list, buf_cnt);
5960 		return (-1);
5961 	}
5962 
5963 	size = buf_list->size;
5964 
5965 	if (!size) {
5966 		QL_DPRINT11(ha,
5967 			"failed buf_list = %p buf_cnt = %d"
5968 			" buf_list->size = 0\n", buf_list, buf_cnt);
5969 		return (-1);
5970 	}
5971 
5972 	while (buf_cnt) {
5973 		*total_size += buf_list->size;
5974 
5975 		if (buf_list->size != size) {
5976 			QL_DPRINT11(ha,
5977 				"failed buf_list = %p buf_cnt = %d"
5978 				" all buffers should have same size\n",
5979 				buf_list, buf_cnt);
5980 			return (-1);
5981 		}
5982 
5983 		buf_list++;
5984 		buf_cnt--;
5985 	}
5986 	return (0);
5987 }
5988 
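/*
 * qlnxr_get_num_pages - count the PAGE_SIZE pages needed to cover every
 * buffer in the list, rounding each buffer up to a whole page.
 */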
5989 static size_t
5990 qlnxr_get_num_pages(qlnx_host_t *ha, struct ib_phys_buf *buf_list,
5991 	int buf_cnt)
5992 {
5993 	int	i;
5994 	size_t	num_pages = 0;
5995 	u64	size;
5996 
5997 	for (i = 0; i < buf_cnt; i++) {
5998 		size = 0;
5999 		while (size < buf_list->size) {
6000 			size += PAGE_SIZE;
6001 			num_pages++;
6002 		}
6003 		buf_list++;
6004 	}
6005 	return (num_pages);
6006 }
6007 
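/*
 * qlnxr_populate_phys_mem_pbls - fill the PBL(s) with the page addresses of
 * a physical buffer list.  For a two-level PBL the first table holds
 * pointers to the leaf tables, so population starts at pbl[1].
 */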
6008 static void
6009 qlnxr_populate_phys_mem_pbls(struct qlnxr_dev *dev,
6010 	struct ib_phys_buf *buf_list, int buf_cnt,
6011 	struct qlnxr_pbl *pbl, struct qlnxr_pbl_info *pbl_info)
6012 {
6013 	struct regpair		*pbe;
6014 	struct qlnxr_pbl	*pbl_tbl;
6015 	int			pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
6016 	qlnx_host_t		*ha;
6017         int                     i;
6018 	u64			pbe_addr;
6019 
6020 	ha = dev->ha;
6021 
6022 	QL_DPRINT12(ha, "enter\n");
6023 
6024 	if (!pbl_info) {
6025 		QL_DPRINT11(ha, "PBL_INFO not initialized\n");
6026 		return;
6027 	}
6028 
6029 	if (!pbl_info->num_pbes) {
6030 		QL_DPRINT11(ha, "pbl_info->num_pbes == 0\n");
6031 		return;
6032 	}
6033 
6034 	/* If we have a two layered pbl, the first pbl points to the rest
6035 	 * of the pbls and the first entry lies in the second pbl in the table
6036 	 */
6037 	if (pbl_info->two_layered)
6038 		pbl_tbl = &pbl[1];
6039 	else
6040 		pbl_tbl = pbl;
6041 
6042 	pbe = (struct regpair *)pbl_tbl->va;
6043 	if (!pbe) {
6044 		QL_DPRINT12(ha, "pbe is NULL\n");
6045 		return;
6046 	}
6047 
6048 	pbe_cnt = 0;
6049 
6050 	for (i = 0; i < buf_cnt; i++) {
6051 		pages = buf_list->size >> PAGE_SHIFT;
6052 
6053 		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
6054 			/* store the page address in pbe */
6055 
6056 			pbe_addr = buf_list->addr + (PAGE_SIZE * pg_cnt);
6057 
6058 			pbe->lo = cpu_to_le32((u32)pbe_addr);
6059 			pbe->hi = cpu_to_le32(((u32)(pbe_addr >> 32)));
6060 
6061 			QL_DPRINT12(ha, "Populate pbl table:"
6062 				" pbe->addr=0x%x:0x%x "
6063 				" pbe_cnt = %d total_num_pbes=%d"
6064 				" pbe=%p\n", pbe->lo, pbe->hi, pbe_cnt,
6065 				total_num_pbes, pbe);
6066 
6067 			pbe_cnt ++;
6068 			total_num_pbes ++;
6069 			pbe++;
6070 
6071 			if (total_num_pbes == pbl_info->num_pbes)
6072 				return;
6073 
6074 			/* if the given pbl is full of pbes,
6075 			 * move to the next pbl. */
6076 
6077 			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
6078 				pbl_tbl++;
6079 				pbe = (struct regpair *)pbl_tbl->va;
6080 				pbe_cnt = 0;
6081 			}
6082 		}
6083 		buf_list++;
6084 	}
6085 	QL_DPRINT12(ha, "exit\n");
6086 	return;
6087 }
6088 
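/*
 * qlnxr_reg_kernel_mr - register a kernel physical-buffer MR: validate the
 * buffer list, build and populate the PBLs, then allocate a TID and register
 * the region via ecore_rdma_register_tid() with the requested access flags.
 */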
6089 struct ib_mr *
6090 qlnxr_reg_kernel_mr(struct ib_pd *ibpd,
6091 	struct ib_phys_buf *buf_list,
6092 	int buf_cnt, int acc, u64 *iova_start)
6093 {
6094 	int		rc = -ENOMEM;
6095 	struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
6096 	struct qlnxr_mr *mr;
6097 	struct qlnxr_pd *pd;
6098 	qlnx_host_t	*ha;
6099 	size_t		num_pages = 0;
6100 	uint64_t	length;
6101 
6102 	ha = dev->ha;
6103 
6104 	QL_DPRINT12(ha, "enter\n");
6105 
6106 	pd = get_qlnxr_pd(ibpd);
6107 
6108 	QL_DPRINT12(ha, "pd = %d buf_list = %p, buf_cnt = %d,"
6109 		" iova_start = %p, acc = %d\n",
6110 		pd->pd_id, buf_list, buf_cnt, iova_start, acc);
6111 
6112 	//if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
6113 	//	QL_DPRINT11(ha, "(acc & IB_ACCESS_REMOTE_WRITE &&"
6114 	//		" !(acc & IB_ACCESS_LOCAL_WRITE))\n");
6115 	//	return ERR_PTR(-EINVAL);
6116 	//}
6117 
6118 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
6119 	if (!mr) {
6120 		QL_DPRINT11(ha, "kzalloc(mr) failed\n");
6121 		return ERR_PTR(rc);
6122 	}
6123 
6124 	mr->type = QLNXR_MR_KERNEL;
6125 	mr->iova_start = iova_start;
6126 
6127 	rc = qlnxr_validate_phys_buf_list(ha, buf_list, buf_cnt, &length);
6128 	if (rc)
6129 		goto err0;
6130 
6131 	num_pages = qlnxr_get_num_pages(ha, buf_list, buf_cnt);
6132 	if (!num_pages)
6133 		goto err0;
6134 
6135 	rc = qlnxr_init_mr_info(dev, &mr->info, num_pages, 1);
6136 	if (rc) {
6137 		QL_DPRINT11(ha,
6138 			"qlnxr_init_mr_info failed [%d]\n", rc);
6139 		goto err1;
6140 	}
6141 
6142 	qlnxr_populate_phys_mem_pbls(dev, buf_list, buf_cnt, mr->info.pbl_table,
6143 		   &mr->info.pbl_info);
6144 
6145 	rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
6146 
6147 	if (rc) {
6148 		QL_DPRINT11(ha, "roce alloc tid returned an error %d\n", rc);
6149 		goto err1;
6150 	}
6151 
6152 	/* index only, 18 bit long, lkey = itid << 8 | key */
6153 	mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
6154 	mr->hw_mr.key = 0;
6155 	mr->hw_mr.pd = pd->pd_id;
6156 	mr->hw_mr.local_read = 1;
6157 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
6158 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
6159 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
6160 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
6161 	mr->hw_mr.mw_bind = false; /* TBD MW BIND */
6162 	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
6163 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
6164 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
6165 	mr->hw_mr.page_size_log = ilog2(PAGE_SIZE); /* for the MR pages */
6166 
6167 	mr->hw_mr.fbo = 0;
6168 
6169 	mr->hw_mr.length = length;
6170 	mr->hw_mr.vaddr = (uint64_t)iova_start;
6171 	mr->hw_mr.zbva = false; /* TBD: figure out when this should be true */
6172 	mr->hw_mr.phy_mr = false; /* fast MR: true, regular registration: false */
6173 	mr->hw_mr.dma_mr = false;
6174 
6175 	rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
6176 	if (rc) {
6177 		QL_DPRINT11(ha, "roce register tid returned an error %d\n", rc);
6178 		goto err2;
6179 	}
6180 
6181 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
6182 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
6183 		mr->hw_mr.remote_atomic)
6184 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
6185 
6186 	QL_DPRINT12(ha, "lkey: %x\n", mr->ibmr.lkey);
6187 
6188 	return (&mr->ibmr);
6189 
6190 err2:
6191 	ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
6192 err1:
6193 	qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
6194 err0:
6195 	kfree(mr);
6196 
6197 	QL_DPRINT12(ha, "exit [%d]\n", rc);
6198 	return (ERR_PTR(rc));
6199 }
6200 
6201 #endif /* #if __FreeBSD_version >= 1102000 */
6202 
6203 int
6204 qlnxr_create_ah(struct ib_ah *ibah,
6205 	struct ib_ah_attr *attr, u32 flags,
6206 	struct ib_udata *udata)
6207 {
6208 	struct qlnxr_dev *dev;
6209 	qlnx_host_t	*ha;
6210 	struct qlnxr_ah *ah = get_qlnxr_ah(ibah);
6211 
6212 	dev = get_qlnxr_dev(ibah->device);
6213 	ha = dev->ha;
6214 
6215 	QL_DPRINT12(ha, "in create_ah\n");
6216 
6217 	ah->attr = *attr;
6218 
6219 	return (0);
6220 }
6221 
6222 void
6223 qlnxr_destroy_ah(struct ib_ah *ibah, u32 flags)
6224 {
6225 }
6226 
6227 int
6228 qlnxr_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
6229 {
6230 	struct qlnxr_dev *dev;
6231 	qlnx_host_t     *ha;
6232 
6233 	dev = get_qlnxr_dev((ibah->device));
6234 	ha = dev->ha;
6235 	QL_DPRINT12(ha, "Query AH not supported\n");
6236 	return -EINVAL;
6237 }
6238 
6239 int
6240 qlnxr_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
6241 {
6242 	struct qlnxr_dev *dev;
6243 	qlnx_host_t     *ha;
6244 
6245 	dev = get_qlnxr_dev((ibah->device));
6246 	ha = dev->ha;
6247 	QL_DPRINT12(ha, "Modify AH not supported\n");
6248 	return -ENOSYS;
6249 }
6250 
6251 #if __FreeBSD_version >= 1102000
6252 int
6253 qlnxr_process_mad(struct ib_device *ibdev,
6254 		int process_mad_flags,
6255 		u8 port_num,
6256 		const struct ib_wc *in_wc,
6257 		const struct ib_grh *in_grh,
6258 		const struct ib_mad_hdr *mad_hdr,
6259 		size_t in_mad_size,
6260 		struct ib_mad_hdr *out_mad,
6261 		size_t *out_mad_size,
6262 		u16 *out_mad_pkey_index)
6263 
6264 #else
6265 
6266 int
6267 qlnxr_process_mad(struct ib_device *ibdev,
6268                         int process_mad_flags,
6269                         u8 port_num,
6270                         struct ib_wc *in_wc,
6271                         struct ib_grh *in_grh,
6272                         struct ib_mad *in_mad,
6273                         struct ib_mad *out_mad)
6274 
6275 #endif /* #if __FreeBSD_version >= 1102000 */
6276 {
6277 	struct qlnxr_dev *dev;
6278 	qlnx_host_t	*ha;
6279 
6280 	dev = get_qlnxr_dev(ibdev);
6281 	ha = dev->ha;
6282 	QL_DPRINT12(ha, "process mad not supported\n");
6283 
6284 	return -ENOSYS;
6285 //	QL_DPRINT12(ha, "qlnxr_process_mad in_mad %x %x %x %x %x %x %x %x\n",
6286 //               in_mad->mad_hdr.attr_id, in_mad->mad_hdr.base_version,
6287 //               in_mad->mad_hdr.attr_mod, in_mad->mad_hdr.class_specific,
6288 //               in_mad->mad_hdr.class_version, in_mad->mad_hdr.method,
6289 //               in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.status);
6290 
6291 //	return IB_MAD_RESULT_SUCCESS;
6292 }
6293 
6294 #if __FreeBSD_version >= 1102000
6295 int
6296 qlnxr_get_port_immutable(struct ib_device *ibdev, u8 port_num,
6297 	struct ib_port_immutable *immutable)
6298 {
6299 	struct qlnxr_dev        *dev;
6300 	qlnx_host_t             *ha;
6301 	struct ib_port_attr     attr;
6302 	int                     err;
6303 
6304 	dev = get_qlnxr_dev(ibdev);
6305 	ha = dev->ha;
6306 
6307 	QL_DPRINT12(ha, "enter\n");
6308 
6309 	err = qlnxr_query_port(ibdev, port_num, &attr);
6310 	if (err)
6311 		return err;
6312 
6313 	if (QLNX_IS_IWARP(dev)) {
6314 		immutable->pkey_tbl_len = 1;
6315 		immutable->gid_tbl_len = 1;
6316 		immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
6317 		immutable->max_mad_size = 0;
6318 	} else {
6319 		immutable->pkey_tbl_len = attr.pkey_tbl_len;
6320 		immutable->gid_tbl_len = attr.gid_tbl_len;
6321 		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
6322 		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
6323 	}
6324 
6325 	QL_DPRINT12(ha, "exit\n");
6326 	return 0;
6327 }
6328 #endif /* #if __FreeBSD_version >= 1102000 */
6329 
6330 /***** iWARP related functions *************/
6331 
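/*
 * qlnxr_iw_mpa_request - incoming MPA connect request on a listening
 * endpoint (IPv4 only): allocate a qlnxr_iw_ep for the new connection and
 * deliver IW_CM_EVENT_CONNECT_REQUEST to the listener's cm_id.
 */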
6332 static void
6333 qlnxr_iw_mpa_request(void *context,
6334 	struct ecore_iwarp_cm_event_params *params)
6335 {
6336 	struct qlnxr_iw_listener *listener = (struct qlnxr_iw_listener *)context;
6337 	struct qlnxr_dev *dev = listener->dev;
6338 	struct qlnxr_iw_ep *ep;
6339 	struct iw_cm_event event;
6340 	struct sockaddr_in *laddr;
6341 	struct sockaddr_in *raddr;
6342 	qlnx_host_t	*ha;
6343 
6344 	ha = dev->ha;
6345 
6346 	QL_DPRINT12(ha, "enter\n");
6347 
6348 	if (params->cm_info->ip_version != ECORE_TCP_IPV4) {
6349 		QL_DPRINT11(ha, "only IPv4 supported [0x%x]\n",
6350 			params->cm_info->ip_version);
6351 		return;
6352 	}
6353 
6354 	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
6355 
6356 	if (!ep) {
6357 		QL_DPRINT11(ha, "kzalloc(ep) failed\n");
6358 		return;
6359 	}
6360 
6361 	ep->dev = dev;
6362 	ep->ecore_context = params->ep_context;
6363 
6364 	memset(&event, 0, sizeof(event));
6365 
6366 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
6367 	event.status = params->status;
6368 
6369 	laddr = (struct sockaddr_in *)&event.local_addr;
6370 	raddr = (struct sockaddr_in *)&event.remote_addr;
6371 
6372 	laddr->sin_family = AF_INET;
6373 	raddr->sin_family = AF_INET;
6374 
6375 	laddr->sin_port = htons(params->cm_info->local_port);
6376 	raddr->sin_port = htons(params->cm_info->remote_port);
6377 
6378 	laddr->sin_addr.s_addr = htonl(params->cm_info->local_ip[0]);
6379 	raddr->sin_addr.s_addr = htonl(params->cm_info->remote_ip[0]);
6380 
6381 	event.provider_data = (void *)ep;
6382 	event.private_data = (void *)params->cm_info->private_data;
6383 	event.private_data_len = (u8)params->cm_info->private_data_len;
6384 
6385 #if __FreeBSD_version >= 1100000
6386 	event.ord = params->cm_info->ord;
6387 	event.ird = params->cm_info->ird;
6388 #endif /* #if __FreeBSD_version >= 1100000 */
6389 
6390 	listener->cm_id->event_handler(listener->cm_id, &event);
6391 
6392 	QL_DPRINT12(ha, "exit\n");
6393 
6394 	return;
6395 }
6396 
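/*
 * qlnxr_iw_issue_event - build an iw_cm_event of the given type from the
 * ecore CM parameters and hand it to the endpoint's cm_id event handler.
 */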
6397 static void
6398 qlnxr_iw_issue_event(void *context,
6399 	 struct ecore_iwarp_cm_event_params *params,
6400 	 enum iw_cm_event_type event_type,
6401 	 char *str)
6402 {
6403 	struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6404 	struct qlnxr_dev *dev = ep->dev;
6405 	struct iw_cm_event event;
6406 	qlnx_host_t	*ha;
6407 
6408 	ha = dev->ha;
6409 
6410 	QL_DPRINT12(ha, "enter\n");
6411 
6412 	memset(&event, 0, sizeof(event));
6413 	event.status = params->status;
6414 	event.event = event_type;
6415 
6416 	if (params->cm_info != NULL) {
6417 #if __FreeBSD_version >= 1100000
6418 		event.ird = params->cm_info->ird;
6419 		event.ord = params->cm_info->ord;
6420 		QL_DPRINT12(ha, "ord=[%d] \n", event.ord);
6421 		QL_DPRINT12(ha, "ird=[%d] \n", event.ird);
6422 #endif /* #if __FreeBSD_version >= 1100000 */
6423 
6424 		event.private_data_len = params->cm_info->private_data_len;
6425 		event.private_data = (void *)params->cm_info->private_data;
6426 		QL_DPRINT12(ha, "private_data_len=[%d] \n",
6427 			event.private_data_len);
6428 	}
6429 
6430 	QL_DPRINT12(ha, "event=[%d] %s\n", event.event, str);
6431 	QL_DPRINT12(ha, "status=[%d] \n", event.status);
6432 
6433 	if (ep) {
6434 		if (ep->cm_id)
6435 			ep->cm_id->event_handler(ep->cm_id, &event);
6436 		else
6437 			QL_DPRINT11(ha, "ep->cm_id == NULL \n");
6438 	} else {
6439 		QL_DPRINT11(ha, "ep == NULL \n");
6440 	}
6441 
6442 	QL_DPRINT12(ha, "exit\n");
6443 
6444 	return;
6445 }
6446 
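/*
 * qlnxr_iw_close_event - issue IW_CM_EVENT_CLOSE for the endpoint and drop
 * the cm_id reference taken at connect/accept time.
 */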
6447 static void
6448 qlnxr_iw_close_event(void *context,
6449 	 struct ecore_iwarp_cm_event_params *params)
6450 {
6451 	struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6452 	struct qlnxr_dev *dev = ep->dev;
6453 	qlnx_host_t	*ha;
6454 
6455 	ha = dev->ha;
6456 
6457 	QL_DPRINT12(ha, "enter\n");
6458 
6459 	if (ep->cm_id) {
6460 		qlnxr_iw_issue_event(context,
6461 				    params,
6462 				    IW_CM_EVENT_CLOSE,
6463 				    "IW_CM_EVENT_CLOSE");
6464 		ep->cm_id->rem_ref(ep->cm_id);
6465 		ep->cm_id = NULL;
6466 	}
6467 
6468 	QL_DPRINT12(ha, "exit\n");
6469 
6470 	return;
6471 }
6472 
6473 #if __FreeBSD_version >= 1102000
6474 
6475 static void
6476 qlnxr_iw_passive_complete(void *context,
6477         struct ecore_iwarp_cm_event_params *params)
6478 {
6479         struct qlnxr_iw_ep      *ep = (struct qlnxr_iw_ep *)context;
6480         struct qlnxr_dev        *dev = ep->dev;
6481         qlnx_host_t             *ha;
6482 
6483         ha = dev->ha;
6484 
6485         /* We will only reach the following state if MPA_REJECT was called on
6486          * passive. In this case there will be no associated QP.
6487          */
6488         if ((params->status == -ECONNREFUSED) && (ep->qp == NULL)) {
6489                 QL_DPRINT11(ha, "PASSIVE connection refused releasing ep...\n");
6490                 kfree(ep);
6491                 return;
6492         }
6493 
6494         /* We always issue an established event; however, OFED does not look
6495          * at the event code for established, so if there was a failure we
6496          * follow with a close event.
6497          */
6498         qlnxr_iw_issue_event(context,
6499                 params,
6500                 IW_CM_EVENT_ESTABLISHED,
6501                 "IW_CM_EVENT_ESTABLISHED");
6502 
6503         if (params->status < 0) {
6504                 qlnxr_iw_close_event(context, params);
6505         }
6506 
6507         return;
6508 }
6509 
6510 struct qlnxr_discon_work {
6511         struct work_struct work;
6512         struct qlnxr_iw_ep *ep;
6513         enum ecore_iwarp_event_type event;
6514         int status;
6515 };
6516 
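/*
 * qlnxr_iw_disconnect_worker - deferred disconnect handling: deliver
 * IW_CM_EVENT_DISCONNECT to the cm_id, then move the QP to SQD for a
 * graceful disconnect (status == 0) or to ERR otherwise, and drop the QP
 * reference taken when the work was queued.
 */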
6517 static void
6518 qlnxr_iw_disconnect_worker(struct work_struct *work)
6519 {
6520         struct qlnxr_discon_work *dwork =
6521                 container_of(work, struct qlnxr_discon_work, work);
6522         struct ecore_rdma_modify_qp_in_params qp_params = { 0 };
6523         struct qlnxr_iw_ep *ep = dwork->ep;
6524         struct qlnxr_dev *dev = ep->dev;
6525         struct qlnxr_qp *qp = ep->qp;
6526         struct iw_cm_event event;
6527 
6528         if (qp->destroyed) {
6529                 kfree(dwork);
6530                 qlnxr_iw_qp_rem_ref(&qp->ibqp);
6531                 return;
6532         }
6533 
6534         memset(&event, 0, sizeof(event));
6535         event.status = dwork->status;
6536         event.event = IW_CM_EVENT_DISCONNECT;
6537 
6538         /* Success means a graceful disconnect was requested; modifying
6539          * to SQD is translated to a graceful disconnect, otherwise a reset is sent.
6540          */
6541         if (dwork->status)
6542                 qp_params.new_state = ECORE_ROCE_QP_STATE_ERR;
6543         else
6544                 qp_params.new_state = ECORE_ROCE_QP_STATE_SQD;
6545 
6546         kfree(dwork);
6547 
6548         if (ep->cm_id)
6549                 ep->cm_id->event_handler(ep->cm_id, &event);
6550 
6551         SET_FIELD(qp_params.modify_flags,
6552                   ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
6553 
6554         ecore_rdma_modify_qp(dev->rdma_ctx, qp->ecore_qp, &qp_params);
6555 
6556         qlnxr_iw_qp_rem_ref(&qp->ibqp);
6557 
6558         return;
6559 }
6560 
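/*
 * qlnxr_iw_disconnect_event - schedule disconnect processing on the iWARP
 * workqueue; takes a QP reference that the worker releases.
 */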
6561 void
6562 qlnxr_iw_disconnect_event(void *context,
6563         struct ecore_iwarp_cm_event_params *params)
6564 {
6565         struct qlnxr_discon_work *work;
6566         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6567         struct qlnxr_dev *dev = ep->dev;
6568         struct qlnxr_qp *qp = ep->qp;
6569 
6570         work = kzalloc(sizeof(*work), GFP_ATOMIC);
6571         if (!work)
6572                 return;
6573 
6574         qlnxr_iw_qp_add_ref(&qp->ibqp);
6575         work->ep = ep;
6576         work->event = params->event;
6577         work->status = params->status;
6578 
6579         INIT_WORK(&work->work, qlnxr_iw_disconnect_worker);
6580         queue_work(dev->iwarp_wq, &work->work);
6581 
6582         return;
6583 }
6584 
6585 #endif /* #if __FreeBSD_version >= 1102000 */
6586 
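/*
 * qlnxr_iw_mpa_reply - active side received the MPA reply; send the RTR
 * message via ecore_iwarp_send_rtr() to complete connection establishment.
 */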
6587 static int
6588 qlnxr_iw_mpa_reply(void *context,
6589 	struct ecore_iwarp_cm_event_params *params)
6590 {
6591         struct qlnxr_iw_ep	*ep = (struct qlnxr_iw_ep *)context;
6592         struct qlnxr_dev	*dev = ep->dev;
6593         struct ecore_iwarp_send_rtr_in rtr_in;
6594         int			rc;
6595 	qlnx_host_t		*ha;
6596 
6597 	ha = dev->ha;
6598 
6599 	QL_DPRINT12(ha, "enter\n");
6600 
6601 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
6602 		return -EINVAL;
6603 
6604 	bzero(&rtr_in, sizeof(struct ecore_iwarp_send_rtr_in));
6605         rtr_in.ep_context = params->ep_context;
6606 
6607         rc = ecore_iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
6608 
6609 	QL_DPRINT12(ha, "exit rc = %d\n", rc);
6610         return rc;
6611 }
6612 
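/*
 * qlnxr_iw_qp_event - deliver an asynchronous event (fatal or access error)
 * on the endpoint's QP through its ibqp event handler.
 */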
6613 void
6614 qlnxr_iw_qp_event(void *context,
6615 	struct ecore_iwarp_cm_event_params *params,
6616 	enum ib_event_type ib_event,
6617 	char *str)
6618 {
6619         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6620         struct qlnxr_dev *dev = ep->dev;
6621         struct ib_qp *ibqp = &(ep->qp->ibqp);
6622         struct ib_event event;
6623 	qlnx_host_t	*ha;
6624 
6625 	ha = dev->ha;
6626 
6627 	QL_DPRINT12(ha,
6628 		"[context, event, str, event_handler] = [%p, 0x%x, %s, %p] enter\n",
6629 		context, params->event, str, ibqp->event_handler);
6630 
6631         if (ibqp->event_handler) {
6632                 event.event = ib_event;
6633                 event.device = ibqp->device;
6634                 event.element.qp = ibqp;
6635                 ibqp->event_handler(&event, ibqp->qp_context);
6636         }
6637 
6638 	return;
6639 }
6640 
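/*
 * qlnxr_iw_event_handler - central ecore iWARP CM callback: dispatches
 * connection setup, disconnect and QP error events to the iw_cm layer.
 */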
6641 int
6642 qlnxr_iw_event_handler(void *context,
6643 	struct ecore_iwarp_cm_event_params *params)
6644 {
6645 	struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6646 	struct qlnxr_dev *dev = ep->dev;
6647 	qlnx_host_t	*ha;
6648 
6649 	ha = dev->ha;
6650 
6651 	QL_DPRINT12(ha, "[context, event] = [%p, 0x%x] "
6652 		"enter\n", context, params->event);
6653 
6654 	switch (params->event) {
6655 	/* Passive side request received */
6656 	case ECORE_IWARP_EVENT_MPA_REQUEST:
6657 		qlnxr_iw_mpa_request(context, params);
6658 		break;
6659 
6660         case ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY:
6661                 qlnxr_iw_mpa_reply(context, params);
6662                 break;
6663 
6664 	/* Passive side established (ack on MPA response) */
6665 	case ECORE_IWARP_EVENT_PASSIVE_COMPLETE:
6666 
6667 #if __FreeBSD_version >= 1102000
6668 
6669 		ep->during_connect = 0;
6670 		qlnxr_iw_passive_complete(context, params);
6671 
6672 #else
6673 		qlnxr_iw_issue_event(context,
6674 				    params,
6675 				    IW_CM_EVENT_ESTABLISHED,
6676 				    "IW_CM_EVENT_ESTABLISHED");
6677 #endif /* #if __FreeBSD_version >= 1102000 */
6678 		break;
6679 
6680 	/* Active side reply received */
6681 	case ECORE_IWARP_EVENT_ACTIVE_COMPLETE:
6682 		ep->during_connect = 0;
6683 		qlnxr_iw_issue_event(context,
6684 				    params,
6685 				    IW_CM_EVENT_CONNECT_REPLY,
6686 				    "IW_CM_EVENT_CONNECT_REPLY");
6687 		if (params->status < 0) {
6688 			struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6689 
6690 			ep->cm_id->rem_ref(ep->cm_id);
6691 			ep->cm_id = NULL;
6692 		}
6693 		break;
6694 
6695 	case ECORE_IWARP_EVENT_DISCONNECT:
6696 
6697 #if __FreeBSD_version >= 1102000
6698 		qlnxr_iw_disconnect_event(context, params);
6699 #else
6700 		qlnxr_iw_issue_event(context,
6701 				    params,
6702 				    IW_CM_EVENT_DISCONNECT,
6703 				    "IW_CM_EVENT_DISCONNECT");
6704 		qlnxr_iw_close_event(context, params);
6705 #endif /* #if __FreeBSD_version >= 1102000 */
6706 		break;
6707 
6708 	case ECORE_IWARP_EVENT_CLOSE:
6709 		ep->during_connect = 0;
6710 		qlnxr_iw_close_event(context, params);
6711 		break;
6712 
6713         case ECORE_IWARP_EVENT_RQ_EMPTY:
6714                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6715                                  "IWARP_EVENT_RQ_EMPTY");
6716                 break;
6717 
6718         case ECORE_IWARP_EVENT_IRQ_FULL:
6719                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6720                                  "IWARP_EVENT_IRQ_FULL");
6721                 break;
6722 
6723         case ECORE_IWARP_EVENT_LLP_TIMEOUT:
6724                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6725                                  "IWARP_EVENT_LLP_TIMEOUT");
6726                 break;
6727 
6728         case ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
6729                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
6730                                  "IWARP_EVENT_REMOTE_PROTECTION_ERROR");
6731                 break;
6732 
6733         case ECORE_IWARP_EVENT_CQ_OVERFLOW:
6734                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6735                                  "QED_IWARP_EVENT_CQ_OVERFLOW");
6736                 break;
6737 
6738         case ECORE_IWARP_EVENT_QP_CATASTROPHIC:
6739                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6740                                  "QED_IWARP_EVENT_QP_CATASTROPHIC");
6741                 break;
6742 
6743         case ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR:
6744                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
6745                                  "IWARP_EVENT_LOCAL_ACCESS_ERROR");
6746                 break;
6747 
6748         case ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR:
6749                 qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6750                                  "IWARP_EVENT_REMOTE_OPERATION_ERROR");
6751                 break;
6752 
6753         case ECORE_IWARP_EVENT_TERMINATE_RECEIVED:
6754 		QL_DPRINT12(ha, "Got terminate message"
6755 			" ECORE_IWARP_EVENT_TERMINATE_RECEIVED\n");
6756                 break;
6757 
6758 	default:
6759 		QL_DPRINT12(ha,
6760 			"Unknown event [0x%x] received \n", params->event);
6761 		break;
6762 	}
6763 
6764 	QL_DPRINT12(ha, "[context, event] = [%p, 0x%x] "
6765 		"exit\n", context, params->event);
6766 	return 0;
6767 }
6768 
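/*
 * qlnxr_addr4_resolve - resolve the destination IPv4 address to a MAC
 * address through the kernel ARP code (arpresolve()).
 */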
6769 static int
6770 qlnxr_addr4_resolve(struct qlnxr_dev *dev,
6771 			      struct sockaddr_in *src_in,
6772 			      struct sockaddr_in *dst_in,
6773 			      u8 *dst_mac)
6774 {
6775 	int rc;
6776 
6777 #if __FreeBSD_version >= 1100000
6778 	rc = arpresolve(dev->ha->ifp, 0, NULL, (struct sockaddr *)dst_in,
6779 			dst_mac, NULL, NULL);
6780 #else
6781 	struct llentry *lle;
6782 
6783 	rc = arpresolve(dev->ha->ifp, NULL, NULL, (struct sockaddr *)dst_in,
6784 			dst_mac, &lle);
6785 #endif
6786 
6787 	QL_DPRINT12(dev->ha, "rc = %d "
6788 		"sa_len = 0x%x sa_family = 0x%x IP Address = %d.%d.%d.%d "
6789 		"Dest MAC %02x:%02x:%02x:%02x:%02x:%02x\n", rc,
6790 		dst_in->sin_len, dst_in->sin_family,
6791 		NIPQUAD((dst_in->sin_addr.s_addr)),
6792 		dst_mac[0], dst_mac[1], dst_mac[2],
6793 		dst_mac[3], dst_mac[4], dst_mac[5]);
6794 
6795 	return rc;
6796 }
6797 
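/*
 * qlnxr_iw_connect - active-side connection setup: look up the QP, resolve
 * the peer MAC address, fill the ecore connect parameters (IPv4 only, MSS
 * derived from the interface MTU) and start the TCP/MPA exchange.
 */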
6798 int
6799 qlnxr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
6800 {
6801 	struct qlnxr_dev *dev;
6802 	struct ecore_iwarp_connect_out out_params;
6803 	struct ecore_iwarp_connect_in in_params;
6804 	struct qlnxr_iw_ep *ep;
6805 	struct qlnxr_qp *qp;
6806 	struct sockaddr_in *laddr;
6807 	struct sockaddr_in *raddr;
6808 	int rc = 0;
6809 	qlnx_host_t	*ha;
6810 
6811 	dev = get_qlnxr_dev((cm_id->device));
6812 	ha = dev->ha;
6813 
6814 	QL_DPRINT12(ha, "[cm_id, conn_param] = [%p, %p] "
6815 		"enter \n", cm_id, conn_param);
6816 
6817 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
6818 		return -EINVAL;
6819 
6820 	qp = idr_find(&dev->qpidr, conn_param->qpn);
	if (qp == NULL) {
		QL_DPRINT11(ha, "idr_find failed, invalid qpn = %d\n",
			conn_param->qpn);
		return -EINVAL;
	}
6821 
6822 	laddr = (struct sockaddr_in *)&cm_id->local_addr;
6823 	raddr = (struct sockaddr_in *)&cm_id->remote_addr;
6824 
6825 	QL_DPRINT12(ha,
6826 		"local = [%d.%d.%d.%d, %d] remote = [%d.%d.%d.%d, %d]\n",
6827 		NIPQUAD((laddr->sin_addr.s_addr)), laddr->sin_port,
6828 		NIPQUAD((raddr->sin_addr.s_addr)), raddr->sin_port);
6829 
6830 	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
6831 	if (!ep) {
6832 		QL_DPRINT11(ha, "struct qlnxr_iw_ep "
6833 			"memory allocation failed\n");
6834 		return -ENOMEM;
6835 	}
6836 
6837 	ep->dev = dev;
6838 	ep->qp = qp;
6839 	cm_id->add_ref(cm_id);
6840 	ep->cm_id = cm_id;
6841 
6842 	memset(&in_params, 0, sizeof (struct ecore_iwarp_connect_in));
6843 	memset(&out_params, 0, sizeof (struct ecore_iwarp_connect_out));
6844 
6845 	in_params.event_cb = qlnxr_iw_event_handler;
6846 	in_params.cb_context = ep;
6847 
6848 	in_params.cm_info.ip_version = ECORE_TCP_IPV4;
6849 
6850 	in_params.cm_info.remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
6851 	in_params.cm_info.local_ip[0] = ntohl(laddr->sin_addr.s_addr);
6852 	in_params.cm_info.remote_port = ntohs(raddr->sin_port);
6853 	in_params.cm_info.local_port = ntohs(laddr->sin_port);
6854 	in_params.cm_info.vlan = 0;
6855 	in_params.mss = dev->ha->ifp->if_mtu - 40;
6856 
6857 	QL_DPRINT12(ha, "remote_ip = [%d.%d.%d.%d] "
6858 		"local_ip = [%d.%d.%d.%d] remote_port = %d local_port = %d "
6859 		"vlan = %d\n",
6860 		NIPQUAD((in_params.cm_info.remote_ip[0])),
6861 		NIPQUAD((in_params.cm_info.local_ip[0])),
6862 		in_params.cm_info.remote_port, in_params.cm_info.local_port,
6863 		in_params.cm_info.vlan);
6864 
6865 	rc = qlnxr_addr4_resolve(dev, laddr, raddr, (u8 *)in_params.remote_mac_addr);
6866 
6867 	if (rc) {
6868 		QL_DPRINT11(ha, "qlnxr_addr4_resolve failed\n");
6869 		goto err;
6870 	}
6871 
6872 	QL_DPRINT12(ha, "ord = %d ird=%d private_data=%p"
6873 		" private_data_len=%d rq_psn=%d\n",
6874 		conn_param->ord, conn_param->ird, conn_param->private_data,
6875 		conn_param->private_data_len, qp->rq_psn);
6876 
6877 	in_params.cm_info.ord = conn_param->ord;
6878 	in_params.cm_info.ird = conn_param->ird;
6879 	in_params.cm_info.private_data = conn_param->private_data;
6880 	in_params.cm_info.private_data_len = conn_param->private_data_len;
6881 	in_params.qp = qp->ecore_qp;
6882 
6883 	memcpy(in_params.local_mac_addr, dev->ha->primary_mac, ETH_ALEN);
6884 
6885 	rc = ecore_iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
6886 
6887 	if (rc) {
6888 		QL_DPRINT12(ha, "ecore_iwarp_connect failed\n");
6889 		goto err;
6890 	}
6891 
6892 	QL_DPRINT12(ha, "exit\n");
6893 
6894 	return rc;
6895 
6896 err:
6897 	cm_id->rem_ref(cm_id);
6898 	kfree(ep);
6899 
6900 	QL_DPRINT12(ha, "exit [%d]\n", rc);
6901 	return rc;
6902 }
6903 
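/*
 * qlnxr_iw_create_listen - create an ecore iWARP listener bound to the
 * cm_id's local IPv4 address and port with the requested backlog.
 */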
6904 int
6905 qlnxr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
6906 {
6907 	struct qlnxr_dev *dev;
6908 	struct qlnxr_iw_listener *listener;
6909 	struct ecore_iwarp_listen_in iparams;
6910 	struct ecore_iwarp_listen_out oparams;
6911 	struct sockaddr_in *laddr;
6912 	qlnx_host_t	*ha;
6913 	int rc;
6914 
6915 	dev = get_qlnxr_dev((cm_id->device));
6916 	ha = dev->ha;
6917 
6918 	QL_DPRINT12(ha, "enter\n");
6919 
6920 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
6921 		return -EINVAL;
6922 
6923 	laddr = (struct sockaddr_in *)&cm_id->local_addr;
6924 
6925 	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
6926 
6927 	if (listener == NULL) {
6928 		QL_DPRINT11(ha, "listener memory alloc failed\n");
6929 		return -ENOMEM;
6930 	}
6931 
6932 	listener->dev = dev;
6933 	cm_id->add_ref(cm_id);
6934 	listener->cm_id = cm_id;
6935 	listener->backlog = backlog;
6936 
6937 	memset(&iparams, 0, sizeof (struct ecore_iwarp_listen_in));
6938 	memset(&oparams, 0, sizeof (struct ecore_iwarp_listen_out));
6939 
6940 	iparams.cb_context = listener;
6941 	iparams.event_cb = qlnxr_iw_event_handler;
6942 	iparams.max_backlog = backlog;
6943 
6944 	iparams.ip_version = ECORE_TCP_IPV4;
6945 
6946 	iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
6947 	iparams.port = ntohs(laddr->sin_port);
6948 	iparams.vlan = 0;
6949 
6950 	QL_DPRINT12(ha, "[%d.%d.%d.%d, %d] iparams.port=%d\n",
6951 		NIPQUAD((laddr->sin_addr.s_addr)),
6952 		laddr->sin_port, iparams.port);
6953 
6954 	rc = ecore_iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
6955 	if (rc) {
6956 		QL_DPRINT11(ha,
6957 			"ecore_iwarp_create_listen failed rc = %d\n", rc);
6958 		goto err;
6959 	}
6960 
6961 	listener->ecore_handle = oparams.handle;
6962 	cm_id->provider_data = listener;
6963 
6964 	QL_DPRINT12(ha, "exit\n");
6965 	return rc;
6966 
6967 err:
6968 	cm_id->rem_ref(cm_id);
6969 	kfree(listener);
6970 
6971 	QL_DPRINT12(ha, "exit [%d]\n", rc);
6972 	return rc;
6973 }
6974 
6975 void
6976 qlnxr_iw_destroy_listen(struct iw_cm_id *cm_id)
6977 {
6978 	struct qlnxr_iw_listener *listener = cm_id->provider_data;
6979 	struct qlnxr_dev *dev = get_qlnxr_dev((cm_id->device));
6980 	int rc = 0;
6981 	qlnx_host_t	*ha;
6982 
6983 	ha = dev->ha;
6984 
6985 	QL_DPRINT12(ha, "enter\n");
6986 
6987 	if (listener->ecore_handle)
6988 		rc = ecore_iwarp_destroy_listen(dev->rdma_ctx,
6989 				listener->ecore_handle);
6990 
6991 	cm_id->rem_ref(cm_id);
6992 
6993 	QL_DPRINT12(ha, "exit [%d]\n", rc);
6994 	return;
6995 }
6996 
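/*
 * qlnxr_iw_accept - passive-side accept: bind the endpoint to the QP given
 * by conn_param->qpn and call ecore_iwarp_accept() to send the MPA accept.
 */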
6997 int
6998 qlnxr_iw_accept(struct iw_cm_id *cm_id,
6999 	struct iw_cm_conn_param *conn_param)
7000 {
7001 	struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)cm_id->provider_data;
7002 	struct qlnxr_dev *dev = ep->dev;
7003 	struct qlnxr_qp *qp;
7004 	struct ecore_iwarp_accept_in params;
7005 	int rc;
7006 	qlnx_host_t	*ha;
7007 
7008 	ha = dev->ha;
7009 
7010 	QL_DPRINT12(ha, "enter  qpid=%d\n", conn_param->qpn);
7011 
7012 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
7013 		return -EINVAL;
7014 
7015 	qp = idr_find(&dev->qpidr, conn_param->qpn);
7016 	if (!qp) {
7017 		QL_DPRINT11(ha, "idr_find failed invalid qpn = %d\n",
7018 			conn_param->qpn);
7019 		return -EINVAL;
7020 	}
7021 	ep->qp = qp;
7022 	qp->ep = ep;
7023 	cm_id->add_ref(cm_id);
7024 	ep->cm_id = cm_id;
7025 
7026 	params.ep_context = ep->ecore_context;
7027 	params.cb_context = ep;
7028 	params.qp = ep->qp->ecore_qp;
7029 	params.private_data = conn_param->private_data;
7030 	params.private_data_len = conn_param->private_data_len;
7031 	params.ird = conn_param->ird;
7032 	params.ord = conn_param->ord;
7033 
7034 	rc = ecore_iwarp_accept(dev->rdma_ctx, &params);
7035 	if (rc) {
7036 		QL_DPRINT11(ha, "ecore_iwarp_accept failed %d\n", rc);
7037 		goto err;
7038 	}
7039 
7040 	QL_DPRINT12(ha, "exit\n");
7041 	return 0;
7042 err:
7043 	cm_id->rem_ref(cm_id);
7044 	QL_DPRINT12(ha, "exit rc = %d\n", rc);
7045 	return rc;
7046 }
7047 
7048 int
7049 qlnxr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
7050 {
7051 #if __FreeBSD_version >= 1102000
7052 
7053         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)cm_id->provider_data;
7054         struct qlnxr_dev *dev = ep->dev;
7055         struct ecore_iwarp_reject_in params;
7056         int rc;
7057 
7058         params.ep_context = ep->ecore_context;
7059         params.cb_context = ep;
7060         params.private_data = pdata;
7061         params.private_data_len = pdata_len;
7062         ep->qp = NULL;
7063 
7064         rc = ecore_iwarp_reject(dev->rdma_ctx, &params);
7065 
7066         return rc;
7067 
7068 #else
7069 
7070 	printf("iWARP reject_cr not implemented\n");
7071 	return -EINVAL;
7072 
7073 #endif /* #if __FreeBSD_version >= 1102000 */
7074 }
7075 
7076 void
7077 qlnxr_iw_qp_add_ref(struct ib_qp *ibqp)
7078 {
7079 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
7080 	qlnx_host_t	*ha;
7081 
7082 	ha = qp->dev->ha;
7083 
7084 	QL_DPRINT12(ha, "enter ibqp = %p\n", ibqp);
7085 
7086 	atomic_inc(&qp->refcnt);
7087 
7088 	QL_DPRINT12(ha, "exit \n");
7089 	return;
7090 }
7091 
7092 void
7093 qlnxr_iw_qp_rem_ref(struct ib_qp *ibqp)
7094 {
7095 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
7096 	qlnx_host_t	*ha;
7097 
7098 	ha = qp->dev->ha;
7099 
7100 	QL_DPRINT12(ha, "enter ibqp = %p qp = %p\n", ibqp, qp);
7101 
7102 	if (atomic_dec_and_test(&qp->refcnt)) {
7103 		qlnxr_idr_remove(qp->dev, qp->qp_id);
7104 	}
7105 
7106 	QL_DPRINT12(ha, "exit \n");
7107 	return;
7108 }
7109 
7110 struct ib_qp *
7111 qlnxr_iw_get_qp(struct ib_device *ibdev, int qpn)
7112 {
7113 	struct qlnxr_dev *dev = get_qlnxr_dev(ibdev);
7114 	struct ib_qp *qp;
7115 	qlnx_host_t	*ha;
7116 
7117 	ha = dev->ha;
7118 
7119 	QL_DPRINT12(ha, "enter dev = %p ibdev = %p qpn = %d\n", dev, ibdev, qpn);
7120 
7121 	qp = idr_find(&dev->qpidr, qpn);
7122 
7123 	QL_DPRINT12(ha, "exit qp = %p\n", qp);
7124 
7125 	return (qp);
7126 }
7127