xref: /freebsd/sys/dev/qlnx/qlnxr/qlnxr_verbs.c (revision 6e563a1b608438504d963c2d7c70e50d2e75af46)
1 /*
2  * Copyright (c) 2018-2019 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * File: qlnxr_verbs.c
30  */
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "qlnxr_def.h"
35 #include "rdma_common.h"
36 #include "qlnxr_roce.h"
37 #include "qlnxr_cm.h"
38 
39 #define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))
40 
41 #define TYPEPTR_ADDR_SET(type_ptr, field, vaddr)			\
42 	do {								\
43 		(type_ptr)->field.hi = cpu_to_le32(upper_32_bits(vaddr));\
44 		(type_ptr)->field.lo = cpu_to_le32(lower_32_bits(vaddr));\
45 	} while (0)
46 
47 #define RQ_SGE_SET(sge, vaddr, vlength, vflags)			\
48 	do {							\
49 		TYPEPTR_ADDR_SET(sge, addr, vaddr);		\
50 		(sge)->length = cpu_to_le32(vlength);		\
51 		(sge)->flags = cpu_to_le32(vflags);		\
52 	} while (0)
53 
54 #define SRQ_HDR_SET(hdr, vwr_id, num_sge)			\
55 	do {							\
56 		TYPEPTR_ADDR_SET(hdr, wr_id, vwr_id);		\
57 		(hdr)->num_sges = num_sge;			\
58 	} while (0)
59 
60 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)			\
61 	do {							\
62 		TYPEPTR_ADDR_SET(sge, addr, vaddr);		\
63 		(sge)->length = cpu_to_le32(vlength);		\
64 		(sge)->l_key = cpu_to_le32(vlkey);		\
65 	} while (0)
66 
67 #define NIPQUAD(addr) \
68 	((unsigned char *)&addr)[0], \
69 	((unsigned char *)&addr)[1], \
70 	((unsigned char *)&addr)[2], \
71 	((unsigned char *)&addr)[3]
72 
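/*
 * Example: TYPEPTR_ADDR_SET() splits a 64-bit bus address into the
 * little-endian halves the firmware expects. With a hypothetical
 * vaddr of 0x123456789abcdef0, RQ_SGE_SET(sge, vaddr, len, flags)
 * expands (roughly) to:
 *
 *	(sge)->addr.hi = cpu_to_le32(0x12345678);
 *	(sge)->addr.lo = cpu_to_le32(0x9abcdef0);
 *	(sge)->length  = cpu_to_le32(len);
 *	(sge)->flags   = cpu_to_le32(flags);
 */
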
73 static int
74 qlnxr_check_srq_params(struct qlnxr_dev *dev,
75 	struct ib_srq_init_attr *attrs);
76 
77 static int
78 qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
79 	struct qlnxr_srq *srq,
80 	struct qlnxr_create_srq_ureq *ureq,
81 	int access, int dmasync);
82 
83 static int
84 qlnxr_alloc_srq_kernel_params(struct qlnxr_srq *srq,
85 	struct qlnxr_dev *dev,
86 	struct ib_srq_init_attr *init_attr);
87 
88 static int
89 qlnxr_copy_srq_uresp(struct qlnxr_dev *dev,
90 	struct qlnxr_srq *srq,
91 	struct ib_udata *udata);
92 
93 static void
94 qlnxr_free_srq_user_params(struct qlnxr_srq *srq);
95 
96 static void
97 qlnxr_free_srq_kernel_params(struct qlnxr_srq *srq);
98 
99 static u32
100 qlnxr_srq_elem_left(struct qlnxr_srq_hwq_info *hw_srq);
101 
102 int
103 qlnxr_iw_query_gid(struct ib_device *ibdev, u8 port, int index,
104 	union ib_gid *sgid)
105 {
106 	struct qlnxr_dev	*dev;
107 	qlnx_host_t		*ha;
108 
109 	dev = get_qlnxr_dev(ibdev);
110 	ha = dev->ha;
111 
112 	QL_DPRINT12(ha, "enter\n");
113 
114 	memset(sgid->raw, 0, sizeof(sgid->raw));
115 
116 	memcpy(sgid->raw, dev->ha->primary_mac, sizeof (dev->ha->primary_mac));
117 
118 	QL_DPRINT12(ha, "exit\n");
119 
120 	return 0;
121 }
122 
123 int
124 qlnxr_query_gid(struct ib_device *ibdev, u8 port, int index,
125 	union ib_gid *sgid)
126 {
127 	struct qlnxr_dev	*dev;
128 	qlnx_host_t		*ha;
129 
130 	dev = get_qlnxr_dev(ibdev);
131 	ha = dev->ha;
132 	QL_DPRINT12(ha, "enter index: %d\n", index);
133 #if 0
134 	int ret = 0;
135 	/* @@@: if DEFINE_ROCE_GID_TABLE to be used here */
136 	//if (!rdma_cap_roce_gid_table(ibdev, port)) {
137 	if (!(rdma_protocol_roce(ibdev, port) &&
138 		ibdev->add_gid && ibdev->del_gid)) {
139 		QL_DPRINT11(ha, "acquire gid failed\n");
140 		return -ENODEV;
141 	}
142 
143 	ret = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
144 	if (ret == -EAGAIN) {
145 		memcpy(sgid, &zgid, sizeof(*sgid));
146 		return 0;
147 	}
148 #endif
149 	if ((index >= QLNXR_MAX_SGID) || (index < 0)) {
150 		QL_DPRINT12(ha, "invalid gid index %d\n", index);
151 		memset(sgid, 0, sizeof(*sgid));
152 		return -EINVAL;
153 	}
154 	memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
155 
156 	QL_DPRINT12(ha, "exit : %p\n", sgid);
157 
158 	return 0;
159 }
160 
161 int
162 qlnxr_create_srq(struct ib_srq *ibsrq,
163 		 struct ib_srq_init_attr *init_attr,
164 		 struct ib_udata *udata)
165 {
166 	struct qlnxr_dev	*dev;
167 	qlnx_host_t		*ha;
168 	struct ecore_rdma_destroy_srq_in_params destroy_in_params;
169 	struct ecore_rdma_create_srq_out_params out_params;
170 	struct ecore_rdma_create_srq_in_params in_params;
171 	u64 pbl_base_addr, phy_prod_pair_addr;
172 	struct qlnxr_srq_hwq_info *hw_srq;
173 	struct qlnxr_ucontext *ctx;
174 	struct qlnxr_create_srq_ureq ureq;
175 	u32 page_cnt, page_size;
176 	struct qlnxr_srq *srq = get_qlnxr_srq(ibsrq);
177 	int ret = 0;
178 
179 	dev = get_qlnxr_dev(ibsrq->device);
180 	ha = dev->ha;
181 
182 	QL_DPRINT12(ha, "enter\n");
183 
184 	ret = qlnxr_check_srq_params(dev, init_attr);
185 	if (ret) goto err0;
186 	srq->dev = dev;
187 	hw_srq = &srq->hw_srq;
188 	spin_lock_init(&srq->lock);
189 	memset(&in_params, 0, sizeof(in_params));
190 
191 	if (udata) {
192 		ctx = rdma_udata_to_drv_context(
193 		    udata, struct qlnxr_ucontext, ibucontext);
194 
195 		memset(&ureq, 0, sizeof(ureq));
196 		if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
197 			udata->inlen))) {
198 			QL_DPRINT11(ha, "problem"
199 				" copying data from user space\n");
200 			goto err0;
201 		}
202 
203 		ret = qlnxr_init_srq_user_params(&ctx->ibucontext, srq, &ureq, 0, 0);
204 		if (ret)
205 			goto err0;
206 
207 		page_cnt = srq->usrq.pbl_info.num_pbes;
208 		pbl_base_addr = srq->usrq.pbl_tbl->pa;
209 		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
210 		// @@@ : if DEFINE_IB_UMEM_PAGE_SHIFT
211 		// page_size = BIT(srq->usrq.umem->page_shift);
212 		// else
213 		page_size = srq->usrq.umem->page_size;
214 	} else {
215 		struct ecore_chain *pbl;
216 		ret = qlnxr_alloc_srq_kernel_params(srq, dev, init_attr);
217 		if (ret)
218 			goto err0;
219 		pbl = &hw_srq->pbl;
220 
221 		page_cnt = ecore_chain_get_page_cnt(pbl);
222 		pbl_base_addr = ecore_chain_get_pbl_phys(pbl);
223 		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
224 		page_size = pbl->elem_per_page << 4;
225 	}
226 
227 	in_params.pd_id = get_qlnxr_pd(ibsrq->pd)->pd_id;
228 	in_params.pbl_base_addr = pbl_base_addr;
229 	in_params.prod_pair_addr = phy_prod_pair_addr;
230 	in_params.num_pages = page_cnt;
231 	in_params.page_size = page_size;
232 
233 	ret = ecore_rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
234 	if (ret)
235 		goto err1;
236 
237 	srq->srq_id = out_params.srq_id;
238 
239 	if (udata) {
240 		ret = qlnxr_copy_srq_uresp(dev, srq, udata);
241 		if (ret)
242 			goto err2;
243 	}
244 
245 	QL_DPRINT12(ha, "created srq with srq_id = 0x%x\n", srq->srq_id);
246 	return (0);
247 err2:
248 	memset(&destroy_in_params, 0, sizeof(destroy_in_params));
249 	destroy_in_params.srq_id = srq->srq_id;
250 	ecore_rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
251 
252 err1:
253 	if (udata)
254 		qlnxr_free_srq_user_params(srq);
255 	else
256 		qlnxr_free_srq_kernel_params(srq);
257 
258 err0:
259 	return (-EFAULT);
260 }
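
/*
 * Usage sketch (illustrative, not part of the driver): a kernel ULP
 * reaches qlnxr_create_srq() through the verbs layer; the attribute
 * values below are placeholders:
 *
 *	struct ib_srq_init_attr init_attr = {
 *		.attr = { .max_wr = 128, .max_sge = 2 },
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init_attr);
 *	if (IS_ERR(srq))
 *		return (PTR_ERR(srq));
 */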
261 
262 void
263 qlnxr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
264 {
265 	struct qlnxr_dev	*dev;
266 	struct qlnxr_srq	*srq;
267 	qlnx_host_t		*ha;
268 	struct ecore_rdma_destroy_srq_in_params in_params;
269 
270 	srq = get_qlnxr_srq(ibsrq);
271 	dev = srq->dev;
272 	ha = dev->ha;
273 
274 	memset(&in_params, 0, sizeof(in_params));
275 	in_params.srq_id = srq->srq_id;
276 
277 	ecore_rdma_destroy_srq(dev->rdma_ctx, &in_params);
278 
279 	if (ibsrq->pd->uobject && ibsrq->pd->uobject->context)
280 		qlnxr_free_srq_user_params(srq);
281 	else
282 		qlnxr_free_srq_kernel_params(srq);
283 
284 	QL_DPRINT12(ha, "destroyed srq_id=0x%x\n", srq->srq_id);
285 }
286 
287 int
288 qlnxr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
289 	enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
290 {
291 	struct qlnxr_dev	*dev;
292 	struct qlnxr_srq	*srq;
293 	qlnx_host_t		*ha;
294 	struct ecore_rdma_modify_srq_in_params in_params;
295 	int ret = 0;
296 
297 	srq = get_qlnxr_srq(ibsrq);
298 	dev = srq->dev;
299 	ha = dev->ha;
300 
301 	QL_DPRINT12(ha, "enter\n");
302 	if (attr_mask & IB_SRQ_MAX_WR) {
303 		QL_DPRINT12(ha, "SRQ resize (IB_SRQ_MAX_WR) not supported,"
304 			" attr_mask=0x%x srq=%p\n", attr_mask, srq);
305 		return -EINVAL;
306 	}
307 
308 	if (attr_mask & IB_SRQ_LIMIT) {
309 		if (attr->srq_limit >= srq->hw_srq.max_wr) {
310 			QL_DPRINT12(ha, "invalid srq_limit=0x%x"
311 				" (max_srq_limit = 0x%x)\n",
312 			       attr->srq_limit, srq->hw_srq.max_wr);
313 			return -EINVAL;
314 		}
315 		memset(&in_params, 0, sizeof(in_params));
316 		in_params.srq_id = srq->srq_id;
317 		in_params.wqe_limit = attr->srq_limit;
318 		ret = ecore_rdma_modify_srq(dev->rdma_ctx, &in_params);
319 		if (ret)
320 			return ret;
321 	}
322 
323 	QL_DPRINT12(ha, "modified srq with srq_id = 0x%x\n", srq->srq_id);
324 	return 0;
325 }
326 
327 int
328 qlnxr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
329 {
330 	struct qlnxr_dev	*dev;
331 	struct qlnxr_srq	*srq;
332 	qlnx_host_t		*ha;
333 	struct ecore_rdma_device *qattr;
334 	srq = get_qlnxr_srq(ibsrq);
335 	dev = srq->dev;
336 	ha = dev->ha;
337 	//qattr = &dev->attr;
338 	qattr = ecore_rdma_query_device(dev->rdma_ctx);
339 	QL_DPRINT12(ha, "enter\n");
340 
341 	if (!dev->rdma_ctx) {
342 		QL_DPRINT12(ha, "called with invalid params"
343 			" rdma_ctx is NULL\n");
344 		return -EINVAL;
345 	}
346 
347 	srq_attr->srq_limit = qattr->max_srq;
348 	srq_attr->max_wr = qattr->max_srq_wr;
349 	srq_attr->max_sge = qattr->max_sge;
350 
351 	QL_DPRINT12(ha, "exit\n");
352 	return 0;
353 }
354 
355 /* Increment srq wr producer by one */
356 static void
357 qlnxr_inc_srq_wr_prod(struct qlnxr_srq_hwq_info *info)
358 {
359 	info->wr_prod_cnt++;
360 }
361 
362 /* Increment srq wr consumer by one */
362 static void
363 qlnxr_inc_srq_wr_cons(struct qlnxr_srq_hwq_info *info)
364 {
365 	info->wr_cons_cnt++;
367 }
368 
369 /* get_port_immutable verb is not available in FreeBSD */
370 #if 0
371 int
372 qlnxr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
373 	struct ib_port_immutable *immutable)
374 {
375 	struct qlnxr_dev                *dev;
376 	qlnx_host_t                     *ha;
377 	dev = get_qlnxr_dev(ibdev);
378 	ha = dev->ha;
379 
380 	QL_DPRINT12(ha, "entered but not implemented!!!\n");
381 }
382 #endif
383 
384 int
385 qlnxr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
386 	const struct ib_recv_wr **bad_wr)
387 {
388 	struct qlnxr_dev	*dev;
389 	struct qlnxr_srq	*srq;
390 	qlnx_host_t		*ha;
391 	struct qlnxr_srq_hwq_info *hw_srq;
392 	struct ecore_chain *pbl;
393 	unsigned long flags;
394 	int status = 0;
395 	u32 num_sge, offset;
396 
397 	srq = get_qlnxr_srq(ibsrq);
398 	dev = srq->dev;
399 	ha = dev->ha;
400 	hw_srq = &srq->hw_srq;
401 
402 	QL_DPRINT12(ha, "enter\n");
403 	spin_lock_irqsave(&srq->lock, flags);
404 
405 	pbl = &srq->hw_srq.pbl;
406 	while (wr) {
407 		struct rdma_srq_wqe_header *hdr;
408 		int i;
409 
410 		if (!qlnxr_srq_elem_left(hw_srq) ||
411 		    wr->num_sge > srq->hw_srq.max_sges) {
412 			QL_DPRINT11(ha, "WR cannot be posted"
413 			    " (%d, %d) || (%d > %d)\n",
414 			    hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
415 			    wr->num_sge, srq->hw_srq.max_sges);
416 			status = -ENOMEM;
417 			*bad_wr = wr;
418 			break;
419 		}
420 
421 		hdr = ecore_chain_produce(pbl);
422 		num_sge = wr->num_sge;
423 		/* Set number of sge and WR id in header */
424 		SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
425 
426 		/* The PBL is maintained at WR granularity, so increment
427 		 * the WR producer once for each posted WR.
428 		 */
429 		qlnxr_inc_srq_wr_prod(hw_srq);
430 		hw_srq->wqe_prod++;
431 		hw_srq->sge_prod++;
432 
433 		QL_DPRINT12(ha, "SRQ WR : SGEs: %d with wr_id[%d] = %llx\n",
434 			wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
435 
436 		for (i = 0; i < wr->num_sge; i++) {
437 			struct rdma_srq_sge *srq_sge =
438 			    ecore_chain_produce(pbl);
439 			/* Set SGE length, lkey and address */
440 			SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
441 				wr->sg_list[i].length, wr->sg_list[i].lkey);
442 
443 			QL_DPRINT12(ha, "[%d]: len %d, key %x, addr %x:%x\n",
444 				i, srq_sge->length, srq_sge->l_key,
445 				srq_sge->addr.hi, srq_sge->addr.lo);
446 			hw_srq->sge_prod++;
447 		}
448 		wmb();
449 		/*
450 		 * SRQ prod is 8 bytes. Need to update SGE prod in index
451 		 * in first 4 bytes and need to update WQE prod in next
452 		 * 4 bytes.
453 		 */
454 		*(srq->hw_srq.virt_prod_pair_addr) = hw_srq->sge_prod;
455 		offset = offsetof(struct rdma_srq_producers, wqe_prod);
456 		*(u32 *)((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
457 			hw_srq->wqe_prod;
458 		/* Flush prod after updating it */
459 		wmb();
460 		wr = wr->next;
461 	}
462 
463 	QL_DPRINT12(ha, "Elements in SRQ: %d\n",
464 		ecore_chain_get_elem_left(pbl));
465 
466 	spin_unlock_irqrestore(&srq->lock, flags);
467 	QL_DPRINT12(ha, "exit\n");
468 	return status;
469 }
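
/*
 * Usage sketch (illustrative): posting a single-SGE receive buffer to
 * the SRQ; dma_addr, buf_len, lkey and cookie are placeholders:
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = buf_len, .lkey = lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id = cookie, .sg_list = &sge, .num_sge = 1,
 *	};
 *	const struct ib_recv_wr *bad_wr;
 *	int rc = ib_post_srq_recv(srq, &wr, &bad_wr);
 */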
470 
471 int
472 #if __FreeBSD_version < 1102000
473 qlnxr_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
474 #else
475 qlnxr_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
476 	struct ib_udata *udata)
477 #endif /* #if __FreeBSD_version < 1102000 */
478 
479 {
480 	struct qlnxr_dev		*dev;
481 	struct ecore_rdma_device	*qattr;
482 	qlnx_host_t			*ha;
483 
484 	dev = get_qlnxr_dev(ibdev);
485 	ha = dev->ha;
486 
487 	QL_DPRINT12(ha, "enter\n");
488 
489 #if __FreeBSD_version > 1102000
490 	if (udata->inlen || udata->outlen)
491 		return -EINVAL;
492 #endif /* #if __FreeBSD_version > 1102000 */
493 
494 	if (dev->rdma_ctx == NULL) {
495 		return -EINVAL;
496 	}
497 
498 	qattr = ecore_rdma_query_device(dev->rdma_ctx);
499 
500 	memset(attr, 0, sizeof *attr);
501 
502 	attr->fw_ver = qattr->fw_ver;
503 	attr->sys_image_guid = qattr->sys_image_guid;
504 	attr->max_mr_size = qattr->max_mr_size;
505 	attr->page_size_cap = qattr->page_size_caps;
506 	attr->vendor_id = qattr->vendor_id;
507 	attr->vendor_part_id = qattr->vendor_part_id;
508 	attr->hw_ver = qattr->hw_ver;
509 	attr->max_qp = qattr->max_qp;
510 	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
511 					IB_DEVICE_RC_RNR_NAK_GEN |
512 					IB_DEVICE_LOCAL_DMA_LKEY |
513 					IB_DEVICE_MEM_MGT_EXTENSIONS;
514 
515 	attr->max_sge = qattr->max_sge;
516 	attr->max_sge_rd = qattr->max_sge;
517 	attr->max_cq = qattr->max_cq;
518 	attr->max_cqe = qattr->max_cqe;
519 	attr->max_mr = qattr->max_mr;
520 	attr->max_mw = qattr->max_mw;
521 	attr->max_pd = qattr->max_pd;
522 	attr->atomic_cap = dev->atomic_cap;
523 	attr->max_fmr = qattr->max_fmr;
524 	attr->max_map_per_fmr = 16; /* TBD: FMR */
525 
526 	/* There is an implicit assumption in some of the ib_xxx apps that the
527 	 * qp_rd_atom is smaller than the qp_init_rd_atom. Specifically, in
528 	 * communication the qp_rd_atom is passed to the other side and used as
529 	 * init_rd_atom without checking the device capabilities for
530 	 * init_rd_atom. For this reason, we set the qp_rd_atom to be the
531 	 * minimum of the two. There is an additional assumption in the mlx4
532 	 * driver that the values are powers of two; fls is performed on the
533 	 * value - 1, which in fact gives a larger power of two for values that
534 	 * are not a power of two. This should be fixed in the mlx4 driver, but
535 	 * until then we provide a value that is a power of two in our code.
536 	 */
537 	attr->max_qp_init_rd_atom =
538 		1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
539 	attr->max_qp_rd_atom =
540 		min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
541 		    attr->max_qp_init_rd_atom);
542 
543 	attr->max_srq = qattr->max_srq;
544 	attr->max_srq_sge = qattr->max_srq_sge;
545 	attr->max_srq_wr = qattr->max_srq_wr;
546 
547 	/* TODO: R&D to more properly configure the following */
548 	attr->local_ca_ack_delay = qattr->dev_ack_delay;
549 	attr->max_fast_reg_page_list_len = qattr->max_mr/8;
550 	attr->max_pkeys = QLNXR_ROCE_PKEY_MAX;
551 	attr->max_ah = qattr->max_ah;
552 
553 	QL_DPRINT12(ha, "exit\n");
554 	return 0;
555 }
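
/*
 * Worked example for the rd_atom clamping above: if
 * qattr->max_qp_req_rd_atomic_resc == 28, then fls(28) == 5 and
 * max_qp_init_rd_atom == 1 << (5 - 1) == 16, the largest power of
 * two that does not exceed 28.
 */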
556 
557 static inline void
558 get_link_speed_and_width(int speed, uint8_t *ib_speed, uint8_t *ib_width)
559 {
560 	switch (speed) {
561 	case 1000:
562 		*ib_speed = IB_SPEED_SDR;
563 		*ib_width = IB_WIDTH_1X;
564 		break;
565 	case 10000:
566 		*ib_speed = IB_SPEED_QDR;
567 		*ib_width = IB_WIDTH_1X;
568 		break;
569 
570 	case 20000:
571 		*ib_speed = IB_SPEED_DDR;
572 		*ib_width = IB_WIDTH_4X;
573 		break;
574 
575 	case 25000:
576 		*ib_speed = IB_SPEED_EDR;
577 		*ib_width = IB_WIDTH_1X;
578 		break;
579 
580 	case 40000:
581 		*ib_speed = IB_SPEED_QDR;
582 		*ib_width = IB_WIDTH_4X;
583 		break;
584 
585 	case 50000:
586 		*ib_speed = IB_SPEED_QDR;
587 		*ib_width = IB_WIDTH_4X; // TODO doesn't add up to 50...
588 		break;
589 
590 	case 100000:
591 		*ib_speed = IB_SPEED_EDR;
592 		*ib_width = IB_WIDTH_4X;
593 		break;
594 
595 	default:
596 		/* Unsupported */
597 		*ib_speed = IB_SPEED_SDR;
598 		*ib_width = IB_WIDTH_1X;
599 	}
600 	return;
601 }
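
/*
 * Example: a 40G link reports speed == 40000, which maps to
 * IB_SPEED_QDR (10 Gb/s per lane) with IB_WIDTH_4X, i.e. 4 x 10G.
 */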
602 
603 int
604 qlnxr_query_port(struct ib_device *ibdev, uint8_t port,
605 	struct ib_port_attr *attr)
606 {
607 	struct qlnxr_dev	*dev;
608 	struct ecore_rdma_port	*rdma_port;
609 	qlnx_host_t		*ha;
610 
611 	dev = get_qlnxr_dev(ibdev);
612 	ha = dev->ha;
613 
614 	QL_DPRINT12(ha, "enter\n");
615 
616 	if (port > 1) {
617 		QL_DPRINT12(ha, "port [%d] > 1 \n", port);
618 		return -EINVAL;
619 	}
620 
621 	if (dev->rdma_ctx == NULL) {
622 		QL_DPRINT12(ha, "rdma_ctx == NULL\n");
623 		return -EINVAL;
624 	}
625 
626 	rdma_port = ecore_rdma_query_port(dev->rdma_ctx);
627 	memset(attr, 0, sizeof *attr);
628 
629 	if (rdma_port->port_state == ECORE_RDMA_PORT_UP) {
630 		attr->state = IB_PORT_ACTIVE;
631 		attr->phys_state = 5;
632 	} else {
633 		attr->state = IB_PORT_DOWN;
634 		attr->phys_state = 3;
635 	}
636 
637 	attr->max_mtu = IB_MTU_4096;
638 	attr->active_mtu = iboe_get_mtu(dev->ha->ifp->if_mtu);
639 	attr->lid = 0;
640 	attr->lmc = 0;
641 	attr->sm_lid = 0;
642 	attr->sm_sl = 0;
643 	attr->port_cap_flags = 0;
644 
645 	if (QLNX_IS_IWARP(dev)) {
646 		attr->gid_tbl_len = 1;
647 		attr->pkey_tbl_len = 1;
648 	} else {
649 		attr->gid_tbl_len = QLNXR_MAX_SGID;
650 		attr->pkey_tbl_len = QLNXR_ROCE_PKEY_TABLE_LEN;
651 	}
652 
653 	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
654 	attr->qkey_viol_cntr = 0;
655 
656 	get_link_speed_and_width(rdma_port->link_speed,
657 				 &attr->active_speed, &attr->active_width);
658 
659 	attr->max_msg_sz = rdma_port->max_msg_size;
660 	attr->max_vl_num = 4; /* TODO -> figure this one out... */
661 
662 	QL_DPRINT12(ha, "state = %d phys_state = %d "
663 		" link_speed = %d active_speed = %d active_width = %d"
664 		" attr->gid_tbl_len = %d attr->pkey_tbl_len = %d"
665 		" max_msg_sz = 0x%x max_vl_num = 0x%x \n",
666 		attr->state, attr->phys_state,
667 		rdma_port->link_speed, attr->active_speed,
668 		attr->active_width, attr->gid_tbl_len, attr->pkey_tbl_len,
669 		attr->max_msg_sz, attr->max_vl_num);
670 
671 	QL_DPRINT12(ha, "exit\n");
672 	return 0;
673 }
674 
675 int
676 qlnxr_modify_port(struct ib_device *ibdev, uint8_t port, int mask,
677 	struct ib_port_modify *props)
678 {
679 	struct qlnxr_dev	*dev;
680 	qlnx_host_t		*ha;
681 
682 	dev = get_qlnxr_dev(ibdev);
683 	ha = dev->ha;
684 
685 	QL_DPRINT12(ha, "enter\n");
686 
687 	if (port > 1) {
688 		QL_DPRINT12(ha, "port (%d) > 1\n", port);
689 		return -EINVAL;
690 	}
691 
692 	QL_DPRINT12(ha, "exit\n");
693 	return 0;
694 }
695 
696 enum rdma_link_layer
697 qlnxr_link_layer(struct ib_device *ibdev, uint8_t port_num)
698 {
699 	struct qlnxr_dev	*dev;
700 	qlnx_host_t		*ha;
701 
702 	dev = get_qlnxr_dev(ibdev);
703 	ha = dev->ha;
704 
705 	QL_DPRINT12(ha, "ibdev = %p port_num = 0x%x\n", ibdev, port_num);
706 
707         return IB_LINK_LAYER_ETHERNET;
708 }
709 
710 int
711 qlnxr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
712 {
713 	struct ib_device *ibdev = ibpd->device;
714 	struct qlnxr_pd		*pd = get_qlnxr_pd(ibpd);
715 	u16			pd_id;
716 	int			rc;
717 	struct qlnxr_dev	*dev;
718 	qlnx_host_t		*ha;
719 
720 	dev = get_qlnxr_dev(ibdev);
721 	ha = dev->ha;
722 
723 	QL_DPRINT12(ha, "ibdev = %p udata = %p enter\n", ibdev, udata);
724 
725 	if (dev->rdma_ctx == NULL) {
726 		QL_DPRINT11(ha, "dev->rdma_ctx = NULL\n");
727 		rc = -EINVAL;
728 		goto err;
729 	}
730 
731 	rc = ecore_rdma_alloc_pd(dev->rdma_ctx, &pd_id);
732 	if (rc)	{
733 		QL_DPRINT11(ha, "ecore_rdma_alloc_pd failed\n");
734 		goto err;
735 	}
736 
737 	pd->pd_id = pd_id;
738 
739 	if (udata) {
740 		rc = ib_copy_to_udata(udata, &pd->pd_id, sizeof(pd->pd_id));
741 		if (rc) {
742 			QL_DPRINT11(ha, "ib_copy_to_udata failed\n");
743 			ecore_rdma_free_pd(dev->rdma_ctx, pd_id);
744 			goto err;
745 		}
746 
747 		pd->uctx = rdma_udata_to_drv_context(
748 		    udata, struct qlnxr_ucontext, ibucontext);
749 		pd->uctx->pd = pd;
750 	}
751 
752 	atomic_add_rel_32(&dev->pd_count, 1);
753 	QL_DPRINT12(ha, "exit [pd, pd_id, pd_count] = [%p, 0x%x, %d]\n",
754 		pd, pd_id, dev->pd_count);
755 
756 	return (0);
757 
758 err:
759 	QL_DPRINT12(ha, "exit -1\n");
760 	return (rc);
761 }
762 
763 void
764 qlnxr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
765 {
766 	struct qlnxr_pd		*pd;
767 	struct qlnxr_dev	*dev;
768 	qlnx_host_t		*ha;
769 
770 	pd = get_qlnxr_pd(ibpd);
771 	dev = get_qlnxr_dev((ibpd->device));
772 	ha = dev->ha;
773 
774 	QL_DPRINT12(ha, "enter\n");
775 
776 	if (pd == NULL) {
777 		QL_DPRINT11(ha, "pd = NULL\n");
778 	} else {
779 		ecore_rdma_free_pd(dev->rdma_ctx, pd->pd_id);
780 		atomic_subtract_rel_32(&dev->pd_count, 1);
781 		QL_DPRINT12(ha, "exit [pd, pd_id, pd_count] = [%p, 0x%x, %d]\n",
782 			pd, pd->pd_id, dev->pd_count);
783 	}
784 
785 	QL_DPRINT12(ha, "exit\n");
786 }
787 
788 #define ROCE_WQE_ELEM_SIZE	sizeof(struct rdma_sq_sge)
789 #define	RDMA_MAX_SGE_PER_SRQ	(4) /* Should be part of HSI */
790 /* Should be part of HSI */
791 #define RDMA_MAX_SRQ_WQE_SIZE	(RDMA_MAX_SGE_PER_SRQ + 1) /* +1 for header */
792 #define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
793 
794 static void qlnxr_cleanup_user(struct qlnxr_dev *, struct qlnxr_qp *);
795 static void qlnxr_cleanup_kernel(struct qlnxr_dev *, struct qlnxr_qp *);
796 
797 int
798 qlnxr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
799 {
800 	struct qlnxr_dev	*dev;
801 	qlnx_host_t		*ha;
802 
803 	dev = get_qlnxr_dev(ibdev);
804 	ha = dev->ha;
805 
806 	QL_DPRINT12(ha, "enter index = 0x%x\n", index);
807 
808 	if (index >= QLNXR_ROCE_PKEY_TABLE_LEN)
809 		return -EINVAL;
810 
811 	*pkey = QLNXR_ROCE_PKEY_DEFAULT;
812 
813 	QL_DPRINT12(ha, "exit\n");
814 	return 0;
815 }
816 
817 static inline bool
818 qlnxr_get_vlan_id_qp(qlnx_host_t *ha, struct ib_qp_attr *attr, int attr_mask,
819        u16 *vlan_id)
820 {
821 	bool ret = false;
822 
823 	QL_DPRINT12(ha, "enter \n");
824 
825 	*vlan_id = 0;
826 
827 #if __FreeBSD_version >= 1100000
828 	u16 tmp_vlan_id;
829 
830 #if __FreeBSD_version >= 1102000
831 	union ib_gid *dgid;
832 
833 	dgid = &attr->ah_attr.grh.dgid;
834 	tmp_vlan_id = (dgid->raw[11] << 8) | dgid->raw[12];
835 
836 	if (!(tmp_vlan_id & ~EVL_VLID_MASK)) {
837 		*vlan_id = tmp_vlan_id;
838 		ret = true;
839 	}
840 #else
841 	tmp_vlan_id = attr->vlan_id;
842 
843 	if ((attr_mask & IB_QP_VID) && (!(tmp_vlan_id & ~EVL_VLID_MASK))) {
844 		*vlan_id = tmp_vlan_id;
845 		ret = true;
846 	}
847 
848 #endif /* #if __FreeBSD_version >= 1102000 */
849 
850 #else
851 	ret = true;
852 
853 #endif /* #if __FreeBSD_version >= 1100000 */
854 
855 	QL_DPRINT12(ha, "exit vlan_id = 0x%x ret = %d \n", *vlan_id, ret);
856 
857 	return (ret);
858 }
859 
860 static inline void
861 get_gid_info(struct ib_qp *ibqp, struct ib_qp_attr *attr,
862 	int attr_mask,
863 	struct qlnxr_dev *dev,
864 	struct qlnxr_qp *qp,
865 	struct ecore_rdma_modify_qp_in_params *qp_params)
866 {
867 	int		i;
868 	qlnx_host_t	*ha;
869 
870 	ha = dev->ha;
871 
872 	QL_DPRINT12(ha, "enter\n");
873 
874 	memcpy(&qp_params->sgid.bytes[0],
875 	       &dev->sgid_tbl[qp->sgid_idx].raw[0],
876 	       sizeof(qp_params->sgid.bytes));
877 	memcpy(&qp_params->dgid.bytes[0],
878 	       &attr->ah_attr.grh.dgid.raw[0],
879 	       sizeof(qp_params->dgid));
880 
881 	qlnxr_get_vlan_id_qp(ha, attr, attr_mask, &qp_params->vlan_id);
882 
883 	for (i = 0; i < (sizeof(qp_params->sgid.dwords)/sizeof(uint32_t)); i++) {
884 		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
885 		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
886 	}
887 
888 	QL_DPRINT12(ha, "exit\n");
889 	return;
890 }
891 
892 static int
893 qlnxr_add_mmap(struct qlnxr_ucontext *uctx, u64 phy_addr, unsigned long len)
894 {
895 	struct qlnxr_mm	*mm;
896 	qlnx_host_t	*ha;
897 
898 	ha = uctx->dev->ha;
899 
900 	QL_DPRINT12(ha, "enter\n");
901 
902 	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
903 	if (mm == NULL) {
904 		QL_DPRINT11(ha, "mm = NULL\n");
905 		return -ENOMEM;
906 	}
907 
908 	mm->key.phy_addr = phy_addr;
909 
910 	/* This function might be called with a length which is not a multiple
911 	 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
912 	 * forces this granularity by increasing the requested size if needed.
913 	 * When qlnxr_mmap is called, it will search the list with the updated
914 	 * length as a key. To prevent search failures, the length is rounded up
915 	 * in advance to PAGE_SIZE.
916 	 */
917 	mm->key.len = roundup(len, PAGE_SIZE);
918 	INIT_LIST_HEAD(&mm->entry);
919 
920 	mutex_lock(&uctx->mm_list_lock);
921 	list_add(&mm->entry, &uctx->mm_head);
922 	mutex_unlock(&uctx->mm_list_lock);
923 
924 	QL_DPRINT12(ha, "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
925 		(unsigned long long)mm->key.phy_addr,
926 		(unsigned long)mm->key.len, uctx);
927 
928 	return 0;
929 }
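
/*
 * Example of the rounding above: qlnxr_add_mmap(uctx, db_pa, 100)
 * stores key.len == PAGE_SIZE (4096 on most configurations), so a
 * later qlnxr_search_mmap() using the page-granular length from the
 * mmap path still finds the entry.
 */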
930 
931 static bool
932 qlnxr_search_mmap(struct qlnxr_ucontext *uctx, u64 phy_addr, unsigned long len)
933 {
934 	bool		found = false;
935 	struct qlnxr_mm	*mm;
936 	qlnx_host_t	*ha;
937 
938 	ha = uctx->dev->ha;
939 
940 	QL_DPRINT12(ha, "enter\n");
941 
942 	mutex_lock(&uctx->mm_list_lock);
943 	list_for_each_entry(mm, &uctx->mm_head, entry) {
944 		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
945 			continue;
946 
947 		found = true;
948 		break;
949 	}
950 	mutex_unlock(&uctx->mm_list_lock);
951 
952 	QL_DPRINT12(ha,
953 		"searched for (addr=0x%llx,len=0x%lx) for ctx=%p, found=%d\n",
954 		(unsigned long long)phy_addr, len, uctx, found);
955 
956 	return found;
957 }
958 
959 int
960 qlnxr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
961 {
962         int rc;
963         struct qlnxr_ucontext *ctx = get_qlnxr_ucontext(uctx);
964         struct qlnxr_alloc_ucontext_resp uresp;
965         struct qlnxr_dev *dev = get_qlnxr_dev(uctx->device);
966         qlnx_host_t *ha = dev->ha;
967         struct ecore_rdma_add_user_out_params oparams;
968 
969         if (!udata)
970                 return -EFAULT;
971 
972 	rc = ecore_rdma_add_user(dev->rdma_ctx, &oparams);
973 	if (rc) {
974 		QL_DPRINT12(ha,
975 			"Failed to allocate a DPI for a new RoCE application, "
976 			"rc = %d. To overcome this, consider increasing "
977 			"the number of DPIs, increasing the doorbell BAR size, "
978 			"or closing unnecessary RoCE applications. In "
979 			"order to increase the number of DPIs, consult the "
980 			"README\n", rc);
981 		goto err;
982 	}
983 
984 	ctx->dpi = oparams.dpi;
985 	ctx->dpi_addr = oparams.dpi_addr;
986 	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
987 	ctx->dpi_size = oparams.dpi_size;
988 	INIT_LIST_HEAD(&ctx->mm_head);
989 	mutex_init(&ctx->mm_list_lock);
990 
991 	memset(&uresp, 0, sizeof(uresp));
992 	uresp.dpm_enabled = offsetof(struct qlnxr_alloc_ucontext_resp, dpm_enabled)
993 				< udata->outlen ? dev->user_dpm_enabled : 0; //TODO: figure this out
994 	uresp.wids_enabled = offsetof(struct qlnxr_alloc_ucontext_resp, wids_enabled)
995 				< udata->outlen ? 1 : 0; //TODO: figure this out
996 	uresp.wid_count = offsetof(struct qlnxr_alloc_ucontext_resp, wid_count)
997 				< udata->outlen ? oparams.wid_count : 0; //TODO: figure this out
998         uresp.db_pa = ctx->dpi_phys_addr;
999         uresp.db_size = ctx->dpi_size;
1000         uresp.max_send_wr = dev->attr.max_sqe;
1001         uresp.max_recv_wr = dev->attr.max_rqe;
1002         uresp.max_srq_wr = dev->attr.max_srq_wr;
1003         uresp.sges_per_send_wr = QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
1004         uresp.sges_per_recv_wr = QLNXR_MAX_RQE_ELEMENTS_PER_RQE;
1005         uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
1006         uresp.max_cqes = QLNXR_MAX_CQES;
1007 
1008 	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1009 	if (rc)
1010 		goto err;
1011 
1012 	ctx->dev = dev;
1013 
1014 	rc = qlnxr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
1015 	if (rc)
1016 		goto err;
1017 	QL_DPRINT12(ha, "Allocated user context %p\n",
1018 		&ctx->ibucontext);
1019 
1020 	return (0);
1021 err:
1022 	return (rc);
1023 }
1024 
1025 void
1026 qlnxr_dealloc_ucontext(struct ib_ucontext *ibctx)
1027 {
1028         struct qlnxr_ucontext *uctx = get_qlnxr_ucontext(ibctx);
1029         struct qlnxr_dev *dev = uctx->dev;
1030         qlnx_host_t *ha = dev->ha;
1031         struct qlnxr_mm *mm, *tmp;
1032 
1033         QL_DPRINT12(ha, "Deallocating user context %p\n",
1034                         uctx);
1035 
1036         if (dev) {
1037                 ecore_rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
1038         }
1039 
1040         list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
1041                 QL_DPRINT12(ha, "deleted addr= 0x%llx, len = 0x%lx for"
1042                                 " ctx=%p\n",
1043                                 mm->key.phy_addr, mm->key.len, uctx);
1044                 list_del(&mm->entry);
1045                 kfree(mm);
1046         }
1047 }
1048 
1049 int
1050 qlnxr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
1051 {
1052 	struct qlnxr_ucontext	*ucontext = get_qlnxr_ucontext(context);
1053 	struct qlnxr_dev	*dev = get_qlnxr_dev((context->device));
1054 	unsigned long		vm_page = vma->vm_pgoff << PAGE_SHIFT;
1055 	u64 			unmapped_db;
1056 	unsigned long 		len = (vma->vm_end - vma->vm_start);
1057 	int 			rc = 0;
1058 	bool 			found;
1059 	qlnx_host_t		*ha;
1060 
1061 	ha = dev->ha;
1062 
1063 #if __FreeBSD_version > 1102000
1064 	unmapped_db = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
1065 #else
1066 	unmapped_db = dev->db_phys_addr;
1067 #endif /* #if __FreeBSD_version > 1102000 */
1068 
1069 	QL_DPRINT12(ha, "qlnxr_mmap enter vm_page=0x%lx"
1070 		" vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
1071 		vm_page, vma->vm_pgoff, unmapped_db,
1072 		dev->db_size, len);
1073 
1074 	if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
1075 		QL_DPRINT11(ha, "vm_start or length not page aligned: "
1076 			"vm_start = %ld vm_end = %ld\n", vma->vm_start,
1077 			vma->vm_end);
1078 		return -EINVAL;
1079 	}
1080 
1081 	found = qlnxr_search_mmap(ucontext, vm_page, len);
1082 	if (!found) {
1083 		QL_DPRINT11(ha, "vm_pgoff = %ld not found in mmap list\n",
1084 			vma->vm_pgoff);
1085 		return -EINVAL;
1086 	}
1087 
1088 	QL_DPRINT12(ha, "Mapping doorbell bar\n");
1089 
1090 #if __FreeBSD_version > 1102000
1091 
1092 	if ((vm_page < unmapped_db) ||
1093 		((vm_page + len) > (unmapped_db + ucontext->dpi_size))) {
1094 		QL_DPRINT11(ha, "failed: pages are outside of dpi; "
1095 			"page address=0x%lx, unmapped_db=0x%lx, dpi_size=0x%x\n",
1096 			vm_page, unmapped_db, ucontext->dpi_size);
1097 		return -EINVAL;
1098 	}
1099 
1100 	if (vma->vm_flags & VM_READ) {
1101 		QL_DPRINT11(ha, "failed mmap, cannot map doorbell bar for read\n");
1102 		return -EINVAL;
1103 	}
1104 
1105 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1106 	rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
1107 			vma->vm_page_prot);
1108 
1109 #else
1110 
1111 	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
1112 		dev->db_size))) {
1113 		QL_DPRINT12(ha, "Mapping doorbell bar\n");
1114 
1115 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1116 
1117 		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
1118 					    PAGE_SIZE, vma->vm_page_prot);
1119 	} else {
1120 		QL_DPRINT12(ha, "Mapping chains\n");
1121 		rc = io_remap_pfn_range(vma, vma->vm_start,
1122 					 vma->vm_pgoff, len, vma->vm_page_prot);
1123 	}
1124 
1125 #endif /* #if __FreeBSD_version > 1102000 */
1126 
1127 	QL_DPRINT12(ha, "exit [%d]\n", rc);
1128 	return rc;
1129 }
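
/*
 * Usage sketch (illustrative): the user-space library typically maps
 * the doorbell BAR with an mmap() call along these lines, using the
 * db_pa/db_size values returned by qlnxr_alloc_ucontext():
 *
 *	void *db = mmap(NULL, uresp.db_size, PROT_WRITE, MAP_SHARED,
 *	    cmd_fd, uresp.db_pa);
 */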
1130 
1131 struct ib_mr *
1132 qlnxr_get_dma_mr(struct ib_pd *ibpd, int acc)
1133 {
1134 	struct qlnxr_mr		*mr;
1135 	struct qlnxr_dev	*dev = get_qlnxr_dev((ibpd->device));
1136 	struct qlnxr_pd		*pd = get_qlnxr_pd(ibpd);
1137 	int			rc;
1138 	qlnx_host_t		*ha;
1139 
1140 	ha = dev->ha;
1141 
1142 	QL_DPRINT12(ha, "enter\n");
1143 
1144 	if (acc & IB_ACCESS_MW_BIND) {
1145 		QL_DPRINT12(ha, "Unsupported access flags received for dma mr\n");
1146 	}
1147 
1148 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1149 	if (!mr) {
1150 		rc = -ENOMEM;
1151 		QL_DPRINT12(ha, "kzalloc(mr) failed %d\n", rc);
1152 		goto err0;
1153 	}
1154 
1155 	mr->type = QLNXR_MR_DMA;
1156 
1157 	rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
1158 	if (rc) {
1159 		QL_DPRINT12(ha, "ecore_rdma_alloc_tid failed %d\n", rc);
1160 		goto err1;
1161 	}
1162 
1163 	/* index only, 18 bit long, lkey = itid << 8 | key */
1164 	mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
1165 	mr->hw_mr.pd = pd->pd_id;
1166 	mr->hw_mr.local_read = 1;
1167 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
1168 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
1169 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1170 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
1171 	mr->hw_mr.dma_mr = true;
1172 
1173 	rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
1174 	if (rc) {
1175 		QL_DPRINT12(ha, "ecore_rdma_register_tid failed %d\n", rc);
1176 		goto err2;
1177 	}
1178 
1179 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1180 
1181 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
1182 		mr->hw_mr.remote_atomic) {
1183 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1184 	}
1185 
1186 	QL_DPRINT12(ha, "lkey = %x\n", mr->ibmr.lkey);
1187 
1188 	return &mr->ibmr;
1189 
1190 err2:
1191 	ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
1192 err1:
1193 	kfree(mr);
1194 err0:
1195 	QL_DPRINT12(ha, "exit [%d]\n", rc);
1196 
1197 	return ERR_PTR(rc);
1198 }
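
/*
 * Example of the key composition above: with hw_mr.itid == 0x2a and
 * hw_mr.key == 0x01, lkey == (0x2a << 8) | 0x01 == 0x2a01.
 */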
1199 
1200 static void
1201 qlnxr_free_pbl(struct qlnxr_dev *dev, struct qlnxr_pbl_info *pbl_info,
1202 	struct qlnxr_pbl *pbl)
1203 {
1204 	int		i;
1205 	qlnx_host_t	*ha;
1206 
1207 	ha = dev->ha;
1208 
1209 	QL_DPRINT12(ha, "enter\n");
1210 
1211 	for (i = 0; i < pbl_info->num_pbls; i++) {
1212 		if (!pbl[i].va)
1213 			continue;
1214 		qlnx_dma_free_coherent(&dev->ha->cdev, pbl[i].va, pbl[i].pa,
1215 			pbl_info->pbl_size);
1216 	}
1217 	kfree(pbl);
1218 
1219 	QL_DPRINT12(ha, "exit\n");
1220 	return;
1221 }
1222 
1223 #define MIN_FW_PBL_PAGE_SIZE (4*1024)
1224 #define MAX_FW_PBL_PAGE_SIZE (64*1024)
1225 
1226 #define NUM_PBES_ON_PAGE(_page_size) ((_page_size) / sizeof(u64))
1227 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
1228 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE*MAX_PBES_ON_PAGE)
1229 
1230 static struct qlnxr_pbl *
1231 qlnxr_alloc_pbl_tbl(struct qlnxr_dev *dev,
1232 	struct qlnxr_pbl_info *pbl_info, gfp_t flags)
1233 {
1234 	void			*va;
1235 	dma_addr_t		pa;
1236 	dma_addr_t		*pbl_main_tbl;
1237 	struct qlnxr_pbl	*pbl_table;
1238 	int			i;
1239 	qlnx_host_t		*ha;
1240 
1241 	ha = dev->ha;
1242 
1243 	QL_DPRINT12(ha, "enter\n");
1244 
1245 	pbl_table = kzalloc(sizeof(*pbl_table) * pbl_info->num_pbls, flags);
1246 
1247 	if (!pbl_table) {
1248 		QL_DPRINT12(ha, "pbl_table = NULL\n");
1249 		return NULL;
1250 	}
1251 
1252 	for (i = 0; i < pbl_info->num_pbls; i++) {
1253 		va = qlnx_dma_alloc_coherent(&dev->ha->cdev, &pa, pbl_info->pbl_size);
1254 		if (!va) {
1255 			QL_DPRINT11(ha, "Failed to allocate pbl#%d\n", i);
1256 			goto err;
1257 		}
1258 		memset(va, 0, pbl_info->pbl_size);
1259 		pbl_table[i].va = va;
1260 		pbl_table[i].pa = pa;
1261 	}
1262 
1263 	/* Two-Layer PBLs, if we have more than one pbl we need to initialize
1264 	 * the first one with physical pointers to all of the rest
1265 	 */
1266 	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
1267 	for (i = 0; i < pbl_info->num_pbls - 1; i++)
1268 		pbl_main_tbl[i] = pbl_table[i + 1].pa;
1269 
1270 	QL_DPRINT12(ha, "exit\n");
1271 	return pbl_table;
1272 
1273 err:
1274 	qlnxr_free_pbl(dev, pbl_info, pbl_table);
1275 
1276 	QL_DPRINT12(ha, "exit with error\n");
1277 	return NULL;
1278 }
1279 
1280 static int
1281 qlnxr_prepare_pbl_tbl(struct qlnxr_dev *dev,
1282 	struct qlnxr_pbl_info *pbl_info,
1283 	u32 num_pbes,
1284 	int two_layer_capable)
1285 {
1286 	u32		pbl_capacity;
1287 	u32		pbl_size;
1288 	u32		num_pbls;
1289 	qlnx_host_t	*ha;
1290 
1291 	ha = dev->ha;
1292 
1293 	QL_DPRINT12(ha, "enter\n");
1294 
1295 	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
1296 		if (num_pbes > MAX_PBES_TWO_LAYER) {
1297 			QL_DPRINT11(ha, "prepare pbl table: too many pages %d\n",
1298 				num_pbes);
1299 			return -EINVAL;
1300 		}
1301 
1302 		/* calculate required pbl page size */
1303 		pbl_size = MIN_FW_PBL_PAGE_SIZE;
1304 		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
1305 			NUM_PBES_ON_PAGE(pbl_size);
1306 
1307 		while (pbl_capacity < num_pbes) {
1308 			pbl_size *= 2;
1309 			pbl_capacity = pbl_size / sizeof(u64);
1310 			pbl_capacity = pbl_capacity * pbl_capacity;
1311 		}
1312 
1313 		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
1314 		num_pbls++; /* One for the layer0 ( points to the pbls) */
1315 		pbl_info->two_layered = true;
1316 	} else {
1317 		/* One layered PBL */
1318 		num_pbls = 1;
1319 		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
1320 				roundup_pow_of_two(num_pbes * sizeof(u64)));
1321 		pbl_info->two_layered = false;
1322 	}
1323 
1324 	pbl_info->num_pbls = num_pbls;
1325 	pbl_info->pbl_size = pbl_size;
1326 	pbl_info->num_pbes = num_pbes;
1327 
1328 	QL_DPRINT12(ha, "prepare pbl table: num_pbes=%d, num_pbls=%d pbl_size=%d\n",
1329 		pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
1330 
1331 	return 0;
1332 }
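
/*
 * Worked example: num_pbes = 300000 with two_layer_capable set.
 * MIN_FW_PBL_PAGE_SIZE (4K) holds 512 PBEs, so a two-layer capacity
 * of 512 * 512 = 262144 is too small; doubling to 8K gives
 * 1024 * 1024 = 1048576. num_pbls = DIV_ROUND_UP(300000, 1024) + 1
 * = 294, where the +1 is the layer-0 page that points at the other
 * 293 pages.
 */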
1333 
1334 static void
1335 qlnxr_populate_pbls(struct qlnxr_dev *dev, struct ib_umem *umem,
1336 	struct qlnxr_pbl *pbl, struct qlnxr_pbl_info *pbl_info)
1337 {
1338 	struct regpair		*pbe;
1339 	struct qlnxr_pbl	*pbl_tbl;
1340 	struct scatterlist	*sg;
1341 	int			shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
1342 	qlnx_host_t		*ha;
1343 
1344 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
1345         int                     i;
1346         struct                  ib_umem_chunk *chunk = NULL;
1347 #else
1348         int                     entry;
1349 #endif
1350 
1351 	ha = dev->ha;
1352 
1353 	QL_DPRINT12(ha, "enter\n");
1354 
1355 	if (!pbl_info) {
1356 		QL_DPRINT11(ha, "PBL_INFO not initialized\n");
1357 		return;
1358 	}
1359 
1360 	if (!pbl_info->num_pbes) {
1361 		QL_DPRINT11(ha, "pbl_info->num_pbes == 0\n");
1362 		return;
1363 	}
1364 
1365 	/* If we have a two layered pbl, the first pbl points to the rest
1366 	 * of the pbls and the first entry lays on the second pbl in the table
1367 	 */
1368 	if (pbl_info->two_layered)
1369 		pbl_tbl = &pbl[1];
1370 	else
1371 		pbl_tbl = pbl;
1372 
1373 	pbe = (struct regpair *)pbl_tbl->va;
1374 	if (!pbe) {
1375 		QL_DPRINT12(ha, "pbe is NULL\n");
1376 		return;
1377 	}
1378 
1379 	pbe_cnt = 0;
1380 
1381 	shift = ilog2(umem->page_size);
1382 
1383 #ifndef DEFINE_IB_UMEM_WITH_CHUNK
1384 
1385 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
1386 #else
1387 	list_for_each_entry(chunk, &umem->chunk_list, list) {
1388 		/* get all the dma regions from the chunk. */
1389 		for (i = 0; i < chunk->nmap; i++) {
1390 			sg = &chunk->page_list[i];
1391 #endif
1392 			pages = sg_dma_len(sg) >> shift;
1393 			for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
1394 				/* store the page address in pbe */
1395 				pbe->lo =
1396 				    cpu_to_le32(sg_dma_address(sg) +
1397 						(umem->page_size * pg_cnt));
1398 				pbe->hi =
1399 				    cpu_to_le32(upper_32_bits
1400 						((sg_dma_address(sg) +
1401 						  umem->page_size * pg_cnt)));
1402 
1403 				QL_DPRINT12(ha,
1404 					"Populate pbl table:"
1405 					" pbe->addr=0x%x:0x%x "
1406 					" pbe_cnt = %d total_num_pbes=%d"
1407 					" pbe=%p\n", pbe->lo, pbe->hi, pbe_cnt,
1408 					total_num_pbes, pbe);
1409 
1410 				pbe_cnt++;
1411 				total_num_pbes++;
1412 				pbe++;
1413 
1414 				if (total_num_pbes == pbl_info->num_pbes)
1415 					return;
1416 
1417 				/* if the given pbl is full storing the pbes,
1418 				 * move to next pbl.
1419 				 */
1420 				if (pbe_cnt ==
1421 					(pbl_info->pbl_size / sizeof(u64))) {
1422 					pbl_tbl++;
1423 					pbe = (struct regpair *)pbl_tbl->va;
1424 					pbe_cnt = 0;
1425 				}
1426 			}
1427 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
1428 		}
1429 #endif
1430 	}
1431 	QL_DPRINT12(ha, "exit\n");
1432 	return;
1433 }
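
/*
 * Example: a 16KB user buffer backed by 4KB pages produces 4 PBEs;
 * each PBE stores one page's bus address split into lo/hi 32-bit
 * halves, exactly as written in the loop above.
 */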
1434 
1435 static void
1436 free_mr_info(struct qlnxr_dev *dev, struct mr_info *info)
1437 {
1438 	struct qlnxr_pbl *pbl, *tmp;
1439 	qlnx_host_t		*ha;
1440 
1441 	ha = dev->ha;
1442 
1443 	QL_DPRINT12(ha, "enter\n");
1444 
1445 	if (info->pbl_table)
1446 		list_add_tail(&info->pbl_table->list_entry,
1447 			      &info->free_pbl_list);
1448 
1449 	if (!list_empty(&info->inuse_pbl_list))
1450 		list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
1451 
1452 	list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
1453 		list_del(&pbl->list_entry);
1454 		qlnxr_free_pbl(dev, &info->pbl_info, pbl);
1455 	}
1456 	QL_DPRINT12(ha, "exit\n");
1457 
1458 	return;
1459 }
1460 
1461 static int
1462 qlnxr_init_mr_info(struct qlnxr_dev *dev, struct mr_info *info,
1463 	size_t page_list_len, bool two_layered)
1464 {
1465 	int			rc;
1466 	struct qlnxr_pbl	*tmp;
1467 	qlnx_host_t		*ha;
1468 
1469 	ha = dev->ha;
1470 
1471 	QL_DPRINT12(ha, "enter\n");
1472 
1473 	INIT_LIST_HEAD(&info->free_pbl_list);
1474 	INIT_LIST_HEAD(&info->inuse_pbl_list);
1475 
1476 	rc = qlnxr_prepare_pbl_tbl(dev, &info->pbl_info,
1477 				  page_list_len, two_layered);
1478 	if (rc) {
1479 		QL_DPRINT11(ha, "qlnxr_prepare_pbl_tbl [%d]\n", rc);
1480 		goto done;
1481 	}
1482 
1483 	info->pbl_table = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
1484 
1485 	if (!info->pbl_table) {
1486 		rc = -ENOMEM;
1487 		QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl returned NULL\n");
1488 		goto done;
1489 	}
1490 
1491 	QL_DPRINT12(ha, "pbl_table_pa = %pa\n", &info->pbl_table->pa);
1492 
1493 	/* In the usual case we use 2 PBLs, so we add one to the free
1494 	 * list and allocate another one.
1495 	 */
1496 	tmp = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
1497 
1498 	if (!tmp) {
1499 		QL_DPRINT11(ha, "Extra PBL is not allocated\n");
1500 		goto done; /* it's OK if second allocation fails, so rc = 0*/
1501 	}
1502 
1503 	list_add_tail(&tmp->list_entry, &info->free_pbl_list);
1504 
1505 	QL_DPRINT12(ha, "extra pbl_table_pa = %pa\n", &tmp->pa);
1506 
1507 done:
1508 	if (rc)
1509 		free_mr_info(dev, info);
1510 
1511 	QL_DPRINT12(ha, "exit [%d]\n", rc);
1512 
1513 	return rc;
1514 }
1515 
1516 struct ib_mr *
1517 #if __FreeBSD_version >= 1102000
1518 qlnxr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
1519 	u64 usr_addr, int acc, struct ib_udata *udata)
1520 #else
1521 qlnxr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
1522 	u64 usr_addr, int acc, struct ib_udata *udata, int mr_id)
1523 #endif /* #if __FreeBSD_version >= 1102000 */
1524 {
1525 	int		rc = -ENOMEM;
1526 	struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
1527 	struct qlnxr_mr *mr;
1528 	struct qlnxr_pd *pd;
1529 	qlnx_host_t	*ha;
1530 
1531 	ha = dev->ha;
1532 
1533 	QL_DPRINT12(ha, "enter\n");
1534 
1535 	pd = get_qlnxr_pd(ibpd);
1536 
1537 	QL_DPRINT12(ha, "register user mr pd = %d"
1538 		" start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
1539 		pd->pd_id, start, len, usr_addr, acc);
1540 
1541 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
1542 		QL_DPRINT11(ha,
1543 			"(acc & IB_ACCESS_REMOTE_WRITE &&"
1544 			" !(acc & IB_ACCESS_LOCAL_WRITE))\n");
1545 		return ERR_PTR(-EINVAL);
1546 	}
1547 
1548 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1549 	if (!mr) {
1550 		QL_DPRINT11(ha, "kzalloc(mr) failed\n");
1551 		return ERR_PTR(rc);
1552 	}
1553 
1554 	mr->type = QLNXR_MR_USER;
1555 
1556 	mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
1557 	if (IS_ERR(mr->umem)) {
1558 		rc = -EFAULT;
1559 		QL_DPRINT11(ha, "ib_umem_get failed [%p]\n", mr->umem);
1560 		goto err0;
1561 	}
1562 
1563 	rc = qlnxr_init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
1564 	if (rc) {
1565 		QL_DPRINT11(ha,
1566 			"qlnxr_init_mr_info failed [%d]\n", rc);
1567 		goto err1;
1568 	}
1569 
1570 	qlnxr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
1571 			   &mr->info.pbl_info);
1572 
1573 	rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
1574 
1575 	if (rc) {
1576 		QL_DPRINT11(ha, "roce alloc tid returned an error %d\n", rc);
1577 		goto err1;
1578 	}
1579 
1580 	/* index only, 18 bit long, lkey = itid << 8 | key */
1581 	mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
1582 	mr->hw_mr.key = 0;
1583 	mr->hw_mr.pd = pd->pd_id;
1584 	mr->hw_mr.local_read = 1;
1585 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
1586 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
1587 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1588 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
1589 	mr->hw_mr.mw_bind = false; /* TBD MW BIND */
1590 	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
1591 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
1592 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
1593 	mr->hw_mr.page_size_log = ilog2(mr->umem->page_size); /* for the MR pages */
1594 
1595 #if __FreeBSD_version >= 1102000
1596 	mr->hw_mr.fbo = ib_umem_offset(mr->umem);
1597 #else
1598 	mr->hw_mr.fbo = mr->umem->offset;
1599 #endif
1600 	mr->hw_mr.length = len;
1601 	mr->hw_mr.vaddr = usr_addr;
1602 	mr->hw_mr.zbva = false; /* TBD figure when this should be true */
1603 	mr->hw_mr.phy_mr = false; /* Fast MR - True, Regular Register False */
1604 	mr->hw_mr.dma_mr = false;
1605 
1606 	rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
1607 	if (rc) {
1608 		QL_DPRINT11(ha, "roce register tid returned an error %d\n", rc);
1609 		goto err2;
1610 	}
1611 
1612 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1613 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
1614 		mr->hw_mr.remote_atomic)
1615 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
1616 
1617 	QL_DPRINT12(ha, "register user mr lkey: %x\n", mr->ibmr.lkey);
1618 
1619 	return (&mr->ibmr);
1620 
1621 err2:
1622 	ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
1623 err1:
1624 	qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
1625 err0:
1626 	kfree(mr);
1627 
1628 	QL_DPRINT12(ha, "exit [%d]\n", rc);
1629 	return (ERR_PTR(rc));
1630 }
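
/*
 * Usage sketch (illustrative): from user space this path is normally
 * reached through libibverbs, e.g.:
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *	    IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);
 */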
1631 
1632 int
1633 qlnxr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
1634 {
1635 	struct qlnxr_mr	*mr = get_qlnxr_mr(ib_mr);
1636 	struct qlnxr_dev *dev = get_qlnxr_dev((ib_mr->device));
1637 	int		rc = 0;
1638 	qlnx_host_t	*ha;
1639 
1640 	ha = dev->ha;
1641 
1642 	QL_DPRINT12(ha, "enter\n");
1643 
1644 	if ((mr->type != QLNXR_MR_DMA) && (mr->type != QLNXR_MR_FRMR))
1645 		qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
1646 
1647 	/* it could be user registered memory. */
1648 	if (mr->umem)
1649 		ib_umem_release(mr->umem);
1650 
1651 	kfree(mr->pages);
1652 
1653 	kfree(mr);
1654 
1655 	QL_DPRINT12(ha, "exit\n");
1656 	return rc;
1657 }
1658 
1659 static int
1660 qlnxr_copy_cq_uresp(struct qlnxr_dev *dev,
1661 	struct qlnxr_cq *cq, struct ib_udata *udata)
1662 {
1663 	struct qlnxr_create_cq_uresp	uresp;
1664 	int				rc;
1665 	qlnx_host_t			*ha;
1666 
1667 	ha = dev->ha;
1668 
1669 	QL_DPRINT12(ha, "enter\n");
1670 
1671 	memset(&uresp, 0, sizeof(uresp));
1672 
1673 	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
1674 	uresp.icid = cq->icid;
1675 
1676 	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1677 
1678 	if (rc) {
1679 		QL_DPRINT12(ha, "ib_copy_to_udata error cqid=0x%x[%d]\n",
1680 			cq->icid, rc);
1681 	}
1682 
1683 	QL_DPRINT12(ha, "exit [%d]\n", rc);
1684 	return rc;
1685 }
1686 
1687 static void
1688 consume_cqe(struct qlnxr_cq *cq)
1689 {
1690 
1691 	if (cq->latest_cqe == cq->toggle_cqe)
1692 		cq->pbl_toggle ^= RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK;
1693 
1694 	cq->latest_cqe = ecore_chain_consume(&cq->pbl);
1695 }
1696 
1697 static inline int
1698 qlnxr_align_cq_entries(int entries)
1699 {
1700 	u64 size, aligned_size;
1701 
1702 	/* We allocate an extra entry that we don't report to the FW.
1703 	 * Why?
1704 	 * The CQE size is 32 bytes but the FW writes in chunks of 64 bytes
1705 	 * (for performance purposes). Allocating an extra entry and telling
1706 	 * the FW we have less prevents overwriting the first entry in case of
1707 	 * a wrap i.e. when the FW writes the last entry and the application
1708 	 * hasn't read the first one.
1709 	 */
1710 	size = (entries + 1) * QLNXR_CQE_SIZE;
1711 
1712 	/* We align to PAGE_SIZE.
1713 	 * Why?
1714 	 * Since the CQ is going to be mapped and the mapping is anyhow in whole
1715 	 * kernel pages we benefit from the possibly extra CQEs.
1716 	 */
1717 	aligned_size = ALIGN(size, PAGE_SIZE);
1718 
1719 	/* note: for CQs created in user space the result of this function
1720 	 * should match the size mapped in user space
1721 	 */
1722 	return (aligned_size / QLNXR_CQE_SIZE);
1723 }
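
/*
 * Worked example (assuming PAGE_SIZE == 4096 and a 32-byte CQE):
 * entries == 1024 gives size == 1025 * 32 == 32800 bytes, which
 * aligns up to 9 pages (36864 bytes), so 36864 / 32 == 1152 CQEs
 * are returned.
 */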
1724 
1725 static inline int
1726 qlnxr_init_user_queue(struct ib_ucontext *ib_ctx, struct qlnxr_dev *dev,
1727 	struct qlnxr_userq *q, u64 buf_addr, size_t buf_len,
1728 	int access, int dmasync, int alloc_and_init)
1729 {
1730 	int		page_cnt;
1731 	int		rc;
1732 	qlnx_host_t	*ha;
1733 
1734 	ha = dev->ha;
1735 
1736 	QL_DPRINT12(ha, "enter\n");
1737 
1738 	q->buf_addr = buf_addr;
1739 	q->buf_len = buf_len;
1740 
1741 	QL_DPRINT12(ha, "buf_addr : %llx, buf_len : %x, access : %x"
1742 	      " dmasync : %x\n", q->buf_addr, q->buf_len,
1743 		access, dmasync);
1744 
1745 	q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
1746 
1747 	if (IS_ERR(q->umem)) {
1748 		QL_DPRINT11(ha, "ib_umem_get failed [%lx]\n", PTR_ERR(q->umem));
1749 		return PTR_ERR(q->umem);
1750 	}
1751 
1752 	page_cnt = ib_umem_page_count(q->umem);
1753 	rc = qlnxr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt,
1754 				  0 /* SQ and RQ don't support dual layer pbl.
1755 				     * CQ may, but this is yet uncoded.
1756 				     */);
1757 	if (rc) {
1758 		QL_DPRINT11(ha, "qlnxr_prepare_pbl_tbl failed [%d]\n", rc);
1759 		goto err;
1760 	}
1761 
1762 	if (alloc_and_init) {
1763 		q->pbl_tbl = qlnxr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
1764 
1765 		if (!q->pbl_tbl) {
1766 			QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl failed\n");
1767 			rc = -ENOMEM;
1768 			goto err;
1769 		}
1770 
1771 		qlnxr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
1772 	} else {
1773 		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
1774 
1775 		if (!q->pbl_tbl) {
1776 			QL_DPRINT11(ha, "qlnxr_alloc_pbl_tbl failed\n");
1777 			rc = -ENOMEM;
1778 			goto err;
1779 		}
1780 	}
1781 
1782 	QL_DPRINT12(ha, "exit\n");
1783 	return 0;
1784 
1785 err:
1786 	ib_umem_release(q->umem);
1787 	q->umem = NULL;
1788 
1789 	QL_DPRINT12(ha, "exit [%d]\n", rc);
1790 	return rc;
1791 }
1792 
1793 int
1794 qlnxr_create_cq(struct ib_cq *ibcq,
1795 		const struct ib_cq_init_attr *attr,
1796 		struct ib_udata *udata)
1797 {
1798 	struct qlnxr_ucontext			*ctx;
1799 	struct ecore_rdma_destroy_cq_out_params destroy_oparams;
1800 	struct ecore_rdma_destroy_cq_in_params	destroy_iparams;
1801 	struct qlnxr_dev			*dev;
1802 	struct ecore_rdma_create_cq_in_params	params;
1803 	struct qlnxr_create_cq_ureq		ureq;
1804 
1805 #if __FreeBSD_version >= 1100000
1806 	int					vector = attr->comp_vector;
1807 	int					entries = attr->cqe;
1808 #endif
1809 	struct qlnxr_cq				*cq = get_qlnxr_cq(ibcq);
1810 	int					chain_entries, rc, page_cnt;
1811 	u64					pbl_ptr;
1812 	u16					icid;
1813 	qlnx_host_t				*ha;
1814 
1815 	dev = get_qlnxr_dev(ibcq->device);
1816 	ha = dev->ha;
1817 
1818 	QL_DPRINT12(ha, "called from %s. entries = %d, "
1819 		"vector = %d\n",
1820 		(udata ? "User Lib" : "Kernel"), entries, vector);
1821 
1822         memset(&params, 0, sizeof(struct ecore_rdma_create_cq_in_params));
1823         memset(&destroy_iparams, 0, sizeof(struct ecore_rdma_destroy_cq_in_params));
1824         memset(&destroy_oparams, 0, sizeof(struct ecore_rdma_destroy_cq_out_params));
1825 
1826 	if (entries > QLNXR_MAX_CQES) {
1827 		QL_DPRINT11(ha,
1828 			"the number of entries %d is too high. "
1829 			"Must be equal or below %d.\n",
1830 			entries, QLNXR_MAX_CQES);
1831 		return -EINVAL;
1832 	}
1833 	chain_entries = qlnxr_align_cq_entries(entries);
1834 	chain_entries = min_t(int, chain_entries, QLNXR_MAX_CQES);
1835 
1836 	if (udata) {
1837 		ctx = rdma_udata_to_drv_context(
1838 		    udata, struct qlnxr_ucontext, ibucontext);
1839 
1840 		memset(&ureq, 0, sizeof(ureq));
1841 
1842 		if (ib_copy_from_udata(&ureq, udata,
1843 			min(sizeof(ureq), udata->inlen))) {
1844 			QL_DPRINT11(ha, "ib_copy_from_udata failed\n");
1845 			goto err0;
1846 		}
1847 
1848 		if (!ureq.len) {
1849 			QL_DPRINT11(ha, "ureq.len == 0\n");
1850 			goto err0;
1851 		}
1852 
1853 		cq->cq_type = QLNXR_CQ_TYPE_USER;
1854 
1855 		rc = qlnxr_init_user_queue(&ctx->ibucontext, dev, &cq->q,
1856 		    ureq.addr, ureq.len, IB_ACCESS_LOCAL_WRITE, 1, 1);
1857 		if (rc) goto err0;
1858 		pbl_ptr = cq->q.pbl_tbl->pa;
1859 		page_cnt = cq->q.pbl_info.num_pbes;
1860 		cq->ibcq.cqe = chain_entries;
1861 	} else {
1862 		ctx = NULL;
1863 
1864 		cq->cq_type = QLNXR_CQ_TYPE_KERNEL;
1865 
1866                 rc = ecore_chain_alloc(&dev->ha->cdev,
1867                            ECORE_CHAIN_USE_TO_CONSUME,
1868                            ECORE_CHAIN_MODE_PBL,
1869                            ECORE_CHAIN_CNT_TYPE_U32,
1870                            chain_entries,
1871                            sizeof(union roce_cqe),
1872                            &cq->pbl, NULL);
1873 
1874 		if (rc)
1875 			goto err1;
1876 
1877 		page_cnt = ecore_chain_get_page_cnt(&cq->pbl);
1878 		pbl_ptr = ecore_chain_get_pbl_phys(&cq->pbl);
1879 		cq->ibcq.cqe = cq->pbl.capacity;
1880 	}
1881 
1882         params.cq_handle_hi = upper_32_bits((uintptr_t)cq);
1883         params.cq_handle_lo = lower_32_bits((uintptr_t)cq);
1884         params.cnq_id = vector;
1885         params.cq_size = chain_entries - 1;
1886         params.pbl_num_pages = page_cnt;
1887         params.pbl_ptr = pbl_ptr;
1888         params.pbl_two_level = 0;
1889 
1890 	if (udata) {
1891         	params.dpi = ctx->dpi;
1892 	} else {
1893         	params.dpi = dev->dpi;
1894 	}
1895 
1896 	rc = ecore_rdma_create_cq(dev->rdma_ctx, &params, &icid);
1897 	if (rc)
1898 		goto err2;
1899 
1900 	cq->icid = icid;
1901 	cq->sig = QLNXR_CQ_MAGIC_NUMBER;
1902 	spin_lock_init(&cq->cq_lock);
1903 
1904 	if (udata) {
1905 		rc = qlnxr_copy_cq_uresp(dev, cq, udata);
1906 		if (rc)
1907 			goto err3;
1908 	} else {
1909 		/* Generate doorbell address.
1910 		 * Configure bits 3-9 with DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT.
1911 		 * TODO: consider moving to device scope as it is a function of
1912 		 *       the device.
1913 		 * TODO: add ifdef if plan to support 16 bit.
1914 		 */
1915 		cq->db_addr = dev->db_addr +
1916 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
1917 		cq->db.data.icid = cq->icid;
1918 		cq->db.data.params = DB_AGG_CMD_SET <<
1919 				     RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
1920 
1921 		/* point to the very last element; once we pass it, we will toggle */
1922 		cq->toggle_cqe = ecore_chain_get_last_elem(&cq->pbl);
1923 		cq->pbl_toggle = RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK;
1924 
1925 		/* must be different from pbl_toggle */
1926 		cq->latest_cqe = NULL;
1927 		consume_cqe(cq);
1928 		cq->cq_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
1929 	}
1930 
1931 	QL_DPRINT12(ha, "exit icid = 0x%x, addr = %p,"
1932 		" number of entries = 0x%x\n",
1933 		cq->icid, cq, params.cq_size);
1934 	QL_DPRINT12(ha, "cq_addr = %p\n", cq);
1935 	return (0);
1936 
1937 err3:
1938 	destroy_iparams.icid = cq->icid;
1939 	ecore_rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams, &destroy_oparams);
1940 err2:
1941 	if (udata)
1942 		qlnxr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1943 	else
1944 		ecore_chain_free(&dev->ha->cdev, &cq->pbl);
1945 err1:
1946 	if (udata)
1947 		ib_umem_release(cq->q.umem);
1948 err0:
1949 	QL_DPRINT12(ha, "exit error\n");
1950 
1951 	return (-EINVAL);
1952 }
1953 
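/*
 * qlnxr_resize_cq: CQ resize is not implemented; the verb is a stub that
 * leaves the CQ unchanged and returns success.
 */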
1954 int qlnxr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
1955 {
1956 	int			status = 0;
1957 	struct qlnxr_dev	*dev = get_qlnxr_dev((ibcq->device));
1958 	qlnx_host_t		*ha;
1959 
1960 	ha = dev->ha;
1961 
1962 	QL_DPRINT12(ha, "enter/exit\n");
1963 
1964 	return status;
1965 }
1966 
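/*
 * qlnxr_destroy_cq: destroy a CQ and free its resources. GSI CQs are
 * handled entirely by the driver, so no firmware call is made for them;
 * all other CQs are destroyed through ecore and their PBL chain is freed.
 * For user-mode CQs the PBL table and the umem are released as well.
 */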
1967 void
1968 qlnxr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1969 {
1970 	struct qlnxr_dev			*dev = get_qlnxr_dev((ibcq->device));
1971 	struct ecore_rdma_destroy_cq_out_params oparams;
1972 	struct ecore_rdma_destroy_cq_in_params	iparams;
1973 	struct qlnxr_cq				*cq = get_qlnxr_cq(ibcq);
1974 	int					rc = 0;
1975 	qlnx_host_t				*ha;
1976 
1977 	ha = dev->ha;
1978 
1979 	QL_DPRINT12(ha, "enter cq_id = %d\n", cq->icid);
1980 
1981 	cq->destroyed = 1;
1982 
1983 	/* TODO: Synchronize the IRQ of the CNQ that this CQ belongs to, to
1984 	 * validate that all completions with notification have been dealt
1985 	 * with. The rest of the completions are not interesting.
1986 	 */
1987 
1988 	/* GSI CQs are handled by the driver, so they don't exist in the FW */
1989 
1990 	if (cq->cq_type != QLNXR_CQ_TYPE_GSI) {
1991 		iparams.icid = cq->icid;
1992 
1993 		rc = ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1994 
1995 		if (rc) {
1996 			QL_DPRINT12(ha, "ecore_rdma_destroy_cq failed cq_id = %d\n",
1997 				cq->icid);
1998 			return;
1999 		}
2000 
2001 		QL_DPRINT12(ha, "free cq->pbl cq_id = %d\n", cq->icid);
2002 		ecore_chain_free(&dev->ha->cdev, &cq->pbl);
2003 	}
2004 
2005 	if (udata) {
2006 		qlnxr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
2007 		ib_umem_release(cq->q.umem);
2008 	}
2009 
2010 	cq->sig = ~cq->sig;
2011 
2012 	QL_DPRINT12(ha, "exit cq_id = %d\n", cq->icid);
2013 }
2014 
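/*
 * Validate the QP create attributes against the device capabilities
 * reported by ecore (WQE depth, SGEs, inline data) and reject invalid
 * combinations such as user-mode GSI QPs, a second GSI QP, or consumer
 * QPs that try to reuse the GSI QP's CQs.
 */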
2015 static int
2016 qlnxr_check_qp_attrs(struct ib_pd *ibpd,
2017 	struct qlnxr_dev *dev,
2018 	struct ib_qp_init_attr *attrs,
2019 	struct ib_udata *udata)
2020 {
2021 	struct ecore_rdma_device	*qattr;
2022 	qlnx_host_t			*ha;
2023 
2024 	qattr = ecore_rdma_query_device(dev->rdma_ctx);
2025 	ha = dev->ha;
2026 
2027 	QL_DPRINT12(ha, "enter\n");
2028 
2029 	QL_DPRINT12(ha, "attrs->sq_sig_type = %d\n", attrs->sq_sig_type);
2030 	QL_DPRINT12(ha, "attrs->qp_type = %d\n", attrs->qp_type);
2031 	QL_DPRINT12(ha, "attrs->create_flags = %d\n", attrs->create_flags);
2032 
2033 #if __FreeBSD_version < 1102000
2034 	QL_DPRINT12(ha, "attrs->qpg_type = %d\n", attrs->qpg_type);
2035 #endif
2036 
2037 	QL_DPRINT12(ha, "attrs->port_num = %d\n", attrs->port_num);
2038 	QL_DPRINT12(ha, "attrs->cap.max_send_wr = 0x%x\n", attrs->cap.max_send_wr);
2039 	QL_DPRINT12(ha, "attrs->cap.max_recv_wr = 0x%x\n", attrs->cap.max_recv_wr);
2040 	QL_DPRINT12(ha, "attrs->cap.max_send_sge = 0x%x\n", attrs->cap.max_send_sge);
2041 	QL_DPRINT12(ha, "attrs->cap.max_recv_sge = 0x%x\n", attrs->cap.max_recv_sge);
2042 	QL_DPRINT12(ha, "attrs->cap.max_inline_data = 0x%x\n",
2043 		attrs->cap.max_inline_data);
2044 
2045 #if __FreeBSD_version < 1102000
2046 	QL_DPRINT12(ha, "attrs->cap.qpg_tss_mask_sz = 0x%x\n",
2047 		attrs->cap.qpg_tss_mask_sz);
2048 #endif
2049 
2050 	QL_DPRINT12(ha, "\n\nqattr->vendor_id = 0x%x\n", qattr->vendor_id);
2051 	QL_DPRINT12(ha, "qattr->vendor_part_id = 0x%x\n", qattr->vendor_part_id);
2052 	QL_DPRINT12(ha, "qattr->hw_ver = 0x%x\n", qattr->hw_ver);
2053 	QL_DPRINT12(ha, "qattr->fw_ver = %p\n", (void *)qattr->fw_ver);
2054 	QL_DPRINT12(ha, "qattr->node_guid = %p\n", (void *)qattr->node_guid);
2055 	QL_DPRINT12(ha, "qattr->sys_image_guid = %p\n",
2056 		(void *)qattr->sys_image_guid);
2057 	QL_DPRINT12(ha, "qattr->max_cnq = 0x%x\n", qattr->max_cnq);
2058 	QL_DPRINT12(ha, "qattr->max_sge = 0x%x\n", qattr->max_sge);
2059 	QL_DPRINT12(ha, "qattr->max_srq_sge = 0x%x\n", qattr->max_srq_sge);
2060 	QL_DPRINT12(ha, "qattr->max_inline = 0x%x\n", qattr->max_inline);
2061 	QL_DPRINT12(ha, "qattr->max_wqe = 0x%x\n", qattr->max_wqe);
2062 	QL_DPRINT12(ha, "qattr->max_srq_wqe = 0x%x\n", qattr->max_srq_wqe);
2063 	QL_DPRINT12(ha, "qattr->max_qp_resp_rd_atomic_resc = 0x%x\n",
2064 		qattr->max_qp_resp_rd_atomic_resc);
2065 	QL_DPRINT12(ha, "qattr->max_qp_req_rd_atomic_resc = 0x%x\n",
2066 		qattr->max_qp_req_rd_atomic_resc);
2067 	QL_DPRINT12(ha, "qattr->max_dev_resp_rd_atomic_resc = 0x%x\n",
2068 		qattr->max_dev_resp_rd_atomic_resc);
2069 	QL_DPRINT12(ha, "qattr->max_cq = 0x%x\n", qattr->max_cq);
2070 	QL_DPRINT12(ha, "qattr->max_qp = 0x%x\n", qattr->max_qp);
2071 	QL_DPRINT12(ha, "qattr->max_srq = 0x%x\n", qattr->max_srq);
2072 	QL_DPRINT12(ha, "qattr->max_mr = 0x%x\n", qattr->max_mr);
2073 	QL_DPRINT12(ha, "qattr->max_mr_size = %p\n", (void *)qattr->max_mr_size);
2074 	QL_DPRINT12(ha, "qattr->max_cqe = 0x%x\n", qattr->max_cqe);
2075 	QL_DPRINT12(ha, "qattr->max_mw = 0x%x\n", qattr->max_mw);
2076 	QL_DPRINT12(ha, "qattr->max_fmr = 0x%x\n", qattr->max_fmr);
2077 	QL_DPRINT12(ha, "qattr->max_mr_mw_fmr_pbl = 0x%x\n",
2078 		qattr->max_mr_mw_fmr_pbl);
2079 	QL_DPRINT12(ha, "qattr->max_mr_mw_fmr_size = %p\n",
2080 		(void *)qattr->max_mr_mw_fmr_size);
2081 	QL_DPRINT12(ha, "qattr->max_pd = 0x%x\n", qattr->max_pd);
2082 	QL_DPRINT12(ha, "qattr->max_ah = 0x%x\n", qattr->max_ah);
2083 	QL_DPRINT12(ha, "qattr->max_pkey = 0x%x\n", qattr->max_pkey);
2084 	QL_DPRINT12(ha, "qattr->max_srq_wr = 0x%x\n", qattr->max_srq_wr);
2085 	QL_DPRINT12(ha, "qattr->max_stats_queues = 0x%x\n",
2086 		qattr->max_stats_queues);
2087 	//QL_DPRINT12(ha, "qattr->dev_caps = 0x%x\n", qattr->dev_caps);
2088 	QL_DPRINT12(ha, "qattr->page_size_caps = %p\n",
2089 		(void *)qattr->page_size_caps);
2090 	QL_DPRINT12(ha, "qattr->dev_ack_delay = 0x%x\n", qattr->dev_ack_delay);
2091 	QL_DPRINT12(ha, "qattr->reserved_lkey = 0x%x\n", qattr->reserved_lkey);
2092 	QL_DPRINT12(ha, "qattr->bad_pkey_counter = 0x%x\n",
2093 		qattr->bad_pkey_counter);
2094 
2095 	if ((attrs->qp_type == IB_QPT_GSI) && udata) {
2096 		QL_DPRINT12(ha, "unexpected udata when creating GSI QP\n");
2097 		return -EINVAL;
2098 	}
2099 
2100 	if (udata && !(ibpd->uobject && ibpd->uobject->context)) {
2101 		QL_DPRINT12(ha, "called from user without context\n");
2102 		return -EINVAL;
2103 	}
2104 
2105 	/* QP0... attrs->qp_type == IB_QPT_GSI */
2106 	if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
2107 		QL_DPRINT12(ha, "unsupported qp type=0x%x requested\n",
2108 			   attrs->qp_type);
2109 		return -EINVAL;
2110 	}
2111 	if (attrs->qp_type == IB_QPT_GSI && attrs->srq) {
2112 		QL_DPRINT12(ha, "cannot create GSI qp with SRQ\n");
2113 		return -EINVAL;
2114 	}
2115 	/* Skip the check for QP1 to support CM size of 128 */
2116 	if (attrs->cap.max_send_wr > qattr->max_wqe) {
2117 		QL_DPRINT12(ha, "cannot create an SQ with %d elements "
2118 			" (max_send_wr=0x%x)\n",
2119 			attrs->cap.max_send_wr, qattr->max_wqe);
2120 		return -EINVAL;
2121 	}
2122 	if (!attrs->srq && (attrs->cap.max_recv_wr > qattr->max_wqe)) {
2123 		QL_DPRINT12(ha, "cannot create an RQ with %d elements"
2124 			" (max_recv_wr=0x%x)\n",
2125 			attrs->cap.max_recv_wr, qattr->max_wqe);
2126 		return -EINVAL;
2127 	}
2128 	if (attrs->cap.max_inline_data > qattr->max_inline) {
2129 		QL_DPRINT12(ha,
2130 			"unsupported inline data size=0x%x "
2131 			"requested (max_inline=0x%x)\n",
2132 			attrs->cap.max_inline_data, qattr->max_inline);
2133 		return -EINVAL;
2134 	}
2135 	if (attrs->cap.max_send_sge > qattr->max_sge) {
2136 		QL_DPRINT12(ha,
2137 			"unsupported send_sge=0x%x "
2138 			"requested (max_send_sge=0x%x)\n",
2139 			attrs->cap.max_send_sge, qattr->max_sge);
2140 		return -EINVAL;
2141 	}
2142 	if (attrs->cap.max_recv_sge > qattr->max_sge) {
2143 		QL_DPRINT12(ha,
2144 			"unsupported recv_sge=0x%x requested "
2145 			" (max_recv_sge=0x%x)\n",
2146 			attrs->cap.max_recv_sge, qattr->max_sge);
2147 		return -EINVAL;
2148 	}
2149 	/* unprivileged user space cannot create special QP */
2150 	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
2151 		QL_DPRINT12(ha,
2152 			"userspace can't create special QPs of type=0x%x\n",
2153 			attrs->qp_type);
2154 		return -EINVAL;
2155 	}
2156 	/* allow creating only one GSI type of QP */
2157 	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
2158 		QL_DPRINT12(ha,
2159 			"create qp: GSI special QPs already created.\n");
2160 		return -EINVAL;
2161 	}
2162 
2163 	/* verify consumer QPs are not trying to use GSI QP's CQ */
2164 	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
2165 		struct qlnxr_cq *send_cq = get_qlnxr_cq(attrs->send_cq);
2166 		struct qlnxr_cq *recv_cq = get_qlnxr_cq(attrs->recv_cq);
2167 
2168 		if ((send_cq->cq_type == QLNXR_CQ_TYPE_GSI) ||
2169 		    (recv_cq->cq_type == QLNXR_CQ_TYPE_GSI)) {
2170 			QL_DPRINT11(ha, "consumer QP cannot use GSI CQs.\n");
2171 			return -EINVAL;
2172 		}
2173 	}
2174 	QL_DPRINT12(ha, "exit\n");
2175 	return 0;
2176 }
2177 
2178 static int
2179 qlnxr_copy_srq_uresp(struct qlnxr_dev *dev,
2180 	struct qlnxr_srq *srq,
2181 	struct ib_udata *udata)
2182 {
2183 	struct qlnxr_create_srq_uresp	uresp;
2184 	qlnx_host_t			*ha;
2185 	int				rc;
2186 
2187 	ha = dev->ha;
2188 
2189 	QL_DPRINT12(ha, "enter\n");
2190 
2191 	memset(&uresp, 0, sizeof(uresp));
2192 
2193 	uresp.srq_id = srq->srq_id;
2194 
2195 	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2196 
2197 	QL_DPRINT12(ha, "exit [%d]\n", rc);
2198 	return rc;
2199 }
2200 
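/*
 * Fill in the RQ portion of the user-mode create-QP response: doorbell
 * offset(s) and RQ icid. iWARP requires two doorbells per RQ while RoCE
 * requires one; nothing is reported when the QP is bound to an SRQ.
 */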
2201 static void
2202 qlnxr_copy_rq_uresp(struct qlnxr_dev *dev,
2203 	struct qlnxr_create_qp_uresp *uresp,
2204 	struct qlnxr_qp *qp)
2205 {
2206 	qlnx_host_t	*ha;
2207 
2208 	ha = dev->ha;
2209 
2210 	/* Return if QP is associated with SRQ instead of RQ */
2211 	QL_DPRINT12(ha, "enter qp->srq = %p\n", qp->srq);
2212 
2213 	if (qp->srq)
2214 		return;
2215 
2216 	/* iWARP requires two doorbells per RQ. */
2217 	if (QLNX_IS_IWARP(dev)) {
2218 		uresp->rq_db_offset =
2219 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2220 		uresp->rq_db2_offset =
2221 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2222 
2223 		QL_DPRINT12(ha, "uresp->rq_db_offset = 0x%x "
2224 			"uresp->rq_db2_offset = 0x%x\n",
2225 			uresp->rq_db_offset, uresp->rq_db2_offset);
2226 	} else {
2227 		uresp->rq_db_offset =
2228 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
2229 	}
2230 	uresp->rq_icid = qp->icid;
2231 
2232 	QL_DPRINT12(ha, "exit\n");
2233 	return;
2234 }
2235 
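/*
 * Fill in the SQ portion of the user-mode create-QP response: the SQ
 * doorbell offset and the SQ icid (qp->icid for iWARP, qp->icid + 1 for
 * RoCE).
 */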
2236 static void
2237 qlnxr_copy_sq_uresp(struct qlnxr_dev *dev,
2238 	struct qlnxr_create_qp_uresp *uresp,
2239 	struct qlnxr_qp *qp)
2240 {
2241 	qlnx_host_t	*ha;
2242 
2243 	ha = dev->ha;
2244 
2245 	QL_DPRINT12(ha, "enter\n");
2246 
2247 	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2248 
2249 	/* iWARP uses the same cid for rq and sq */
2250 	if (QLNX_IS_IWARP(dev)) {
2251 		uresp->sq_icid = qp->icid;
2252 		QL_DPRINT12(ha, "uresp->sq_icid = 0x%x\n", uresp->sq_icid);
2253 	} else
2254 		uresp->sq_icid = qp->icid + 1;
2255 
2256 	QL_DPRINT12(ha, "exit\n");
2257 	return;
2258 }
2259 
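/*
 * Build the complete create-QP response (SQ/RQ doorbells, atomic support,
 * QP id) and copy it back to user space via udata.
 */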
2260 static int
2261 qlnxr_copy_qp_uresp(struct qlnxr_dev *dev,
2262 	struct qlnxr_qp *qp,
2263 	struct ib_udata *udata)
2264 {
2265 	int				rc;
2266 	struct qlnxr_create_qp_uresp	uresp;
2267 	qlnx_host_t			*ha;
2268 
2269 	ha = dev->ha;
2270 
2271 	QL_DPRINT12(ha, "enter qp->icid =0x%x\n", qp->icid);
2272 
2273 	memset(&uresp, 0, sizeof(uresp));
2274 	qlnxr_copy_sq_uresp(dev, &uresp, qp);
2275 	qlnxr_copy_rq_uresp(dev, &uresp, qp);
2276 
2277 	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
2278 	uresp.qp_id = qp->qp_id;
2279 
2280 	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2281 
2282 	QL_DPRINT12(ha, "exit [%d]\n", rc);
2283 	return rc;
2284 }
2285 
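/*
 * Initialize the software QP state that is common to all QP types: lock,
 * reference count, magic value, CQ back-pointers and the SGE limits taken
 * from the create attributes.
 */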
2286 static void
2287 qlnxr_set_common_qp_params(struct qlnxr_dev *dev,
2288 	struct qlnxr_qp *qp,
2289 	struct qlnxr_pd *pd,
2290 	struct ib_qp_init_attr *attrs)
2291 {
2292 	qlnx_host_t			*ha;
2293 
2294 	ha = dev->ha;
2295 
2296 	QL_DPRINT12(ha, "enter\n");
2297 
2298 	spin_lock_init(&qp->q_lock);
2299 
2300 	atomic_set(&qp->refcnt, 1);
2301 	qp->pd = pd;
2302 	qp->sig = QLNXR_QP_MAGIC_NUMBER;
2303 	qp->qp_type = attrs->qp_type;
2304 	qp->max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2305 	qp->sq.max_sges = attrs->cap.max_send_sge;
2306 	qp->state = ECORE_ROCE_QP_STATE_RESET;
2307 	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
2308 	qp->sq_cq = get_qlnxr_cq(attrs->send_cq);
2309 	qp->rq_cq = get_qlnxr_cq(attrs->recv_cq);
2310 	qp->dev = dev;
2311 
2312 	if (!attrs->srq) {
2313 		/* QP is associated with RQ instead of SRQ */
2314 		qp->rq.max_sges = attrs->cap.max_recv_sge;
2315 		QL_DPRINT12(ha, "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
2316 			qp->rq.max_sges, qp->rq_cq->icid);
2317 	} else {
2318 		qp->srq = get_qlnxr_srq(attrs->srq);
2319 	}
2320 
2321 	QL_DPRINT12(ha,
2322 		"QP params:\tpd = %d, qp_type = %d, max_inline_data = %d,"
2323 		" state = %d, signaled = %d, use_srq=%d\n",
2324 		pd->pd_id, qp->qp_type, qp->max_inline_data,
2325 		qp->state, qp->signaled, ((attrs->srq) ? 1 : 0));
2326 	QL_DPRINT12(ha, "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
2327 		qp->sq.max_sges, qp->sq_cq->icid);
2328 	return;
2329 }
2330 
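/*
 * Validate the SRQ create attributes (WR depth, SGEs and SRQ limit)
 * against the device capabilities reported by ecore.
 */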
2331 static int
2332 qlnxr_check_srq_params(struct qlnxr_dev *dev,
2333 	struct ib_srq_init_attr *attrs)
2334 {
2335 	struct ecore_rdma_device *qattr;
2336 	qlnx_host_t		*ha;
2337 
2338 	ha = dev->ha;
2339 	qattr = ecore_rdma_query_device(dev->rdma_ctx);
2340 
2341 	QL_DPRINT12(ha, "enter\n");
2342 
2343 	if (attrs->attr.max_wr > qattr->max_srq_wqe) {
2344 		QL_DPRINT12(ha, "unsupported srq_wr=0x%x"
2345 			" requested (max_srq_wqe=0x%x)\n",
2346 			attrs->attr.max_wr, qattr->max_srq_wqe);
2347 		return -EINVAL;
2348 	}
2349 
2350 	if (attrs->attr.max_sge > qattr->max_sge) {
2351 		QL_DPRINT12(ha,
2352 			"unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
2353 			attrs->attr.max_sge, qattr->max_sge);
2354 		return -EINVAL;
2355 	}
2356 
2357 	if (attrs->attr.srq_limit > attrs->attr.max_wr) {
2358 		QL_DPRINT12(ha,
2359 		       "unsupported srq_limit=0x%x requested"
2360 			" (max_wr=0x%x)\n",
2361 			attrs->attr.srq_limit, attrs->attr.max_wr);
2362 		return -EINVAL;
2363 	}
2364 
2365 	QL_DPRINT12(ha, "exit\n");
2366 	return 0;
2367 }
2368 
2369 static void
2370 qlnxr_free_srq_user_params(struct qlnxr_srq *srq)
2371 {
2372 	struct qlnxr_dev	*dev = srq->dev;
2373 	qlnx_host_t		*ha;
2374 
2375 	ha = dev->ha;
2376 
2377 	QL_DPRINT12(ha, "enter\n");
2378 
2379 	qlnxr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
2380 	ib_umem_release(srq->usrq.umem);
2381 	ib_umem_release(srq->prod_umem);
2382 
2383 	QL_DPRINT12(ha, "exit\n");
2384 	return;
2385 }
2386 
2387 static void
2388 qlnxr_free_srq_kernel_params(struct qlnxr_srq *srq)
2389 {
2390 	struct qlnxr_srq_hwq_info *hw_srq  = &srq->hw_srq;
2391 	struct qlnxr_dev	*dev = srq->dev;
2392 	qlnx_host_t		*ha;
2393 
2394 	ha = dev->ha;
2395 
2396 	QL_DPRINT12(ha, "enter\n");
2397 
2398 	ecore_chain_free(dev->cdev, &hw_srq->pbl);
2399 
2400 	qlnx_dma_free_coherent(&dev->cdev,
2401 		hw_srq->virt_prod_pair_addr,
2402 		hw_srq->phy_prod_pair_addr,
2403 		sizeof(struct rdma_srq_producers));
2404 
2405 	QL_DPRINT12(ha, "exit\n");
2406 
2407 	return;
2408 }
2409 
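/*
 * Map the user-space SRQ buffer and the user producer pair, and record
 * the DMA address of the producer pair for use by the hardware.
 */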
2410 static int
2411 qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
2412 	struct qlnxr_srq *srq,
2413 	struct qlnxr_create_srq_ureq *ureq,
2414 	int access, int dmasync)
2415 {
2416 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
2417 	struct ib_umem_chunk	*chunk;
2418 #endif
2419 	struct scatterlist	*sg;
2420 	int			rc;
2421 	struct qlnxr_dev	*dev = srq->dev;
2422 	qlnx_host_t		*ha;
2423 
2424 	ha = dev->ha;
2425 
2426 	QL_DPRINT12(ha, "enter\n");
2427 
2428 	rc = qlnxr_init_user_queue(ib_ctx, srq->dev, &srq->usrq, ureq->srq_addr,
2429 				  ureq->srq_len, access, dmasync, 1);
2430 	if (rc)
2431 		return rc;
2432 
2433 	srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
2434 				     sizeof(struct rdma_srq_producers),
2435 				     access, dmasync);
2436 	if (IS_ERR(srq->prod_umem)) {
2437 		qlnxr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
2438 		ib_umem_release(srq->usrq.umem);
2439 
2440 		QL_DPRINT12(ha, "ib_umem_get failed for producer [%p]\n",
2441 			PTR_ERR(srq->prod_umem));
2442 
2443 		return PTR_ERR(srq->prod_umem);
2444 	}
2445 
2446 #ifdef DEFINE_IB_UMEM_WITH_CHUNK
2447 	chunk = container_of((&srq->prod_umem->chunk_list)->next,
2448 			     typeof(*chunk), list);
2449 	sg = &chunk->page_list[0];
2450 #else
2451 	sg = srq->prod_umem->sg_head.sgl;
2452 #endif
2453 	srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
2454 
2455 	QL_DPRINT12(ha, "exit\n");
2456 	return 0;
2457 }
2458 
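/*
 * Allocate the kernel SRQ resources: a DMA-coherent producer pair updated
 * by the hardware, and a PBL-mode ecore chain sized for max_wr work
 * requests of RDMA_MAX_SRQ_WQE_SIZE elements each.
 */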
2459 static int
2460 qlnxr_alloc_srq_kernel_params(struct qlnxr_srq *srq,
2461 	struct qlnxr_dev *dev,
2462 	struct ib_srq_init_attr *init_attr)
2463 {
2464 	struct qlnxr_srq_hwq_info	*hw_srq  = &srq->hw_srq;
2465 	dma_addr_t			phy_prod_pair_addr;
2466 	u32				num_elems, max_wr;
2467 	void				*va;
2468 	int				rc;
2469 	qlnx_host_t			*ha;
2470 
2471 	ha = dev->ha;
2472 
2473 	QL_DPRINT12(ha, "enter\n");
2474 
2475 	va = qlnx_dma_alloc_coherent(&dev->cdev,
2476 			&phy_prod_pair_addr,
2477 			sizeof(struct rdma_srq_producers));
2478 	if (!va) {
2479 		QL_DPRINT11(ha, "qlnx_dma_alloc_coherent failed for producer\n");
2480 		return -ENOMEM;
2481 	}
2482 
2483 	hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
2484 	hw_srq->virt_prod_pair_addr = va;
2485 
2486 	max_wr = init_attr->attr.max_wr;
2487 
2488 	num_elems = max_wr * RDMA_MAX_SRQ_WQE_SIZE;
2489 
2490         rc = ecore_chain_alloc(dev->cdev,
2491                    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2492                    ECORE_CHAIN_MODE_PBL,
2493                    ECORE_CHAIN_CNT_TYPE_U32,
2494                    num_elems,
2495                    ECORE_RDMA_SRQ_WQE_ELEM_SIZE,
2496                    &hw_srq->pbl, NULL);
2497 
2498 	if (rc) {
2499 		QL_DPRINT11(ha, "ecore_chain_alloc failed [%d]\n", rc);
2500 		goto err0;
2501 	}
2502 
2503 	hw_srq->max_wr = max_wr;
2504 	hw_srq->num_elems = num_elems;
2505 	hw_srq->max_sges = RDMA_MAX_SGE_PER_SRQ;
2506 
2507 	QL_DPRINT12(ha, "exit\n");
2508 	return 0;
2509 
2510 err0:
2511 	qlnx_dma_free_coherent(&dev->cdev, va, phy_prod_pair_addr,
2512 		sizeof(struct rdma_srq_producers));
2513 
2514 	QL_DPRINT12(ha, "exit [%d]\n", rc);
2515 	return rc;
2516 }
2517 
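/*
 * Populate the ecore create-QP input parameters common to user and kernel
 * QPs: the async QP handle, signaling mode, PD/DPI, CQ ids and, when
 * present, the SRQ binding.
 */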
2518 static inline void
2519 qlnxr_init_common_qp_in_params(struct qlnxr_dev *dev,
2520 	struct qlnxr_pd *pd,
2521 	struct qlnxr_qp *qp,
2522 	struct ib_qp_init_attr *attrs,
2523 	bool fmr_and_reserved_lkey,
2524 	struct ecore_rdma_create_qp_in_params *params)
2525 {
2526 	qlnx_host_t	*ha;
2527 
2528 	ha = dev->ha;
2529 
2530 	QL_DPRINT12(ha, "enter\n");
2531 
2532 	/* QP handle to be written in an async event */
2533 	params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
2534 	params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
2535 
2536 	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
2537 	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
2538 	params->pd = pd->pd_id;
2539 	params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
2540 	params->sq_cq_id = get_qlnxr_cq(attrs->send_cq)->icid;
2541 	params->stats_queue = 0;
2542 
2543 	params->rq_cq_id = get_qlnxr_cq(attrs->recv_cq)->icid;
2544 
2545 	if (qp->srq) {
2546 		/* QP is associated with SRQ instead of RQ */
2547 		params->srq_id = qp->srq->srq_id;
2548 		params->use_srq = true;
2549 		QL_DPRINT11(ha, "exit srq_id = 0x%x use_srq = 0x%x\n",
2550 			params->srq_id, params->use_srq);
2551 		return;
2552 	}
2553 
2554 	params->srq_id = 0;
2555 	params->use_srq = false;
2556 
2557 	QL_DPRINT12(ha, "exit\n");
2558 	return;
2559 }
2560 
2561 static inline void
2562 qlnxr_qp_user_print(struct qlnxr_dev *dev,
2563 	struct qlnxr_qp *qp)
2564 {
2565 	QL_DPRINT12((dev->ha), "qp=%p. sq_addr=0x%llx, sq_len=%zd, "
2566 		"rq_addr=0x%llx, rq_len=%zd\n",
2567 		qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
2568 		qp->urq.buf_len);
2569 	return;
2570 }
2571 
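/*
 * Track iWARP QPs in an idr keyed by QP id so they can later be looked up
 * by id; for RoCE the call is a no-op.
 */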
2572 static int
2573 qlnxr_idr_add(struct qlnxr_dev *dev, void *ptr, u32 id)
2574 {
2575 	u32		newid;
2576 	int		rc;
2577 	qlnx_host_t	*ha;
2578 
2579 	ha = dev->ha;
2580 
2581 	QL_DPRINT12(ha, "enter\n");
2582 
2583 	if (!QLNX_IS_IWARP(dev))
2584 		return 0;
2585 
2586 	do {
2587 		if (!idr_pre_get(&dev->qpidr, GFP_KERNEL)) {
2588 			QL_DPRINT11(ha, "idr_pre_get failed\n");
2589 			return -ENOMEM;
2590 		}
2591 
2592 		mtx_lock(&dev->idr_lock);
2593 
2594 		rc = idr_get_new_above(&dev->qpidr, ptr, id, &newid);
2595 
2596 		mtx_unlock(&dev->idr_lock);
2597 
2598 	} while (rc == -EAGAIN);
2599 
2600 	QL_DPRINT12(ha, "exit [%d]\n", rc);
2601 
2602 	return rc;
2603 }
2604 
2605 static void
2606 qlnxr_idr_remove(struct qlnxr_dev *dev, u32 id)
2607 {
2608 	qlnx_host_t	*ha;
2609 
2610 	ha = dev->ha;
2611 
2612 	QL_DPRINT12(ha, "enter\n");
2613 
2614 	if (!QLNX_IS_IWARP(dev))
2615 		return;
2616 
2617 	mtx_lock(&dev->idr_lock);
2618 	idr_remove(&dev->qpidr, id);
2619 	mtx_unlock(&dev->idr_lock);
2620 
2621 	QL_DPRINT12(ha, "exit \n");
2622 
2623 	return;
2624 }
2625 
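/*
 * For iWARP user QPs ecore supplies the PBL memory: copy its addresses
 * into the SQ/RQ PBL tables and populate the PBL entries from the umems.
 */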
2626 static inline void
2627 qlnxr_iwarp_populate_user_qp(struct qlnxr_dev *dev,
2628 	struct qlnxr_qp *qp,
2629 	struct ecore_rdma_create_qp_out_params *out_params)
2630 {
2631 	qlnx_host_t	*ha;
2632 
2633 	ha = dev->ha;
2634 
2635 	QL_DPRINT12(ha, "enter\n");
2636 
2637 	qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
2638 	qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
2639 
2640 	qlnxr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
2641 			   &qp->usq.pbl_info);
2642 
2643 	if (qp->srq) {
2644 		QL_DPRINT11(ha, "qp->srq = %p\n", qp->srq);
2645 		return;
2646 	}
2647 
2648 	qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
2649 	qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
2650 
2651 	qlnxr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
2652 			   &qp->urq.pbl_info);
2653 
2654 	QL_DPRINT12(ha, "exit\n");
2655 	return;
2656 }
2657 
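/*
 * Create a QP on behalf of user space: map the user-supplied SQ/RQ
 * buffers, create the QP in ecore and return the doorbell offsets and ids
 * through udata. On failure the ecore QP and the user mappings are torn
 * down again.
 */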
2658 static int
2659 qlnxr_create_user_qp(struct qlnxr_dev *dev,
2660 	struct qlnxr_qp *qp,
2661 	struct ib_pd *ibpd,
2662 	struct ib_udata *udata,
2663 	struct ib_qp_init_attr *attrs)
2664 {
2665 	struct ecore_rdma_destroy_qp_out_params d_out_params;
2666 	struct ecore_rdma_create_qp_in_params in_params;
2667 	struct ecore_rdma_create_qp_out_params out_params;
2668 	struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
2669 	struct ib_ucontext *ib_ctx = NULL;
2670 	struct qlnxr_create_qp_ureq ureq;
2671 	int alloc_and_init = QLNX_IS_ROCE(dev);
2672 	int rc = -EINVAL;
2673 	qlnx_host_t	*ha;
2674 
2675 	ha = dev->ha;
2676 
2677 	QL_DPRINT12(ha, "enter\n");
2678 
2679 	ib_ctx = ibpd->uobject->context;
2680 
2681 	memset(&ureq, 0, sizeof(ureq));
2682 	rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
2683 
2684 	if (rc) {
2685 		QL_DPRINT11(ha, "ib_copy_from_udata failed [%d]\n", rc);
2686 		return rc;
2687 	}
2688 
2689 	/* SQ - read access only (0), dma sync not required (0) */
2690 	rc = qlnxr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
2691 				  ureq.sq_len, 0, 0,
2692 				  alloc_and_init);
2693 	if (rc) {
2694 		QL_DPRINT11(ha, "qlnxr_init_user_queue failed [%d]\n", rc);
2695 		return rc;
2696 	}
2697 
2698 	if (!qp->srq) {
2699 		/* RQ - read access only (0), dma sync not required (0) */
2700 		rc = qlnxr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
2701 					  ureq.rq_len, 0, 0,
2702 					  alloc_and_init);
2703 
2704 		if (rc) {
2705 			QL_DPRINT11(ha, "qlnxr_init_user_queue failed [%d]\n", rc);
2706 			return rc;
2707 		}
2708 	}
2709 
2710 	memset(&in_params, 0, sizeof(in_params));
2711 	qlnxr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
2712 	in_params.qp_handle_lo = ureq.qp_handle_lo;
2713 	in_params.qp_handle_hi = ureq.qp_handle_hi;
2714 	in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
2715 	in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
2716 
2717 	if (!qp->srq) {
2718 		in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
2719 		in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
2720 	}
2721 
2722 	qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, &in_params, &out_params);
2723 
2724 	if (!qp->ecore_qp) {
2725 		rc = -ENOMEM;
2726 		QL_DPRINT11(ha, "ecore_rdma_create_qp failed\n");
2727 		goto err1;
2728 	}
2729 
2730 	if (QLNX_IS_IWARP(dev))
2731 		qlnxr_iwarp_populate_user_qp(dev, qp, &out_params);
2732 
2733 	qp->qp_id = out_params.qp_id;
2734 	qp->icid = out_params.icid;
2735 
2736 	rc = qlnxr_copy_qp_uresp(dev, qp, udata);
2737 
2738 	if (rc) {
2739 		QL_DPRINT11(ha, "qlnxr_copy_qp_uresp failed\n");
2740 		goto err;
2741 	}
2742 
2743 	qlnxr_qp_user_print(dev, qp);
2744 
2745 	QL_DPRINT12(ha, "exit\n");
2746 	return 0;
2747 err:
2748 	rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp, &d_out_params);
2749 
2750 	if (rc)
2751 		QL_DPRINT12(ha, "fatal fault\n");
2752 
2753 err1:
2754 	qlnxr_cleanup_user(dev, qp);
2755 
2756 	QL_DPRINT12(ha, "exit[%d]\n", rc);
2757 	return rc;
2758 }
2759 
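/*
 * Compute the kernel-QP doorbell addresses for RoCE: the SQ uses
 * icid + 1 and the RQ (when no SRQ is attached) uses icid, matching the
 * offsets reported to user space by qlnxr_copy_sq_uresp() and
 * qlnxr_copy_rq_uresp().
 */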
2760 static void
2761 qlnxr_set_roce_db_info(struct qlnxr_dev *dev,
2762 	struct qlnxr_qp *qp)
2763 {
2764 	qlnx_host_t	*ha;
2765 
2766 	ha = dev->ha;
2767 
2768 	QL_DPRINT12(ha, "enter qp = %p qp->srq %p\n", qp, qp->srq);
2769 
2770 	qp->sq.db = dev->db_addr +
2771 		DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2772 	qp->sq.db_data.data.icid = qp->icid + 1;
2773 
2774 	if (!qp->srq) {
2775 		qp->rq.db = dev->db_addr +
2776 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
2777 		qp->rq.db_data.data.icid = qp->icid;
2778 	}
2779 
2780 	QL_DPRINT12(ha, "exit\n");
2781 	return;
2782 }
2783 
2784 static void
2785 qlnxr_set_iwarp_db_info(struct qlnxr_dev *dev,
2786 	struct qlnxr_qp *qp)
2787 
2788 {
2789 	qlnx_host_t	*ha;
2790 
2791 	ha = dev->ha;
2792 
2793 	QL_DPRINT12(ha, "enter qp = %p qp->srq %p\n", qp, qp->srq);
2794 
2795 	qp->sq.db = dev->db_addr +
2796 		DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2797 	qp->sq.db_data.data.icid = qp->icid;
2798 
2799 	if (!qp->srq) {
2800 		qp->rq.db = dev->db_addr +
2801 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2802 		qp->rq.db_data.data.icid = qp->icid;
2803 
2804 		qp->rq.iwarp_db2 = dev->db_addr +
2805 			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2806 		qp->rq.iwarp_db2_data.data.icid = qp->icid;
2807 		qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
2808 	}
2809 
2810 	QL_DPRINT12(ha,
2811 		"qp->sq.db = %p qp->sq.db_data.data.icid =0x%x\n"
2812 		"\t\t\tqp->rq.db = %p qp->rq.db_data.data.icid =0x%x\n"
2813 		"\t\t\tqp->rq.iwarp_db2 = %p qp->rq.iwarp_db2.data.icid =0x%x"
2814 		" qp->rq.iwarp_db2.data.prod_val =0x%x\n",
2815 		qp->sq.db, qp->sq.db_data.data.icid,
2816 		qp->rq.db, qp->rq.db_data.data.icid,
2817 		qp->rq.iwarp_db2, qp->rq.iwarp_db2_data.data.icid,
2818 		qp->rq.iwarp_db2_data.data.value);
2819 
2820 	QL_DPRINT12(ha, "exit\n");
2821 	return;
2822 }
2823 
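/*
 * RoCE kernel QP creation: allocate the SQ chain (and the RQ chain when
 * no SRQ is used) first, then pass their page counts and PBL addresses to
 * ecore_rdma_create_qp().
 */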
2824 static int
2825 qlnxr_roce_create_kernel_qp(struct qlnxr_dev *dev,
2826 	struct qlnxr_qp *qp,
2827 	struct ecore_rdma_create_qp_in_params *in_params,
2828 	u32 n_sq_elems,
2829 	u32 n_rq_elems)
2830 {
2831 	struct ecore_rdma_create_qp_out_params out_params;
2832 	int		rc;
2833 	qlnx_host_t	*ha;
2834 
2835 	ha = dev->ha;
2836 
2837 	QL_DPRINT12(ha, "enter\n");
2838 
2839         rc = ecore_chain_alloc(
2840                 dev->cdev,
2841                 ECORE_CHAIN_USE_TO_PRODUCE,
2842                 ECORE_CHAIN_MODE_PBL,
2843                 ECORE_CHAIN_CNT_TYPE_U32,
2844                 n_sq_elems,
2845                 QLNXR_SQE_ELEMENT_SIZE,
2846                 &qp->sq.pbl,
2847                 NULL);
2848 
2849 	if (rc) {
2850 		QL_DPRINT11(ha, "ecore_chain_alloc qp->sq.pbl failed[%d]\n", rc);
2851 		return rc;
2852 	}
2853 
2854 	in_params->sq_num_pages = ecore_chain_get_page_cnt(&qp->sq.pbl);
2855 	in_params->sq_pbl_ptr = ecore_chain_get_pbl_phys(&qp->sq.pbl);
2856 
2857 	if (!qp->srq) {
2858                 rc = ecore_chain_alloc(
2859                         dev->cdev,
2860                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2861                         ECORE_CHAIN_MODE_PBL,
2862                         ECORE_CHAIN_CNT_TYPE_U32,
2863                         n_rq_elems,
2864                         QLNXR_RQE_ELEMENT_SIZE,
2865                         &qp->rq.pbl,
2866                         NULL);
2867 
2868 		if (rc) {
2869 			QL_DPRINT11(ha,
2870 				"ecore_chain_alloc qp->rq.pbl failed[%d]\n", rc);
2871 			return rc;
2872 		}
2873 
2874 		in_params->rq_num_pages = ecore_chain_get_page_cnt(&qp->rq.pbl);
2875 		in_params->rq_pbl_ptr = ecore_chain_get_pbl_phys(&qp->rq.pbl);
2876 	}
2877 
2878 	qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, in_params, &out_params);
2879 
2880 	if (!qp->ecore_qp) {
2881 		QL_DPRINT11(ha, "qp->ecore_qp == NULL\n");
2882 		return -EINVAL;
2883 	}
2884 
2885 	qp->qp_id = out_params.qp_id;
2886 	qp->icid = out_params.icid;
2887 
2888 	qlnxr_set_roce_db_info(dev, qp);
2889 
2890 	QL_DPRINT12(ha, "exit\n");
2891 	return 0;
2892 }
2893 
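/*
 * iWARP kernel QP creation: in contrast to RoCE, the QP is created in
 * ecore first and the SQ/RQ chains are then allocated on top of the
 * external PBLs (ext_pbl) returned by ecore.
 */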
2894 static int
2895 qlnxr_iwarp_create_kernel_qp(struct qlnxr_dev *dev,
2896 	struct qlnxr_qp *qp,
2897 	struct ecore_rdma_create_qp_in_params *in_params,
2898 	u32 n_sq_elems,
2899 	u32 n_rq_elems)
2900 {
2901 	struct ecore_rdma_destroy_qp_out_params d_out_params;
2902 	struct ecore_rdma_create_qp_out_params out_params;
2903 	struct ecore_chain_ext_pbl ext_pbl;
2904 	int rc;
2905 	qlnx_host_t	*ha;
2906 
2907 	ha = dev->ha;
2908 
2909 	QL_DPRINT12(ha, "enter\n");
2910 
2911 	in_params->sq_num_pages = ECORE_CHAIN_PAGE_CNT(n_sq_elems,
2912 						     QLNXR_SQE_ELEMENT_SIZE,
2913 						     ECORE_CHAIN_MODE_PBL);
2914 	in_params->rq_num_pages = ECORE_CHAIN_PAGE_CNT(n_rq_elems,
2915 						     QLNXR_RQE_ELEMENT_SIZE,
2916 						     ECORE_CHAIN_MODE_PBL);
2917 
2918 	QL_DPRINT12(ha, "n_sq_elems = 0x%x"
2919 		" n_rq_elems = 0x%x in_params\n"
2920 		"\t\t\tqp_handle_lo\t\t= 0x%08x\n"
2921 		"\t\t\tqp_handle_hi\t\t= 0x%08x\n"
2922 		"\t\t\tqp_handle_async_lo\t\t= 0x%08x\n"
2923 		"\t\t\tqp_handle_async_hi\t\t= 0x%08x\n"
2924 		"\t\t\tuse_srq\t\t\t= 0x%x\n"
2925 		"\t\t\tsignal_all\t\t= 0x%x\n"
2926 		"\t\t\tfmr_and_reserved_lkey\t= 0x%x\n"
2927 		"\t\t\tpd\t\t\t= 0x%x\n"
2928 		"\t\t\tdpi\t\t\t= 0x%x\n"
2929 		"\t\t\tsq_cq_id\t\t\t= 0x%x\n"
2930 		"\t\t\tsq_num_pages\t\t= 0x%x\n"
2931 		"\t\t\tsq_pbl_ptr\t\t= %p\n"
2932 		"\t\t\tmax_sq_sges\t\t= 0x%x\n"
2933 		"\t\t\trq_cq_id\t\t\t= 0x%x\n"
2934 		"\t\t\trq_num_pages\t\t= 0x%x\n"
2935 		"\t\t\trq_pbl_ptr\t\t= %p\n"
2936 		"\t\t\tsrq_id\t\t\t= 0x%x\n"
2937 		"\t\t\tstats_queue\t\t= 0x%x\n",
2938 		n_sq_elems, n_rq_elems,
2939 		in_params->qp_handle_lo,
2940 		in_params->qp_handle_hi,
2941 		in_params->qp_handle_async_lo,
2942 		in_params->qp_handle_async_hi,
2943 		in_params->use_srq,
2944 		in_params->signal_all,
2945 		in_params->fmr_and_reserved_lkey,
2946 		in_params->pd,
2947 		in_params->dpi,
2948 		in_params->sq_cq_id,
2949 		in_params->sq_num_pages,
2950 		(void *)in_params->sq_pbl_ptr,
2951 		in_params->max_sq_sges,
2952 		in_params->rq_cq_id,
2953 		in_params->rq_num_pages,
2954 		(void *)in_params->rq_pbl_ptr,
2955 		in_params->srq_id,
2956 		in_params->stats_queue );
2957 
2958 	memset(&out_params, 0, sizeof (struct ecore_rdma_create_qp_out_params));
2959 	memset(&ext_pbl, 0, sizeof (struct ecore_chain_ext_pbl));
2960 
2961 	qp->ecore_qp = ecore_rdma_create_qp(dev->rdma_ctx, in_params, &out_params);
2962 
2963 	if (!qp->ecore_qp) {
2964 		QL_DPRINT11(ha, "ecore_rdma_create_qp failed\n");
2965 		return -EINVAL;
2966 	}
2967 
2968 	/* Now we allocate the chain */
2969 	ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
2970 	ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
2971 
2972 	QL_DPRINT12(ha, "ext_pbl.p_pbl_virt = %p "
2973 		"ext_pbl.p_pbl_phys = %p\n",
2974 		ext_pbl.p_pbl_virt, ext_pbl.p_pbl_phys);
2975 
2976         rc = ecore_chain_alloc(
2977                 dev->cdev,
2978                 ECORE_CHAIN_USE_TO_PRODUCE,
2979                 ECORE_CHAIN_MODE_PBL,
2980                 ECORE_CHAIN_CNT_TYPE_U32,
2981                 n_sq_elems,
2982                 QLNXR_SQE_ELEMENT_SIZE,
2983                 &qp->sq.pbl,
2984                 &ext_pbl);
2985 
2986 	if (rc) {
2987 		QL_DPRINT11(ha,
2988 			"ecore_chain_alloc qp->sq.pbl failed rc = %d\n", rc);
2989 		goto err;
2990 	}
2991 
2992 	ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
2993 	ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
2994 
2995 	QL_DPRINT12(ha, "ext_pbl.p_pbl_virt = %p "
2996 		"ext_pbl.p_pbl_phys = %p\n",
2997 		ext_pbl.p_pbl_virt, ext_pbl.p_pbl_phys);
2998 
2999 	if (!qp->srq) {
3000                 rc = ecore_chain_alloc(
3001                         dev->cdev,
3002                         ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
3003                         ECORE_CHAIN_MODE_PBL,
3004                         ECORE_CHAIN_CNT_TYPE_U32,
3005                         n_rq_elems,
3006                         QLNXR_RQE_ELEMENT_SIZE,
3007                         &qp->rq.pbl,
3008                         &ext_pbl);
3009 
3010 		if (rc) {
3011 			QL_DPRINT11(ha, "ecore_chain_alloc qp->rq.pbl"
3012 				" failed rc = %d\n", rc);
3013 			goto err;
3014 		}
3015 	}
3016 
3017 	QL_DPRINT12(ha, "qp_id = 0x%x icid =0x%x\n",
3018 		out_params.qp_id, out_params.icid);
3019 
3020 	qp->qp_id = out_params.qp_id;
3021 	qp->icid = out_params.icid;
3022 
3023 	qlnxr_set_iwarp_db_info(dev, qp);
3024 
3025 	QL_DPRINT12(ha, "exit\n");
3026 	return 0;
3027 
3028 err:
3029 	ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp, &d_out_params);
3030 
3031 	QL_DPRINT12(ha, "exit rc = %d\n", rc);
3032 	return rc;
3033 }
3034 
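/*
 * Create a kernel QP: size and allocate the SQ/RQ shadow arrays
 * (wqe_wr_id/rqe_wr_id), fill in the ecore input parameters and dispatch
 * to the RoCE- or iWARP-specific creation path.
 */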
3035 static int
3036 qlnxr_create_kernel_qp(struct qlnxr_dev *dev,
3037 	struct qlnxr_qp *qp,
3038 	struct ib_pd *ibpd,
3039 	struct ib_qp_init_attr *attrs)
3040 {
3041 	struct ecore_rdma_create_qp_in_params in_params;
3042 	struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
3043 	int rc = -EINVAL;
3044 	u32 n_rq_elems;
3045 	u32 n_sq_elems;
3046 	u32 n_sq_entries;
3047 	struct ecore_rdma_device *qattr = ecore_rdma_query_device(dev->rdma_ctx);
3048 	qlnx_host_t	*ha;
3049 
3050 	ha = dev->ha;
3051 
3052 	QL_DPRINT12(ha, "enter\n");
3053 
3054 	memset(&in_params, 0, sizeof(in_params));
3055 
3056 	/* A single work request may take up to MAX_SQ_WQE_SIZE elements in
3057 	 * the ring. The ring should allow at least a single WR, even if the
3058 	 * user requested none, due to allocation issues.
3059 	 * We should add an extra WR since the prod and cons indices of
3060 	 * wqe_wr_id are managed in such a way that the WQ is considered full
3061 	 * when (prod+1)%max_wr==cons. We currently don't do that because we
3062 	 * double the number of entries due to an iSER issue that pushes far more
3063 	 * WRs than indicated. If we decline its ib_post_send() then we get
3064 	 * error prints in the dmesg we'd like to avoid.
3065 	 */
3066 	qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
3067 			      qattr->max_wqe);
3068 
3069 	qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),
3070 			GFP_KERNEL);
3071 	if (!qp->wqe_wr_id) {
3072 		QL_DPRINT11(ha, "failed SQ shadow memory allocation\n");
3073 		return -ENOMEM;
3074 	}
3075 
3076 	/* QP handle to be written in CQE */
3077 	in_params.qp_handle_lo = lower_32_bits((uintptr_t)qp);
3078 	in_params.qp_handle_hi = upper_32_bits((uintptr_t)qp);
3079 
3080 	/* A single work request may take up to MAX_RQ_WQE_SIZE elements in
3081 	 * the ring. The ring should allow at least a single WR, even if the
3082 	 * user requested none, due to allocation issues.
3083 	 */
3084 	qp->rq.max_wr = (u16)max_t(u32, attrs->cap.max_recv_wr, 1);
3085 
3086 	/* Allocate driver internal RQ array */
3087 	if (!qp->srq) {
3088 		qp->rqe_wr_id = kzalloc(qp->rq.max_wr * sizeof(*qp->rqe_wr_id),
3089 					GFP_KERNEL);
3090 		if (!qp->rqe_wr_id) {
3091 			QL_DPRINT11(ha, "failed RQ shadow memory allocation\n");
3092 			kfree(qp->wqe_wr_id);
3093 			return -ENOMEM;
3094 		}
3095 	}
3096 
3097 	//qlnxr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
3098 
3099         in_params.qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
3100         in_params.qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
3101 
3102         in_params.signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
3103         in_params.fmr_and_reserved_lkey = true;
3104         in_params.pd = pd->pd_id;
3105         in_params.dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
3106         in_params.sq_cq_id = get_qlnxr_cq(attrs->send_cq)->icid;
3107         in_params.stats_queue = 0;
3108 
3109         in_params.rq_cq_id = get_qlnxr_cq(attrs->recv_cq)->icid;
3110 
3111         if (qp->srq) {
3112                 /* QP is associated with SRQ instead of RQ */
3113                 in_params.srq_id = qp->srq->srq_id;
3114                 in_params.use_srq = true;
3115                 QL_DPRINT11(ha, "exit srq_id = 0x%x use_srq = 0x%x\n",
3116                         in_params.srq_id, in_params.use_srq);
3117         } else {
3118         	in_params.srq_id = 0;
3119 		in_params.use_srq = false;
3120 	}
3121 
3122 	n_sq_entries = attrs->cap.max_send_wr;
3123 	n_sq_entries = min_t(u32, n_sq_entries, qattr->max_wqe);
3124 	n_sq_entries = max_t(u32, n_sq_entries, 1);
3125 	n_sq_elems = n_sq_entries * QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
3126 
3127 	n_rq_elems = qp->rq.max_wr * QLNXR_MAX_RQE_ELEMENTS_PER_RQE;
3128 
3129 	if (QLNX_IS_ROCE(dev)) {
3130 		rc = qlnxr_roce_create_kernel_qp(dev, qp, &in_params,
3131 						n_sq_elems, n_rq_elems);
3132 	} else {
3133 		rc = qlnxr_iwarp_create_kernel_qp(dev, qp, &in_params,
3134 						 n_sq_elems, n_rq_elems);
3135 	}
3136 
3137 	if (rc)
3138 		qlnxr_cleanup_kernel(dev, qp);
3139 
3140 	QL_DPRINT12(ha, "exit [%d]\n", rc);
3141 	return rc;
3142 }
3143 
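/*
 * qlnxr_create_qp: the create-QP verb. Validates the attributes, sets up
 * the common QP state and branches to the GSI, user or kernel creation
 * path; for iWARP the new QP is also registered in the QP idr.
 */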
3144 struct ib_qp *
3145 qlnxr_create_qp(struct ib_pd *ibpd,
3146 		struct ib_qp_init_attr *attrs,
3147 		struct ib_udata *udata)
3148 {
3149 	struct qlnxr_dev *dev = get_qlnxr_dev(ibpd->device);
3150 	struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
3151 	struct qlnxr_qp *qp;
3152 	int rc = 0;
3153 	qlnx_host_t	*ha;
3154 
3155 	ha = dev->ha;
3156 
3157 	QL_DPRINT12(ha, "enter\n");
3158 
3159 	rc = qlnxr_check_qp_attrs(ibpd, dev, attrs, udata);
3160 	if (rc) {
3161 		QL_DPRINT11(ha, "qlnxr_check_qp_attrs failed [%d]\n", rc);
3162 		return ERR_PTR(rc);
3163 	}
3164 
3165 	QL_DPRINT12(ha, "called from %s, event_handle=%p,"
3166 		" eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
3167 		(udata ? "user library" : "kernel"),
3168 		attrs->event_handler, pd,
3169 		get_qlnxr_cq(attrs->send_cq),
3170 		get_qlnxr_cq(attrs->send_cq)->icid,
3171 		get_qlnxr_cq(attrs->recv_cq),
3172 		get_qlnxr_cq(attrs->recv_cq)->icid);
3173 
3174 	qp = qlnx_zalloc(sizeof(struct qlnxr_qp));
3175 
3176 	if (!qp) {
3177 		QL_DPRINT11(ha, "qlnx_zalloc(qp) failed\n");
3178 		return ERR_PTR(-ENOMEM);
3179 	}
3180 
3181 	qlnxr_set_common_qp_params(dev, qp, pd, attrs);
3182 
3183 	if (attrs->qp_type == IB_QPT_GSI) {
3184 		QL_DPRINT11(ha, "calling qlnxr_create_gsi_qp\n");
3185 		return qlnxr_create_gsi_qp(dev, attrs, qp);
3186 	}
3187 
3188 	if (udata) {
3189 		rc = qlnxr_create_user_qp(dev, qp, ibpd, udata, attrs);
3190 
3191 		if (rc) {
3192 			QL_DPRINT11(ha, "qlnxr_create_user_qp failed\n");
3193 			goto err;
3194 		}
3195 	} else {
3196 		rc = qlnxr_create_kernel_qp(dev, qp, ibpd, attrs);
3197 
3198 		if (rc) {
3199 			QL_DPRINT11(ha, "qlnxr_create_kernel_qp failed\n");
3200 			goto err;
3201 		}
3202 	}
3203 
3204 	qp->ibqp.qp_num = qp->qp_id;
3205 
3206 	rc = qlnxr_idr_add(dev, qp, qp->qp_id);
3207 
3208 	if (rc) {
3209 		QL_DPRINT11(ha, "qlnxr_idr_add failed\n");
3210 		goto err;
3211 	}
3212 
3213 	QL_DPRINT12(ha, "exit [%p]\n", &qp->ibqp);
3214 
3215 	return &qp->ibqp;
3216 err:
3217 	QL_DPRINT12(ha, "failed exit\n");
3218 	return ERR_PTR(-EFAULT);
3219 }
3220 
3221 static enum ib_qp_state
3222 qlnxr_get_ibqp_state(enum ecore_roce_qp_state qp_state)
3223 {
3224 	enum ib_qp_state state = IB_QPS_ERR;
3225 
3226 	switch (qp_state) {
3227 	case ECORE_ROCE_QP_STATE_RESET:
3228 		state = IB_QPS_RESET;
3229 		break;
3230 
3231 	case ECORE_ROCE_QP_STATE_INIT:
3232 		state = IB_QPS_INIT;
3233 		break;
3234 
3235 	case ECORE_ROCE_QP_STATE_RTR:
3236 		state = IB_QPS_RTR;
3237 		break;
3238 
3239 	case ECORE_ROCE_QP_STATE_RTS:
3240 		state = IB_QPS_RTS;
3241 		break;
3242 
3243 	case ECORE_ROCE_QP_STATE_SQD:
3244 		state = IB_QPS_SQD;
3245 		break;
3246 
3247 	case ECORE_ROCE_QP_STATE_ERR:
3248 		state = IB_QPS_ERR;
3249 		break;
3250 
3251 	case ECORE_ROCE_QP_STATE_SQE:
3252 		state = IB_QPS_SQE;
3253 		break;
3254 	}
3255 	return state;
3256 }
3257 
3258 static enum ecore_roce_qp_state
3259 qlnxr_get_state_from_ibqp(enum ib_qp_state qp_state)
3260 {
3261 	enum ecore_roce_qp_state ecore_qp_state;
3262 
3263 	ecore_qp_state = ECORE_ROCE_QP_STATE_ERR;
3264 
3265 	switch (qp_state) {
3266 	case IB_QPS_RESET:
3267 		ecore_qp_state =  ECORE_ROCE_QP_STATE_RESET;
3268 		break;
3269 
3270 	case IB_QPS_INIT:
3271 		ecore_qp_state =  ECORE_ROCE_QP_STATE_INIT;
3272 		break;
3273 
3274 	case IB_QPS_RTR:
3275 		ecore_qp_state =  ECORE_ROCE_QP_STATE_RTR;
3276 		break;
3277 
3278 	case IB_QPS_RTS:
3279 		ecore_qp_state =  ECORE_ROCE_QP_STATE_RTS;
3280 		break;
3281 
3282 	case IB_QPS_SQD:
3283 		ecore_qp_state =  ECORE_ROCE_QP_STATE_SQD;
3284 		break;
3285 
3286 	case IB_QPS_ERR:
3287 		ecore_qp_state =  ECORE_ROCE_QP_STATE_ERR;
3288 		break;
3289 
3290 	default:
3291 		ecore_qp_state =  ECORE_ROCE_QP_STATE_ERR;
3292 		break;
3293 	}
3294 
3295 	return (ecore_qp_state);
3296 }
3297 
3298 static void
3299 qlnxr_reset_qp_hwq_info(struct qlnxr_qp_hwq_info *qph)
3300 {
3301 	ecore_chain_reset(&qph->pbl);
3302 	qph->prod = qph->cons = 0;
3303 	qph->wqe_cons = 0;
3304 	qph->db_data.data.value = cpu_to_le16(0);
3305 
3306 	return;
3307 }
3308 
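/*
 * Apply a QP state transition to the driver's software state, rejecting
 * transitions the RoCE state machine does not allow. On INIT->RTR the RQ
 * doorbell is rung in case post_recv was called before the move to RTR.
 */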
3309 static int
3310 qlnxr_update_qp_state(struct qlnxr_dev *dev,
3311 	struct qlnxr_qp *qp,
3312 	enum ecore_roce_qp_state new_state)
3313 {
3314 	int		status = 0;
3315 	uint32_t	reg_addr;
3316 	struct ecore_dev *cdev;
3317 	qlnx_host_t	*ha;
3318 
3319 	ha = dev->ha;
3320 	cdev = &ha->cdev;
3321 
3322 	QL_DPRINT12(ha, "enter qp = %p new_state = 0x%x qp->state = 0x%x\n",
3323 		qp, new_state, qp->state);
3324 
3325 	if (new_state == qp->state) {
3326 		return 0;
3327 	}
3328 
3329 	switch (qp->state) {
3330 	case ECORE_ROCE_QP_STATE_RESET:
3331 		switch (new_state) {
3332 		case ECORE_ROCE_QP_STATE_INIT:
3333 			qp->prev_wqe_size = 0;
3334 			qlnxr_reset_qp_hwq_info(&qp->sq);
3335 			if (!(qp->srq))
3336 				qlnxr_reset_qp_hwq_info(&qp->rq);
3337 			break;
3338 		default:
3339 			status = -EINVAL;
3340 			break;
3341 		};
3342 		break;
3343 	case ECORE_ROCE_QP_STATE_INIT:
3344 		/* INIT->XXX */
3345 		switch (new_state) {
3346 		case ECORE_ROCE_QP_STATE_RTR:
3347 		/* Update doorbell (in case post_recv was done before move to RTR) */
3348 			if (qp->srq)
3349 				break;
3350 			wmb();
3351 			//writel(qp->rq.db_data.raw, qp->rq.db);
3352 			//if (QLNX_IS_IWARP(dev))
3353 			//	writel(qp->rq.iwarp_db2_data.raw,
3354 			//	       qp->rq.iwarp_db2);
3355 
3356 			reg_addr = (uint32_t)((uint8_t *)qp->rq.db -
3357 					(uint8_t *)cdev->doorbells);
3358 
3359 			bus_write_4(ha->pci_dbells, reg_addr, qp->rq.db_data.raw);
3360 			bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
3361 
3362 			if (QLNX_IS_IWARP(dev)) {
3363 				reg_addr = (uint32_t)((uint8_t *)qp->rq.iwarp_db2 -
3364 					(uint8_t *)cdev->doorbells);
3365 				bus_write_4(ha->pci_dbells, reg_addr,\
3366 					qp->rq.iwarp_db2_data.raw);
3367 				bus_barrier(ha->pci_dbells,  0, 0,\
3368 					BUS_SPACE_BARRIER_READ);
3369 			}
3370 
3371 
3372 			mmiowb();
3373 			break;
3374 		case ECORE_ROCE_QP_STATE_ERR:
3375 			/* TBD:flush qps... */
3376 			break;
3377 		default:
3378 			/* invalid state change. */
3379 			status = -EINVAL;
3380 			break;
3381 		};
3382 		break;
3383 	case ECORE_ROCE_QP_STATE_RTR:
3384 		/* RTR->XXX */
3385 		switch (new_state) {
3386 		case ECORE_ROCE_QP_STATE_RTS:
3387 			break;
3388 		case ECORE_ROCE_QP_STATE_ERR:
3389 			break;
3390 		default:
3391 			/* invalid state change. */
3392 			status = -EINVAL;
3393 			break;
3394 		};
3395 		break;
3396 	case ECORE_ROCE_QP_STATE_RTS:
3397 		/* RTS->XXX */
3398 		switch (new_state) {
3399 		case ECORE_ROCE_QP_STATE_SQD:
3400 			break;
3401 		case ECORE_ROCE_QP_STATE_ERR:
3402 			break;
3403 		default:
3404 			/* invalid state change. */
3405 			status = -EINVAL;
3406 			break;
3407 		};
3408 		break;
3409 	case ECORE_ROCE_QP_STATE_SQD:
3410 		/* SQD->XXX */
3411 		switch (new_state) {
3412 		case ECORE_ROCE_QP_STATE_RTS:
3413 		case ECORE_ROCE_QP_STATE_ERR:
3414 			break;
3415 		default:
3416 			/* invalid state change. */
3417 			status = -EINVAL;
3418 			break;
3419 		};
3420 		break;
3421 	case ECORE_ROCE_QP_STATE_ERR:
3422 		/* ERR->XXX */
3423 		switch (new_state) {
3424 		case ECORE_ROCE_QP_STATE_RESET:
3425 			if ((qp->rq.prod != qp->rq.cons) ||
3426 			    (qp->sq.prod != qp->sq.cons)) {
3427 				QL_DPRINT11(ha,
3428 					"Error->Reset with rq/sq "
3429 					"not empty rq.prod=0x%x rq.cons=0x%x"
3430 					" sq.prod=0x%x sq.cons=0x%x\n",
3431 					qp->rq.prod, qp->rq.cons,
3432 					qp->sq.prod, qp->sq.cons);
3433 				status = -EINVAL;
3434 			}
3435 			break;
3436 		default:
3437 			status = -EINVAL;
3438 			break;
3439 		};
3440 		break;
3441 	default:
3442 		status = -EINVAL;
3443 		break;
3444 	};
3445 
3446 	QL_DPRINT12(ha, "exit\n");
3447 	return status;
3448 }
3449 
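/*
 * qlnxr_modify_qp: translate the ib_qp_attr/attr_mask pair into ecore
 * modify-QP flags (state, MTU, address vector, timeouts, PSNs, atomic
 * limits) and issue the modify for non-GSI QPs. When moving a kernel QP
 * to the error state, the software state is updated before the ramrod to
 * avoid racing the fast path.
 */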
3450 int
3451 qlnxr_modify_qp(struct ib_qp	*ibqp,
3452 	struct ib_qp_attr	*attr,
3453 	int			attr_mask,
3454 	struct ib_udata		*udata)
3455 {
3456 	int rc = 0;
3457 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
3458 	struct qlnxr_dev *dev = get_qlnxr_dev(&qp->dev->ibdev);
3459 	struct ecore_rdma_modify_qp_in_params qp_params = { 0 };
3460 	enum ib_qp_state old_qp_state, new_qp_state;
3461 	struct ecore_rdma_device *qattr = ecore_rdma_query_device(dev->rdma_ctx);
3462 	qlnx_host_t	*ha;
3463 
3464 	ha = dev->ha;
3465 
3466 	QL_DPRINT12(ha,
3467 		"enter qp = %p attr_mask = 0x%x, state = %d udata = %p\n",
3468 		qp, attr_mask, attr->qp_state, udata);
3469 
3470 	old_qp_state = qlnxr_get_ibqp_state(qp->state);
3471 	if (attr_mask & IB_QP_STATE)
3472 		new_qp_state = attr->qp_state;
3473 	else
3474 		new_qp_state = old_qp_state;
3475 
3476 	if (QLNX_IS_ROCE(dev)) {
3477 		if (!ib_modify_qp_is_ok(old_qp_state,
3478 					new_qp_state,
3479 					ibqp->qp_type,
3480 					attr_mask )) {
3481 			QL_DPRINT12(ha,
3482 				"invalid attribute mask=0x%x"
3483 				" specified for qpn=0x%x of type=0x%x \n"
3484 				" old_qp_state=0x%x, new_qp_state=0x%x\n",
3485 				attr_mask, qp->qp_id, ibqp->qp_type,
3486 				old_qp_state, new_qp_state);
3487 			rc = -EINVAL;
3488 			goto err;
3489 		}
3490 	}
3491 	/* translate the masks... */
3492 	if (attr_mask & IB_QP_STATE) {
3493 		SET_FIELD(qp_params.modify_flags,
3494 			  ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
3495 		qp_params.new_state = qlnxr_get_state_from_ibqp(attr->qp_state);
3496 	}
3497 
3498 	// TBD consider changing ecore to be a flag as well...
3499 	if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
3500 		qp_params.sqd_async = true;
3501 
3502 	if (attr_mask & IB_QP_PKEY_INDEX) {
3503 		SET_FIELD(qp_params.modify_flags,
3504 			  ECORE_ROCE_MODIFY_QP_VALID_PKEY,
3505 			  1);
3506 		if (attr->pkey_index >= QLNXR_ROCE_PKEY_TABLE_LEN) {
3507 			rc = -EINVAL;
3508 			goto err;
3509 		}
3510 
3511 		qp_params.pkey = QLNXR_ROCE_PKEY_DEFAULT;
3512 	}
3513 
3514 	if (attr_mask & IB_QP_QKEY) {
3515 		qp->qkey = attr->qkey;
3516 	}
3517 
3518 	/* tbd consider splitting in ecore.. */
3519 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
3520 		SET_FIELD(qp_params.modify_flags,
3521 			  ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
3522 		qp_params.incoming_rdma_read_en =
3523 			attr->qp_access_flags & IB_ACCESS_REMOTE_READ;
3524 		qp_params.incoming_rdma_write_en =
3525 			attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE;
3526 		qp_params.incoming_atomic_en =
3527 			attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC;
3528 	}
3529 
3530 	if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
3531 		if (attr_mask & IB_QP_PATH_MTU) {
3532 			if (attr->path_mtu < IB_MTU_256 ||
3533 			    attr->path_mtu > IB_MTU_4096) {
3534 				QL_DPRINT12(ha,
3535 					"Only MTU sizes of 256, 512, 1024,"
3536 					" 2048 and 4096 are supported "
3537 					" attr->path_mtu = [%d]\n",
3538 					attr->path_mtu);
3539 
3540 				rc = -EINVAL;
3541 				goto err;
3542 			}
3543 			qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
3544 				      ib_mtu_enum_to_int(
3545 						iboe_get_mtu(dev->ha->ifp->if_mtu)));
3546 		}
3547 
3548 		if (qp->mtu == 0) {
3549 			qp->mtu = ib_mtu_enum_to_int(
3550 					iboe_get_mtu(dev->ha->ifp->if_mtu));
3551 			QL_DPRINT12(ha, "fixing zeroed MTU to qp->mtu = %d\n",
3552 				qp->mtu);
3553 		}
3554 
3555 		SET_FIELD(qp_params.modify_flags,
3556 			  ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR,
3557 			  1);
3558 
3559 		qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
3560 		qp_params.flow_label = attr->ah_attr.grh.flow_label;
3561 		qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
3562 
3563 		qp->sgid_idx = attr->ah_attr.grh.sgid_index;
3564 
3565 		get_gid_info(ibqp, attr, attr_mask, dev, qp, &qp_params);
3566 
3567 		rc = qlnxr_get_dmac(dev, &attr->ah_attr, qp_params.remote_mac_addr);
3568 		if (rc)
3569 			return rc;
3570 
3571 		qp_params.use_local_mac = true;
3572 		memcpy(qp_params.local_mac_addr, dev->ha->primary_mac, ETH_ALEN);
3573 
3574 		QL_DPRINT12(ha, "dgid=0x%x:0x%x:0x%x:0x%x\n",
3575 		       qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
3576 		       qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
3577 		QL_DPRINT12(ha, "sgid=0x%x:0x%x:0x%x:0x%x\n",
3578 		       qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
3579 		       qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
3580 		QL_DPRINT12(ha,
3581 			"remote_mac=[0x%x:0x%x:0x%x:0x%x:0x%x:0x%x]\n",
3582 			qp_params.remote_mac_addr[0],
3583 			qp_params.remote_mac_addr[1],
3584 			qp_params.remote_mac_addr[2],
3585 			qp_params.remote_mac_addr[3],
3586 			qp_params.remote_mac_addr[4],
3587 			qp_params.remote_mac_addr[5]);
3588 
3589 		qp_params.mtu = qp->mtu;
3590 	}
3591 
3592 	if (qp_params.mtu == 0) {
3593 		/* stay with current MTU */
3594 		if (qp->mtu) {
3595 			qp_params.mtu = qp->mtu;
3596 		} else {
3597 			qp_params.mtu = ib_mtu_enum_to_int(
3598 						iboe_get_mtu(dev->ha->ifp->if_mtu));
3599 		}
3600 	}
3601 
3602 	if (attr_mask & IB_QP_TIMEOUT) {
3603 		SET_FIELD(qp_params.modify_flags, \
3604 			ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
3605 
3606 		qp_params.ack_timeout = attr->timeout;
3607 		if (attr->timeout) {
3608 			u32 temp;
3609 
3610 			/* 12.7.34 LOCAL ACK TIMEOUT
3611 			 * Value representing the transport (ACK) timeout for
3612 			 * use by the remote, expressed as
3613 			 * 4.096 us * 2^(Local ACK Timeout).
3614 			 */
3615 			/* We use 1UL since the intermediate value may
3616 			 * overflow 32 bits.
3617 			 */
3618 			temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
3619 			qp_params.ack_timeout = temp; /* FW requires [msec] */
3620 		}
3621 		else
3622 			qp_params.ack_timeout = 0; /* infinite */
3623 	}
3624 	if (attr_mask & IB_QP_RETRY_CNT) {
3625 		SET_FIELD(qp_params.modify_flags,\
3626 			 ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
3627 		qp_params.retry_cnt = attr->retry_cnt;
3628 	}
3629 
3630 	if (attr_mask & IB_QP_RNR_RETRY) {
3631 		SET_FIELD(qp_params.modify_flags,
3632 			  ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT,
3633 			  1);
3634 		qp_params.rnr_retry_cnt = attr->rnr_retry;
3635 	}
3636 
3637 	if (attr_mask & IB_QP_RQ_PSN) {
3638 		SET_FIELD(qp_params.modify_flags,
3639 			  ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN,
3640 			  1);
3641 		qp_params.rq_psn = attr->rq_psn;
3642 		qp->rq_psn = attr->rq_psn;
3643 	}
3644 
3645 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
3646 		if (attr->max_rd_atomic > qattr->max_qp_req_rd_atomic_resc) {
3647 			rc = -EINVAL;
3648 			QL_DPRINT12(ha,
3649 				"unsupported  max_rd_atomic=%d, supported=%d\n",
3650 				attr->max_rd_atomic,
3651 				qattr->max_qp_req_rd_atomic_resc);
3652 			goto err;
3653 		}
3654 
3655 		SET_FIELD(qp_params.modify_flags,
3656 			  ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ,
3657 			  1);
3658 		qp_params.max_rd_atomic_req = attr->max_rd_atomic;
3659 	}
3660 
3661 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
3662 		SET_FIELD(qp_params.modify_flags,
3663 			  ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER,
3664 			  1);
3665 		qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
3666 	}
3667 
3668 	if (attr_mask & IB_QP_SQ_PSN) {
3669 		SET_FIELD(qp_params.modify_flags,
3670 			  ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN,
3671 			  1);
3672 		qp_params.sq_psn = attr->sq_psn;
3673 		qp->sq_psn = attr->sq_psn;
3674 	}
3675 
3676 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
3677 		if (attr->max_dest_rd_atomic >
3678 		    qattr->max_qp_resp_rd_atomic_resc) {
3679 			QL_DPRINT12(ha,
3680 				"unsupported max_dest_rd_atomic=%d, "
3681 				"supported=%d\n",
3682 				attr->max_dest_rd_atomic,
3683 				qattr->max_qp_resp_rd_atomic_resc);
3684 
3685 			rc = -EINVAL;
3686 			goto err;
3687 		}
3688 
3689 		SET_FIELD(qp_params.modify_flags,
3690 			  ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP,
3691 			  1);
3692 		qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
3693 	}
3694 
3695 	if (attr_mask & IB_QP_DEST_QPN) {
3696 		SET_FIELD(qp_params.modify_flags,
3697 			  ECORE_ROCE_MODIFY_QP_VALID_DEST_QP,
3698 			  1);
3699 
3700 		qp_params.dest_qp = attr->dest_qp_num;
3701 		qp->dest_qp_num = attr->dest_qp_num;
3702 	}
3703 
3704 	/*
3705 	 * Update the QP state before the actual ramrod to prevent a race with
3706 	 * fast path. Modifying the QP state to error will cause the device to
3707 	 * flush the CQEs; while polling, the flushed CQEs would be considered
3708 	 * a potential issue if the QP were not already in the error state.
3709 	 */
3710 	if ((attr_mask & IB_QP_STATE) && (qp->qp_type != IB_QPT_GSI) &&
3711 		(!udata) && (qp_params.new_state == ECORE_ROCE_QP_STATE_ERR))
3712 		qp->state = ECORE_ROCE_QP_STATE_ERR;
3713 
3714 	if (qp->qp_type != IB_QPT_GSI)
3715 		rc = ecore_rdma_modify_qp(dev->rdma_ctx, qp->ecore_qp, &qp_params);
3716 
3717 	if (attr_mask & IB_QP_STATE) {
3718 		if ((qp->qp_type != IB_QPT_GSI) && (!udata))
3719 			rc = qlnxr_update_qp_state(dev, qp, qp_params.new_state);
3720 		qp->state = qp_params.new_state;
3721 	}
3722 
3723 err:
3724 	QL_DPRINT12(ha, "exit\n");
3725 	return rc;
3726 }
3727 
3728 static int
3729 qlnxr_to_ib_qp_acc_flags(struct ecore_rdma_query_qp_out_params *params)
3730 {
3731 	int ib_qp_acc_flags = 0;
3732 
3733 	if (params->incoming_rdma_write_en)
3734 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
3735 	if (params->incoming_rdma_read_en)
3736 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
3737 	if (params->incoming_atomic_en)
3738 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
3739 	if (true) /* FIXME -> local write ?? */
3740 		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
3741 
3742 	return ib_qp_acc_flags;
3743 }
3744 
3745 static enum ib_mtu
3746 qlnxr_mtu_int_to_enum(u16 mtu)
3747 {
3748 	enum ib_mtu ib_mtu_size;
3749 
3750 	switch (mtu) {
3751 	case 256:
3752 		ib_mtu_size = IB_MTU_256;
3753 		break;
3754 
3755 	case 512:
3756 		ib_mtu_size = IB_MTU_512;
3757 		break;
3758 
3759 	case 1024:
3760 		ib_mtu_size = IB_MTU_1024;
3761 		break;
3762 
3763 	case 2048:
3764 		ib_mtu_size = IB_MTU_2048;
3765 		break;
3766 
3767 	case 4096:
3768 		ib_mtu_size = IB_MTU_4096;
3769 		break;
3770 
3771 	default:
3772 		ib_mtu_size = IB_MTU_1024;
3773 		break;
3774 	}
3775 	return (ib_mtu_size);
3776 }
3777 
3778 int
3779 qlnxr_query_qp(struct ib_qp *ibqp,
3780 	struct ib_qp_attr *qp_attr,
3781 	int attr_mask,
3782 	struct ib_qp_init_attr *qp_init_attr)
3783 {
3784 	int rc = 0;
3785 	struct ecore_rdma_query_qp_out_params params;
3786 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
3787 	struct qlnxr_dev *dev = qp->dev;
3788 	qlnx_host_t	*ha;
3789 
3790 	ha = dev->ha;
3791 
3792 	QL_DPRINT12(ha, "enter\n");
3793 
3794 	memset(&params, 0, sizeof(params));
3795 
3796 	rc = ecore_rdma_query_qp(dev->rdma_ctx, qp->ecore_qp, &params);
3797 	if (rc)
3798 		goto err;
3799 
3800 	memset(qp_attr, 0, sizeof(*qp_attr));
3801 	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3802 
3803 	qp_attr->qp_state = qlnxr_get_ibqp_state(params.state);
3804 	qp_attr->cur_qp_state = qlnxr_get_ibqp_state(params.state);
3805 
3806 	/* In some cases in iWARP qelr will ask for the state only */
3807 	if (QLNX_IS_IWARP(dev) && (attr_mask == IB_QP_STATE)) {
3808 		QL_DPRINT11(ha, "only state requested\n");
3809 		return 0;
3810 	}
3811 
3812 	qp_attr->path_mtu = qlnxr_mtu_int_to_enum(params.mtu);
3813 	qp_attr->path_mig_state = IB_MIG_MIGRATED;
3814 	qp_attr->rq_psn = params.rq_psn;
3815 	qp_attr->sq_psn = params.sq_psn;
3816 	qp_attr->dest_qp_num = params.dest_qp;
3817 
3818 	qp_attr->qp_access_flags = qlnxr_to_ib_qp_acc_flags(&params);
3819 
3820 	QL_DPRINT12(ha, "qp_state = 0x%x cur_qp_state = 0x%x "
3821 		"path_mtu = %d qp_access_flags = 0x%x\n",
3822 		qp_attr->qp_state, qp_attr->cur_qp_state, qp_attr->path_mtu,
3823 		qp_attr->qp_access_flags);
3824 
3825 	qp_attr->cap.max_send_wr = qp->sq.max_wr;
3826 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
3827 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
3828 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
3829 	qp_attr->cap.max_inline_data = qp->max_inline_data;
3830 	qp_init_attr->cap = qp_attr->cap;
3831 
3832 	memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
3833 	       sizeof(qp_attr->ah_attr.grh.dgid.raw));
3834 
3835 	qp_attr->ah_attr.grh.flow_label = params.flow_label;
3836 	qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
3837 	qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
3838 	qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
3839 
3840 	qp_attr->ah_attr.ah_flags = IB_AH_GRH;
3841 	qp_attr->ah_attr.port_num = 1; /* FIXME -> check this */
3842 	qp_attr->ah_attr.sl = 0;/* FIXME -> check this */
3843 	qp_attr->timeout = params.timeout;
3844 	qp_attr->rnr_retry = params.rnr_retry;
3845 	qp_attr->retry_cnt = params.retry_cnt;
3846 	qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
3847 	qp_attr->pkey_index = params.pkey_index;
3848 	qp_attr->port_num = 1; /* FIXME -> check this */
3849 	qp_attr->ah_attr.src_path_bits = 0;
3850 	qp_attr->ah_attr.static_rate = 0;
3851 	qp_attr->alt_pkey_index = 0;
3852 	qp_attr->alt_port_num = 0;
3853 	qp_attr->alt_timeout = 0;
3854 	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
3855 
3856 	qp_attr->sq_draining = (params.state == ECORE_ROCE_QP_STATE_SQD) ? 1 : 0;
3857 	qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
3858 	qp_attr->max_rd_atomic = params.max_rd_atomic;
3859 	qp_attr->en_sqd_async_notify = (params.sqd_async)? 1 : 0;
3860 
3861 	QL_DPRINT12(ha, "max_inline_data=%d\n",
3862 		qp_attr->cap.max_inline_data);
3863 
3864 err:
3865 	QL_DPRINT12(ha, "exit\n");
3866 	return rc;
3867 }
3868 
3869 static void
3870 qlnxr_cleanup_user(struct qlnxr_dev *dev, struct qlnxr_qp *qp)
3871 {
3872 	qlnx_host_t	*ha;
3873 
3874 	ha = dev->ha;
3875 
3876 	QL_DPRINT12(ha, "enter\n");
3877 
3878 	if (qp->usq.umem)
3879 		ib_umem_release(qp->usq.umem);
3880 
3881 	qp->usq.umem = NULL;
3882 
3883 	if (qp->urq.umem)
3884 		ib_umem_release(qp->urq.umem);
3885 
3886 	qp->urq.umem = NULL;
3887 
3888 	QL_DPRINT12(ha, "exit\n");
3889 	return;
3890 }
3891 
3892 static void
3893 qlnxr_cleanup_kernel(struct qlnxr_dev *dev, struct qlnxr_qp *qp)
3894 {
3895 	qlnx_host_t	*ha;
3896 
3897 	ha = dev->ha;
3898 
3899 	QL_DPRINT12(ha, "enter\n");
3900 
3901 	if (qlnxr_qp_has_sq(qp)) {
3902 		QL_DPRINT12(ha, "freeing SQ\n");
3903 		ha->qlnxr_debug = 1;
3904 //		ecore_chain_free(dev->cdev, &qp->sq.pbl);
3905 		ha->qlnxr_debug = 0;
3906 		kfree(qp->wqe_wr_id);
3907 	}
3908 
3909 	if (qlnxr_qp_has_rq(qp)) {
3910 		QL_DPRINT12(ha, "freeing RQ\n");
3911 		ha->qlnxr_debug = 1;
3912 	//	ecore_chain_free(dev->cdev, &qp->rq.pbl);
3913 		ha->qlnxr_debug = 0;
3914 		kfree(qp->rqe_wr_id);
3915 	}
3916 
3917 	QL_DPRINT12(ha, "exit\n");
3918 	return;
3919 }
3920 
3921 static int
3922 qlnxr_free_qp_resources(struct qlnxr_dev *dev,
3923     struct qlnxr_qp *qp, struct ib_udata *udata)
3924 {
3925 	int		rc = 0;
3926 	qlnx_host_t	*ha;
3927 	struct ecore_rdma_destroy_qp_out_params d_out_params;
3928 
3929 	ha = dev->ha;
3930 
3931 	QL_DPRINT12(ha, "enter\n");
3932 
3933 #if 0
3934 	if (qp->qp_type != IB_QPT_GSI) {
3935 		rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp,
3936 				&d_out_params);
3937 		if (rc)
3938 			return rc;
3939 	}
3940 
3941 	if (udata)
3942 		qlnxr_cleanup_user(dev, qp);
3943 	else
3944 		qlnxr_cleanup_kernel(dev, qp);
3945 #endif
3946 
3947 	if (udata)
3948 		qlnxr_cleanup_user(dev, qp);
3949 	else
3950 		qlnxr_cleanup_kernel(dev, qp);
3951 
3952 	if (qp->qp_type != IB_QPT_GSI) {
3953 		rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp,
3954 				&d_out_params);
3955 		if (rc)
3956 			return rc;
3957 	}
3958 
3959 	QL_DPRINT12(ha, "exit\n");
3960 	return 0;
3961 }
3962 
3963 int
3964 qlnxr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
3965 {
3966 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
3967 	struct qlnxr_dev *dev = qp->dev;
3968 	int rc = 0;
3969 	struct ib_qp_attr attr;
3970 	int attr_mask = 0;
3971 	qlnx_host_t	*ha;
3972 
3973 	ha = dev->ha;
3974 
3975 	QL_DPRINT12(ha, "enter qp = %p, qp_type=%d\n", qp, qp->qp_type);
3976 
3977 	qp->destroyed = 1;
3978 
3979 	if (QLNX_IS_ROCE(dev) && (qp->state != ECORE_ROCE_QP_STATE_RESET) &&
3980 	    (qp->state != ECORE_ROCE_QP_STATE_ERR) &&
3981 	    (qp->state != ECORE_ROCE_QP_STATE_INIT)) {
3982 		attr.qp_state = IB_QPS_ERR;
3983 		attr_mask |= IB_QP_STATE;
3984 
3985 		/* change the QP state to ERROR */
3986 		qlnxr_modify_qp(ibqp, &attr, attr_mask, NULL);
3987 	}
3988 
3989 	if (qp->qp_type == IB_QPT_GSI)
3990 		qlnxr_destroy_gsi_qp(dev);
3991 
3992 	qp->sig = ~qp->sig;
3993 
3994 	qlnxr_free_qp_resources(dev, qp, udata);
3995 
3996 	if (atomic_dec_and_test(&qp->refcnt)) {
3997 		/* TODO: only for iWARP? */
3998 		qlnxr_idr_remove(dev, qp->qp_id);
3999 	}
4000 
4001 	QL_DPRINT12(ha, "exit\n");
4002 	return rc;
4003 }
4004 
4005 static inline int
4006 qlnxr_wq_is_full(struct qlnxr_qp_hwq_info *wq)
4007 {
4008 	return (((wq->prod + 1) % wq->max_wr) == wq->cons);
4009 }
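
/*
 * Worked example (illustrative only): with max_wr = 4, prod = 3 and
 * cons = 0, ((3 + 1) % 4) == 0 == cons, so the queue reports full. One
 * slot always stays unused so that "full" can be distinguished from
 * "empty" (prod == cons).
 */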
4010 
4011 static int
4012 sge_data_len(struct ib_sge *sg_list, int num_sge)
4013 {
4014 	int i, len = 0;
4015 	for (i = 0; i < num_sge; i++)
4016 		len += sg_list[i].length;
4017 	return len;
4018 }
4019 
4020 static void
4021 swap_wqe_data64(u64 *p)
4022 {
4023 	int i;
4024 
4025 	for (i = 0; i < QLNXR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
4026 		*p = cpu_to_be64(cpu_to_le64(*p));
4027 }
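
/*
 * Note: cpu_to_be64(cpu_to_le64(x)) amounts to an unconditional byte
 * swap. On a little-endian host cpu_to_le64() is a no-op and
 * cpu_to_be64() swaps; on a big-endian host cpu_to_le64() swaps and
 * cpu_to_be64() is a no-op. Either way every 64-bit word of the WQE
 * segment ends up byte-reversed.
 */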
4028 
4029 static u32
4030 qlnxr_prepare_sq_inline_data(struct qlnxr_dev *dev,
4031 	struct qlnxr_qp		*qp,
4032 	u8			*wqe_size,
4033 	const struct ib_send_wr	*wr,
4034 	const struct ib_send_wr	**bad_wr,
4035 	u8			*bits,
4036 	u8			bit)
4037 {
4038 	int i, seg_siz;
4039 	char *seg_prt, *wqe;
4040 	u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
4041 	qlnx_host_t	*ha;
4042 
4043 	ha = dev->ha;
4044 
4045 	QL_DPRINT12(ha, "enter[%d]\n", data_size);
4046 
4047 	if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
4048 		QL_DPRINT12(ha,
4049 			"Too much inline data in WR:[%d, %d]\n",
4050 			data_size, ROCE_REQ_MAX_INLINE_DATA_SIZE);
4051 		*bad_wr = wr;
4052 		return 0;
4053 	}
4054 
4055 	if (!data_size)
4056 		return data_size;
4057 
4058 	/* set the bit */
4059 	*bits |= bit;
4060 
4061 	seg_prt = wqe = NULL;
4062 	seg_siz = 0;
4063 
4064 	/* copy data inline */
4065 	for (i = 0; i < wr->num_sge; i++) {
4066 		u32 len = wr->sg_list[i].length;
4067 		void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
4068 
4069 		while (len > 0) {
4070 			u32 cur;
4071 
4072 			/* new segment required */
4073 			if (!seg_siz) {
4074 				wqe = (char *)ecore_chain_produce(&qp->sq.pbl);
4075 				seg_prt = wqe;
4076 				seg_siz = sizeof(struct rdma_sq_common_wqe);
4077 				(*wqe_size)++;
4078 			}
4079 
4080 			/* calculate currently allowed length */
4081 			cur = MIN(len, seg_siz);
4082 
4083 			memcpy(seg_prt, src, cur);
4084 
4085 			/* update segment variables */
4086 			seg_prt += cur;
4087 			seg_siz -= cur;
4088 			/* update sge variables */
4089 			src += cur;
4090 			len -= cur;
4091 
4092 			/* swap fully-completed segments */
4093 			if (!seg_siz)
4094 				swap_wqe_data64((u64 *)wqe);
4095 		}
4096 	}
4097 
4098 	/* swap last not completed segment */
4099 	if (seg_siz)
4100 		swap_wqe_data64((u64 *)wqe);
4101 
4102 	QL_DPRINT12(ha, "exit\n");
4103 	return data_size;
4104 }
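
/*
 * Worked example (illustrative only, assuming a 16-byte value for
 * sizeof(struct rdma_sq_common_wqe)): inlining a single 40-byte SGE
 * proceeds as
 *   pass 1: new segment, cur = MIN(40, 16) = 16, swap, 24 bytes left
 *   pass 2: new segment, cur = MIN(24, 16) = 16, swap,  8 bytes left
 *   pass 3: new segment, cur = MIN( 8, 16) =  8, partial segment
 * and the final partial segment is byte-swapped after the loop.
 */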
4105 
4106 static u32
4107 qlnxr_prepare_sq_sges(struct qlnxr_dev *dev, struct qlnxr_qp *qp,
4108 	u8 *wqe_size, const struct ib_send_wr *wr)
4109 {
4110 	int i;
4111 	u32 data_size = 0;
4112 	qlnx_host_t	*ha;
4113 
4114 	ha = dev->ha;
4115 
4116 	QL_DPRINT12(ha, "enter wr->num_sge = %d \n", wr->num_sge);
4117 
4118 	for (i = 0; i < wr->num_sge; i++) {
4119 		struct rdma_sq_sge *sge = ecore_chain_produce(&qp->sq.pbl);
4120 
4121 		TYPEPTR_ADDR_SET(sge, addr, wr->sg_list[i].addr);
4122 		sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
4123 		sge->length = cpu_to_le32(wr->sg_list[i].length);
4124 		data_size += wr->sg_list[i].length;
4125 	}
4126 
4127 	if (wqe_size)
4128 		*wqe_size += wr->num_sge;
4129 
4130 	QL_DPRINT12(ha, "exit data_size = %d\n", data_size);
4131 	return data_size;
4132 }
4133 
4134 static u32
4135 qlnxr_prepare_sq_rdma_data(struct qlnxr_dev *dev,
4136 	struct qlnxr_qp *qp,
4137 	struct rdma_sq_rdma_wqe_1st *rwqe,
4138 	struct rdma_sq_rdma_wqe_2nd *rwqe2,
4139 	const struct ib_send_wr *wr,
4140 	const struct ib_send_wr **bad_wr)
4141 {
4142 	qlnx_host_t	*ha;
4143 	u32             ret = 0;
4144 
4145 	ha = dev->ha;
4146 
4147 	QL_DPRINT12(ha, "enter\n");
4148 
4149 	rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
4150 	TYPEPTR_ADDR_SET(rwqe2, remote_va, rdma_wr(wr)->remote_addr);
4151 
4152 	if (wr->send_flags & IB_SEND_INLINE) {
4153 		u8 flags = 0;
4154 		SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
4155 		return qlnxr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size,
4156 				wr, bad_wr, &rwqe->flags, flags);
4157 	}
4158 
4159 	ret = qlnxr_prepare_sq_sges(dev, qp, &rwqe->wqe_size, wr);
4160 
4161 	QL_DPRINT12(ha, "exit ret = 0x%x\n", ret);
4162 
4163 	return (ret);
4164 }
4165 
4166 static u32
4167 qlnxr_prepare_sq_send_data(struct qlnxr_dev *dev,
4168 	struct qlnxr_qp *qp,
4169 	struct rdma_sq_send_wqe *swqe,
4170 	struct rdma_sq_send_wqe *swqe2,
4171 	const struct ib_send_wr *wr,
4172 	const struct ib_send_wr **bad_wr)
4173 {
4174 	qlnx_host_t	*ha;
4175 	u32             ret = 0;
4176 
4177 	ha = dev->ha;
4178 
4179 	QL_DPRINT12(ha, "enter\n");
4180 
4181 	memset(swqe2, 0, sizeof(*swqe2));
4182 
4183 	if (wr->send_flags & IB_SEND_INLINE) {
4184 		u8 flags = 0;
4185 		SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
4186 		return qlnxr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size,
4187 				wr, bad_wr, &swqe->flags, flags);
4188 	}
4189 
4190 	ret = qlnxr_prepare_sq_sges(dev, qp, &swqe->wqe_size, wr);
4191 
4192 	QL_DPRINT12(ha, "exit ret = 0x%x\n", ret);
4193 
4194 	return (ret);
4195 }
4196 
4197 static void
4198 qlnx_handle_completed_mrs(struct qlnxr_dev *dev, struct mr_info *info)
4199 {
4200 	qlnx_host_t	*ha;
4201 
4202 	ha = dev->ha;
4203 
4204 	int work = info->completed - info->completed_handled - 1;
4205 
4206 	QL_DPRINT12(ha, "enter [%d]\n", work);
4207 
4208 	while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
4209 		struct qlnxr_pbl *pbl;
4210 
4211 		/* Free every page list that can be freed (all those that
4212 		 * were invalidated), under the assumption that if an FMR
4213 		 * completed successfully, any invalidate operation issued
4214 		 * before it has completed as well.
4215 		 */
4216 		pbl = list_first_entry(&info->inuse_pbl_list,
4217 				       struct qlnxr_pbl,
4218 				       list_entry);
4219 		list_del(&pbl->list_entry);
4220 		list_add_tail(&pbl->list_entry, &info->free_pbl_list);
4221 		info->completed_handled++;
4222 	}
4223 
4224 	QL_DPRINT12(ha, "exit\n");
4225 	return;
4226 }
4227 
4228 #if __FreeBSD_version >= 1102000
4229 
4230 static int qlnxr_prepare_reg(struct qlnxr_qp *qp,
4231 		struct rdma_sq_fmr_wqe_1st *fwqe1,
4232 		const struct ib_reg_wr *wr)
4233 {
4234 	struct qlnxr_mr *mr = get_qlnxr_mr(wr->mr);
4235 	struct rdma_sq_fmr_wqe_2nd *fwqe2;
4236 
4237 	fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)ecore_chain_produce(&qp->sq.pbl);
4238 	fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
4239 	fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
4240 	fwqe1->l_key = wr->key;
4241 
4242 	fwqe2->access_ctrl = 0;
4243 
4244 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
4245 		!!(wr->access & IB_ACCESS_REMOTE_READ));
4246 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
4247 		!!(wr->access & IB_ACCESS_REMOTE_WRITE));
4248 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
4249 		!!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
4250 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
4251 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
4252 		!!(wr->access & IB_ACCESS_LOCAL_WRITE));
4253 	fwqe2->fmr_ctrl = 0;
4254 
4255 	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
4256 		ilog2(mr->ibmr.page_size) - 12);
4257 
4258 	fwqe2->length_hi = 0; /* TODO - figure out why length is only 32bit.. */
4259 	fwqe2->length_lo = mr->ibmr.length;
4260 	fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
4261 	fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
4262 
4263 	qp->wqe_wr_id[qp->sq.prod].mr = mr;
4264 
4265 	return 0;
4266 }
4267 
4268 #else
4269 
4270 static void
4271 build_frmr_pbes(struct qlnxr_dev *dev, const struct ib_send_wr *wr,
4272 	struct mr_info *info)
4273 {
4274 	int i;
4275 	u64 buf_addr = 0;
4276 	int num_pbes, total_num_pbes = 0;
4277 	struct regpair *pbe;
4278 	struct qlnxr_pbl *pbl_tbl = info->pbl_table;
4279 	struct qlnxr_pbl_info *pbl_info = &info->pbl_info;
4280 	qlnx_host_t	*ha;
4281 
4282 	ha = dev->ha;
4283 
4284 	QL_DPRINT12(ha, "enter\n");
4285 
4286 	pbe = (struct regpair *)pbl_tbl->va;
4287 	num_pbes = 0;
4288 
4289 	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
4290 		buf_addr = wr->wr.fast_reg.page_list->page_list[i];
4291 		pbe->lo = cpu_to_le32((u32)buf_addr);
4292 		pbe->hi = cpu_to_le32((u32)upper_32_bits(buf_addr));
4293 
4294 		num_pbes += 1;
4295 		pbe++;
4296 		total_num_pbes++;
4297 
4298 		if (total_num_pbes == pbl_info->num_pbes)
4299 			return;
4300 
4301 		/* if the current pbl is full of stored pbes,
4302 		 * move on to the next pbl.
4303 		 */
4304 		if (num_pbes ==
4305 		    (pbl_info->pbl_size / sizeof(u64))) {
4306 			pbl_tbl++;
4307 			pbe = (struct regpair *)pbl_tbl->va;
4308 			num_pbes = 0;
4309 		}
4310 	}
4311 	QL_DPRINT12(ha, "exit\n");
4312 
4313 	return;
4314 }
4315 
4316 static int
4317 qlnxr_prepare_safe_pbl(struct qlnxr_dev *dev, struct mr_info *info)
4318 {
4319 	int rc = 0;
4320 	qlnx_host_t	*ha;
4321 
4322 	ha = dev->ha;
4323 
4324 	QL_DPRINT12(ha, "enter\n");
4325 
4326 	if (info->completed == 0) {
4327 		//DP_VERBOSE(dev, QLNXR_MSG_MR, "First FMR\n");
4328 		/* first fmr */
4329 		return 0;
4330 	}
4331 
4332 	qlnx_handle_completed_mrs(dev, info);
4333 
4334 	list_add_tail(&info->pbl_table->list_entry, &info->inuse_pbl_list);
4335 
4336 	if (list_empty(&info->free_pbl_list)) {
4337 		info->pbl_table = qlnxr_alloc_pbl_tbl(dev, &info->pbl_info,
4338 							  GFP_ATOMIC);
4339 	} else {
4340 		info->pbl_table = list_first_entry(&info->free_pbl_list,
4341 					struct qlnxr_pbl,
4342 					list_entry);
4343 		list_del(&info->pbl_table->list_entry);
4344 	}
4345 
4346 	if (!info->pbl_table)
4347 		rc = -ENOMEM;
4348 
4349 	QL_DPRINT12(ha, "exit\n");
4350 	return rc;
4351 }
4352 
4353 static inline int
4354 qlnxr_prepare_fmr(struct qlnxr_qp *qp,
4355 	struct rdma_sq_fmr_wqe_1st *fwqe1,
4356 	const struct ib_send_wr *wr)
4357 {
4358 	struct qlnxr_dev *dev = qp->dev;
4359 	u64 fbo;
4360 	struct qlnxr_fast_reg_page_list *frmr_list =
4361 		get_qlnxr_frmr_list(wr->wr.fast_reg.page_list);
4362 	struct rdma_sq_fmr_wqe *fwqe2 =
4363 		(struct rdma_sq_fmr_wqe *)ecore_chain_produce(&qp->sq.pbl);
4364 	int rc = 0;
4365 	qlnx_host_t	*ha;
4366 
4367 	ha = dev->ha;
4368 
4369 	QL_DPRINT12(ha, "enter\n");
4370 
4371 	if (wr->wr.fast_reg.page_list_len == 0)
4372 		BUG();
4373 
4374 	rc = qlnxr_prepare_safe_pbl(dev, &frmr_list->info);
4375 	if (rc)
4376 		return rc;
4377 
4378 	fwqe1->addr.hi = upper_32_bits(wr->wr.fast_reg.iova_start);
4379 	fwqe1->addr.lo = lower_32_bits(wr->wr.fast_reg.iova_start);
4380 	fwqe1->l_key = wr->wr.fast_reg.rkey;
4381 
4382 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_REMOTE_READ,
4383 		   !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ));
4384 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_REMOTE_WRITE,
4385 		   !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE));
4386 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_ENABLE_ATOMIC,
4387 		   !!(wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_ATOMIC));
4388 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_LOCAL_READ, 1);
4389 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_LOCAL_WRITE,
4390 		   !!(wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE));
4391 
4392 	fwqe2->fmr_ctrl = 0;
4393 
4394 	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
4395 		   ilog2(1 << wr->wr.fast_reg.page_shift) - 12);
4396 	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_ZERO_BASED, 0);
4397 
4398 	fwqe2->length_hi = 0; /* TODO - figure out why length is only 32 bits */
4399 	fwqe2->length_lo = wr->wr.fast_reg.length;
4400 	fwqe2->pbl_addr.hi = upper_32_bits(frmr_list->info.pbl_table->pa);
4401 	fwqe2->pbl_addr.lo = lower_32_bits(frmr_list->info.pbl_table->pa);
4402 
4403 	/* produce another wqe for fwqe3 */
4404 	ecore_chain_produce(&qp->sq.pbl);
4405 
4406 	fbo = wr->wr.fast_reg.iova_start -
4407 	    (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
4408 
4409 	QL_DPRINT12(ha, "wr.fast_reg.iova_start = %p rkey=%x addr=%x:%x"
4410 		" length = %x pbl_addr %x:%x\n",
4411 		wr->wr.fast_reg.iova_start, wr->wr.fast_reg.rkey,
4412 		fwqe1->addr.hi, fwqe1->addr.lo, fwqe2->length_lo,
4413 		fwqe2->pbl_addr.hi, fwqe2->pbl_addr.lo);
4414 
4415 	build_frmr_pbes(dev, wr, &frmr_list->info);
4416 
4417 	qp->wqe_wr_id[qp->sq.prod].frmr = frmr_list;
4418 
4419 	QL_DPRINT12(ha, "exit\n");
4420 	return 0;
4421 }
4422 
4423 #endif /* #if __FreeBSD_version >= 1102000 */
4424 
4425 static enum ib_wc_opcode
4426 qlnxr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
4427 {
4428 	switch (opcode) {
4429 	case IB_WR_RDMA_WRITE:
4430 	case IB_WR_RDMA_WRITE_WITH_IMM:
4431 		return IB_WC_RDMA_WRITE;
4432 	case IB_WR_SEND_WITH_IMM:
4433 	case IB_WR_SEND:
4434 	case IB_WR_SEND_WITH_INV:
4435 		return IB_WC_SEND;
4436 	case IB_WR_RDMA_READ:
4437 		return IB_WC_RDMA_READ;
4438 	case IB_WR_ATOMIC_CMP_AND_SWP:
4439 		return IB_WC_COMP_SWAP;
4440 	case IB_WR_ATOMIC_FETCH_AND_ADD:
4441 		return IB_WC_FETCH_ADD;
4442 
4443 #if __FreeBSD_version >= 1102000
4444 	case IB_WR_REG_MR:
4445 		return IB_WC_REG_MR;
4446 #else
4447 	case IB_WR_FAST_REG_MR:
4448 		return IB_WC_FAST_REG_MR;
4449 #endif /* #if __FreeBSD_version >= 1102000 */
4450 
4451 	case IB_WR_LOCAL_INV:
4452 		return IB_WC_LOCAL_INV;
4453 	default:
4454 		return IB_WC_SEND;
4455 	}
4456 }
4457 static inline bool
4458 qlnxr_can_post_send(struct qlnxr_qp *qp, const struct ib_send_wr *wr)
4459 {
4460 	int wq_is_full, err_wr, pbl_is_full;
4461 	struct qlnxr_dev *dev = qp->dev;
4462 	qlnx_host_t	*ha;
4463 
4464 	ha = dev->ha;
4465 
4466 	QL_DPRINT12(ha, "enter[qp, wr] = [%p,%p]\n", qp, wr);
4467 
4468 	/* prevent SQ overflow and/or processing of a bad WR */
4469 	err_wr = wr->num_sge > qp->sq.max_sges;
4470 	wq_is_full = qlnxr_wq_is_full(&qp->sq);
4471 	pbl_is_full = ecore_chain_get_elem_left_u32(&qp->sq.pbl) <
4472 		      QLNXR_MAX_SQE_ELEMENTS_PER_SQE;
4473 	if (wq_is_full || err_wr || pbl_is_full) {
4474 		if (wq_is_full &&
4475 		    !(qp->err_bitmap & QLNXR_QP_ERR_SQ_FULL)) {
4476 			qp->err_bitmap |= QLNXR_QP_ERR_SQ_FULL;
4477 
4478 			QL_DPRINT12(ha,
4479 				"error: WQ is full. Post send on QP failed"
4480 				" (this error appears only once) "
4481 				"[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4482 				qp, wr, qp->err_bitmap);
4483 		}
4484 
4485 		if (err_wr &&
4486 		    !(qp->err_bitmap & QLNXR_QP_ERR_BAD_SR)) {
4487 			qp->err_bitmap |= QLNXR_QP_ERR_BAD_SR;
4488 
4489 			QL_DPRINT12(ha,
4490 				"error: WQ is bad. Post send on QP failed"
4491 				" (this error appears only once) "
4492 				"[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4493 				qp, wr, qp->err_bitmap);
4494 		}
4495 
4496 		if (pbl_is_full &&
4497 		    !(qp->err_bitmap & QLNXR_QP_ERR_SQ_PBL_FULL)) {
4498 			qp->err_bitmap |= QLNXR_QP_ERR_SQ_PBL_FULL;
4499 
4500 			QL_DPRINT12(ha,
4501 				"error: WQ PBL is full. Post send on QP failed"
4502 				" (this error appears only once) "
4503 				"[qp, wr, qp->err_bitmap]=[%p, %p, 0x%x]\n",
4504 				qp, wr, qp->err_bitmap);
4505 		}
4506 		return false;
4507 	}
4508 	QL_DPRINT12(ha, "exit[qp, wr] = [%p,%p]\n", qp, wr);
4509 	return true;
4510 }
4511 
4512 int
4513 qlnxr_post_send(struct ib_qp *ibqp,
4514 	const struct ib_send_wr *wr,
4515 	const struct ib_send_wr **bad_wr)
4516 {
4517 	struct qlnxr_dev	*dev = get_qlnxr_dev(ibqp->device);
4518 	struct qlnxr_qp		*qp = get_qlnxr_qp(ibqp);
4519 	unsigned long 		flags;
4520 	int 			status = 0, rc = 0;
4521 	bool			comp;
4522 	qlnx_host_t		*ha;
4523 	uint32_t		reg_addr;
4524 
4525 	*bad_wr = NULL;
4526 	ha = dev->ha;
4527 
4528 	QL_DPRINT12(ha, "enter[ibqp, wr, bad_wr] = [%p, %p, %p]\n",
4529 		ibqp, wr, bad_wr);
4530 
4531 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
4532 		return -EINVAL;
4533 
4534 	if (qp->qp_type == IB_QPT_GSI)
4535 		return qlnxr_gsi_post_send(ibqp, wr, bad_wr);
4536 
4537 	spin_lock_irqsave(&qp->q_lock, flags);
4538 
4539 	if (QLNX_IS_ROCE(dev) && (qp->state != ECORE_ROCE_QP_STATE_RTS) &&
4540 	    (qp->state != ECORE_ROCE_QP_STATE_ERR) &&
4541 	    (qp->state != ECORE_ROCE_QP_STATE_SQD)) {
4542 		spin_unlock_irqrestore(&qp->q_lock, flags);
4543 		*bad_wr = wr;
4544 		QL_DPRINT11(ha, "QP in wrong state! QP icid=0x%x state %d\n",
4545 			qp->icid, qp->state);
4546 		return -EINVAL;
4547 	}
4548 
4549 	if (!wr) {
4550 		QL_DPRINT11(ha, "Got an empty post send\n");
4551 	}
4552 
4553 	while (wr) {
4554 		struct rdma_sq_common_wqe	*wqe;
4555 		struct rdma_sq_send_wqe		*swqe;
4556 		struct rdma_sq_send_wqe		*swqe2;
4557 		struct rdma_sq_rdma_wqe_1st	*rwqe;
4558 		struct rdma_sq_rdma_wqe_2nd	*rwqe2;
4559 		struct rdma_sq_local_inv_wqe	*iwqe;
4560 		struct rdma_sq_atomic_wqe	*awqe1;
4561 		struct rdma_sq_atomic_wqe	*awqe2;
4562 		struct rdma_sq_atomic_wqe	*awqe3;
4563 		struct rdma_sq_fmr_wqe_1st	*fwqe1;
4564 
4565 		if (!qlnxr_can_post_send(qp, wr)) {
4566 			status = -ENOMEM;
4567 			*bad_wr = wr;
4568 			break;
4569 		}
4570 
4571 		wqe = ecore_chain_produce(&qp->sq.pbl);
4572 
4573 		qp->wqe_wr_id[qp->sq.prod].signaled =
4574 			!!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
4575 
4576 		/* common fields */
4577 		wqe->flags = 0;
4578 		wqe->flags |= (RDMA_SQ_SEND_WQE_COMP_FLG_MASK <<
4579 				RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT);
4580 
4581 		SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG, \
4582 			!!(wr->send_flags & IB_SEND_SOLICITED));
4583 
4584 		comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) ||
4585 				(qp->signaled);
4586 
4587 		SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
4588 		SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,  \
4589 			!!(wr->send_flags & IB_SEND_FENCE));
4590 
4591 		wqe->prev_wqe_size = qp->prev_wqe_size;
4592 
4593 		qp->wqe_wr_id[qp->sq.prod].opcode = qlnxr_ib_to_wc_opcode(wr->opcode);
4594 
4595 		switch (wr->opcode) {
4596 		case IB_WR_SEND_WITH_IMM:
4597 
4598 			wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
4599 			swqe = (struct rdma_sq_send_wqe *)wqe;
4600 			swqe->wqe_size = 2;
4601 			swqe2 = (struct rdma_sq_send_wqe *)
4602 					ecore_chain_produce(&qp->sq.pbl);
4603 			swqe->inv_key_or_imm_data =
4604 				cpu_to_le32(wr->ex.imm_data);
4605 			swqe->length = cpu_to_le32(
4606 						qlnxr_prepare_sq_send_data(dev,
4607 							qp, swqe, swqe2, wr,
4608 							bad_wr));
4609 
4610 			qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4611 			qp->prev_wqe_size = swqe->wqe_size;
4612 			qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4613 
4614 			QL_DPRINT12(ha, "SEND w/ IMM length = %d imm data=%x\n",
4615 				swqe->length, wr->ex.imm_data);
4616 
4617 			break;
4618 
4619 		case IB_WR_SEND:
4620 
4621 			wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
4622 			swqe = (struct rdma_sq_send_wqe *)wqe;
4623 
4624 			swqe->wqe_size = 2;
4625 			swqe2 = (struct rdma_sq_send_wqe *)
4626 					ecore_chain_produce(&qp->sq.pbl);
4627 			swqe->length = cpu_to_le32(
4628 						qlnxr_prepare_sq_send_data(dev,
4629 							qp, swqe, swqe2, wr,
4630 							bad_wr));
4631 			qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4632 			qp->prev_wqe_size = swqe->wqe_size;
4633 			qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4634 
4635 			QL_DPRINT12(ha, "SEND w/o IMM length = %d\n",
4636 				swqe->length);
4637 
4638 			break;
4639 
4640 		case IB_WR_SEND_WITH_INV:
4641 
4642 			wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
4643 			swqe = (struct rdma_sq_send_wqe *)wqe;
4644 			swqe2 = (struct rdma_sq_send_wqe *)
4645 					ecore_chain_produce(&qp->sq.pbl);
4646 			swqe->wqe_size = 2;
4647 			swqe->inv_key_or_imm_data =
4648 				cpu_to_le32(wr->ex.invalidate_rkey);
4649 			swqe->length = cpu_to_le32(qlnxr_prepare_sq_send_data(dev,
4650 						qp, swqe, swqe2, wr, bad_wr));
4651 			qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
4652 			qp->prev_wqe_size = swqe->wqe_size;
4653 			qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
4654 
4655 			QL_DPRINT12(ha, "SEND w INVALIDATE length = %d\n",
4656 				swqe->length);
4657 			break;
4658 
4659 		case IB_WR_RDMA_WRITE_WITH_IMM:
4660 
4661 			wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
4662 			rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4663 
4664 			rwqe->wqe_size = 2;
4665 			rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
4666 			rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4667 					ecore_chain_produce(&qp->sq.pbl);
4668 			rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4669 						qp, rwqe, rwqe2, wr, bad_wr));
4670 			qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4671 			qp->prev_wqe_size = rwqe->wqe_size;
4672 			qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4673 
4674 			QL_DPRINT12(ha,
4675 				"RDMA WRITE w/ IMM length = %d imm data=%x\n",
4676 				rwqe->length, rwqe->imm_data);
4677 
4678 			break;
4679 
4680 		case IB_WR_RDMA_WRITE:
4681 
4682 			wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
4683 			rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4684 
4685 			rwqe->wqe_size = 2;
4686 			rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4687 					ecore_chain_produce(&qp->sq.pbl);
4688 			rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4689 						qp, rwqe, rwqe2, wr, bad_wr));
4690 			qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4691 			qp->prev_wqe_size = rwqe->wqe_size;
4692 			qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4693 
4694 			QL_DPRINT12(ha,
4695 				"RDMA WRITE w/o IMM length = %d\n",
4696 				rwqe->length);
4697 
4698 			break;
4699 
4700 		case IB_WR_RDMA_READ_WITH_INV:
4701 
4702 			QL_DPRINT12(ha,
4703 				"RDMA READ WITH INVALIDATE not supported\n");
4704 
4705 			*bad_wr = wr;
4706 			rc = -EINVAL;
4707 
4708 			break;
4709 
4710 		case IB_WR_RDMA_READ:
4711 
4712 			wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
4713 			rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
4714 
4715 			rwqe->wqe_size = 2;
4716 			rwqe2 = (struct rdma_sq_rdma_wqe_2nd *)
4717 					ecore_chain_produce(&qp->sq.pbl);
4718 			rwqe->length = cpu_to_le32(qlnxr_prepare_sq_rdma_data(dev,
4719 						qp, rwqe, rwqe2, wr, bad_wr));
4720 
4721 			qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
4722 			qp->prev_wqe_size = rwqe->wqe_size;
4723 			qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
4724 
4725 			QL_DPRINT12(ha, "RDMA READ length = %d\n",
4726 				rwqe->length);
4727 
4728 			break;
4729 
4730 		case IB_WR_ATOMIC_CMP_AND_SWP:
4731 		case IB_WR_ATOMIC_FETCH_AND_ADD:
4732 
4733 			QL_DPRINT12(ha,
4734 				"ATOMIC operation = %s\n",
4735 				((wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) ?
4736 					"IB_WR_ATOMIC_CMP_AND_SWP" :
4737 					"IB_WR_ATOMIC_FETCH_AND_ADD"));
4738 
4739 			awqe1 = (struct rdma_sq_atomic_wqe *)wqe;
4740 			awqe1->prev_wqe_size = 4;
4741 
4742 			awqe2 = (struct rdma_sq_atomic_wqe *)
4743 					ecore_chain_produce(&qp->sq.pbl);
4744 
4745 			TYPEPTR_ADDR_SET(awqe2, remote_va, \
4746 				atomic_wr(wr)->remote_addr);
4747 
4748 			awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
4749 
4750 			awqe3 = (struct rdma_sq_atomic_wqe *)
4751 					ecore_chain_produce(&qp->sq.pbl);
4752 
4753 			if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
4754 				wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
4755 				TYPEPTR_ADDR_SET(awqe3, swap_data,
4756 						 atomic_wr(wr)->compare_add);
4757 			} else {
4758 				wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
4759 				TYPEPTR_ADDR_SET(awqe3, swap_data,
4760 						 atomic_wr(wr)->swap);
4761 				TYPEPTR_ADDR_SET(awqe3, cmp_data,
4762 						 atomic_wr(wr)->compare_add);
4763 			}
4764 
4765 			qlnxr_prepare_sq_sges(dev, qp, NULL, wr);
4766 
4767 			qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->prev_wqe_size;
4768 			qp->prev_wqe_size = awqe1->prev_wqe_size;
4769 
4770 			break;
4771 
4772 		case IB_WR_LOCAL_INV:
4773 
4774 			QL_DPRINT12(ha,
4775 				"INVALIDATE length (IB_WR_LOCAL_INV)\n");
4776 
4777 			iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
4778 			iwqe->prev_wqe_size = 1;
4779 
4780 			iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
4781 			iwqe->inv_l_key = wr->ex.invalidate_rkey;
4782 			qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->prev_wqe_size;
4783 			qp->prev_wqe_size = iwqe->prev_wqe_size;
4784 
4785 			break;
4786 
4787 #if __FreeBSD_version >= 1102000
4788 
4789 		case IB_WR_REG_MR:
4790 
4791 			QL_DPRINT12(ha, "IB_WR_REG_MR\n");
4792 
4793 			wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
4794 			fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
4795 			fwqe1->wqe_size = 2;
4796 
4797 			rc = qlnxr_prepare_reg(qp, fwqe1, reg_wr(wr));
4798 			if (rc) {
4799 				QL_DPRINT11(ha, "IB_WR_REG_MR failed rc=%d\n", rc);
4800 				*bad_wr = wr;
4801 				break;
4802 			}
4803 
4804 			qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
4805 			qp->prev_wqe_size = fwqe1->wqe_size;
4806 
4807 			break;
4808 #else
4809 		case IB_WR_FAST_REG_MR:
4810 
4811 			QL_DPRINT12(ha, "FAST_MR (IB_WR_FAST_REG_MR)\n");
4812 
4813 			wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
4814 			fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
4815 			fwqe1->prev_wqe_size = 3;
4816 
4817 			rc = qlnxr_prepare_fmr(qp, fwqe1, wr);
4818 
4819 			if (rc) {
4820 				QL_DPRINT12(ha,
4821 					"FAST_MR (IB_WR_FAST_REG_MR) failed"
4822 					" rc = %d\n", rc);
4823 				*bad_wr = wr;
4824 				break;
4825 			}
4826 
4827 			qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->prev_wqe_size;
4828 			qp->prev_wqe_size = fwqe1->prev_wqe_size;
4829 
4830 			break;
4831 #endif /* #if __FreeBSD_version >= 1102000 */
4832 
4833 		default:
4834 
4835 			QL_DPRINT12(ha, "Invalid Opcode 0x%x!\n", wr->opcode);
4836 
4837 			rc = -EINVAL;
4838 			*bad_wr = wr;
4839 			break;
4840 		}
4841 
4842 		if (*bad_wr) {
4843 			/*
4844 			 * restore prod to its position before this WR was processed
4845 			 */
4846 			ecore_chain_set_prod(&qp->sq.pbl,
4847 			     le16_to_cpu(qp->sq.db_data.data.value),
4848 			     wqe);
4849 			/* restore prev_wqe_size */
4850 			qp->prev_wqe_size = wqe->prev_wqe_size;
4851 			status = rc;
4852 
4853 			QL_DPRINT12(ha, "failed *bad_wr = %p\n", *bad_wr);
4854 			break; /* out of the loop */
4855 		}
4856 
4857 		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
4858 
4859 		qlnxr_inc_sw_prod(&qp->sq);
4860 
4861 		qp->sq.db_data.data.value++;
4862 
4863 		wr = wr->next;
4864 	}
4865 
4866 	/* Trigger the doorbell.
4867 	 * If the very first WR failed, the doorbell rings in vain; however,
4868 	 * this is harmless as long as the producer value is unchanged. For
4869 	 * performance reasons we avoid checking for this redundant
4870 	 * doorbell.
4871 	 */
4872 	wmb();
4873 	//writel(qp->sq.db_data.raw, qp->sq.db);
4874 
4875 	reg_addr = (uint32_t)((uint8_t *)qp->sq.db - (uint8_t *)ha->cdev.doorbells);
4876 	bus_write_4(ha->pci_dbells, reg_addr, qp->sq.db_data.raw);
4877 	bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
4878 
4879 	mmiowb();
4880 
4881 	spin_unlock_irqrestore(&qp->q_lock, flags);
4882 
4883 	QL_DPRINT12(ha, "exit[ibqp, wr, bad_wr] = [%p, %p, %p]\n",
4884 		ibqp, wr, bad_wr);
4885 
4886 	return status;
4887 }
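
/*
 * Usage sketch (illustrative only, not part of the driver): a kernel
 * consumer reaches this verb through ib_post_send() with a WR chain such
 * as the one below. The names dma_addr, len, ctx, mr and qp are
 * hypothetical placeholders for the caller's own state.
 */
#if 0
	struct ib_sge sge = {
		.addr   = dma_addr,		/* DMA-mapped source buffer */
		.length = len,
		.lkey   = mr->lkey,
	};
	struct ib_send_wr wr = {
		.wr_id      = (uintptr_t)ctx,	/* echoed back in the WC */
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,	/* request a completion */
	};
	const struct ib_send_wr *bad_wr;
	int rc;

	rc = ib_post_send(qp, &wr, &bad_wr);	/* dispatches to qlnxr_post_send */
#endif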
4888 
4889 static u32
4890 qlnxr_srq_elem_left(struct qlnxr_srq_hwq_info *hw_srq)
4891 {
4892 	u32 used;
4893 
4894 	/* Compute the number of elements in use from the producer
4895 	 * and consumer counts, then subtract it from the maximum
4896 	 * number of supported work requests to get the elements left.
4897 	 */
4898 	used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
4899 
4900 	return hw_srq->max_wr - used;
4901 }
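
/*
 * Note: assuming wr_prod_cnt and wr_cons_cnt are free-running unsigned
 * counters (as the u32 arithmetic suggests), the subtraction stays
 * correct across wraparound: e.g. prod = 2 after wrapping and
 * cons = 0xfffffffe still yields used = 4.
 */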
4902 
4903 int
4904 qlnxr_post_recv(struct ib_qp *ibqp,
4905 	const struct ib_recv_wr *wr,
4906 	const struct ib_recv_wr **bad_wr)
4907 {
4908 	struct qlnxr_qp		*qp = get_qlnxr_qp(ibqp);
4909 	struct qlnxr_dev	*dev = qp->dev;
4910 	unsigned long		flags;
4911 	int			status = 0;
4912 	qlnx_host_t		*ha;
4913 	uint32_t		reg_addr;
4914 
4915 	ha = dev->ha;
4916 
4917 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
4918 		return -EINVAL;
4919 
4920 	QL_DPRINT12(ha, "enter\n");
4921 
4922 	if (qp->qp_type == IB_QPT_GSI) {
4923 		QL_DPRINT12(ha, "(qp->qp_type = IB_QPT_GSI)\n");
4924 		return qlnxr_gsi_post_recv(ibqp, wr, bad_wr);
4925 	}
4926 
4927 	if (qp->srq) {
4928 		QL_DPRINT11(ha, "qp->srq [%p]"
4929 			" QP is associated with SRQ, cannot post RQ buffers\n",
4930 			qp->srq);
4931 		return -EINVAL;
4932 	}
4933 
4934 	spin_lock_irqsave(&qp->q_lock, flags);
4935 
4936 	if (qp->state == ECORE_ROCE_QP_STATE_RESET) {
4937 		spin_unlock_irqrestore(&qp->q_lock, flags);
4938 		*bad_wr = wr;
4939 
4940 		QL_DPRINT11(ha, "qp->state = ECORE_ROCE_QP_STATE_RESET\n");
4941 
4942 		return -EINVAL;
4943 	}
4944 
4945 	while (wr) {
4946 		int i;
4947 
4948 		if ((ecore_chain_get_elem_left_u32(&qp->rq.pbl) <
4949 			QLNXR_MAX_RQE_ELEMENTS_PER_RQE) ||
4950 			(wr->num_sge > qp->rq.max_sges)) {
4951 			status = -ENOMEM;
4952 			*bad_wr = wr;
4953 			break;
4954 		}
4955 		for (i = 0; i < wr->num_sge; i++) {
4956 			u32 flags = 0;
4957 			struct rdma_rq_sge *rqe = ecore_chain_produce(&qp->rq.pbl);
4958 
4959 			/* first one must include the number of SGE in the list */
4960 			if (!i)
4961 				SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, wr->num_sge);
4962 
4963 			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, wr->sg_list[i].lkey);
4964 
4965 			RQ_SGE_SET(rqe, wr->sg_list[i].addr, \
4966 				wr->sg_list[i].length, flags);
4967 		}
4968 		/* Special case of no SGEs: the FW requires between 1 and 4
4969 		 * SGEs, so post a single SGE with length zero. This is needed
4970 		 * because an RDMA write with immediate consumes an RQ entry. */
4971 		if (!wr->num_sge) {
4972 			u32 flags = 0;
4973 			struct rdma_rq_sge *rqe = ecore_chain_produce(&qp->rq.pbl);
4974 
4975 			/* first one must include the number of SGE in the list */
4976 			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
4977 			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
4978 
4979 			//RQ_SGE_SET(rqe, 0, 0, flags);
4980 			rqe->addr.hi = 0;
4981 			rqe->addr.lo = 0;
4982 
4983 			rqe->length = 0;
4984 			rqe->flags = cpu_to_le32(flags);
4985 
4986 			i = 1;
4987 		}
4988 
4989 		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
4990 		qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
4991 
4992 		qlnxr_inc_sw_prod(&qp->rq);
4993 
4994 		wmb();
4995 
4996 		qp->rq.db_data.data.value++;
4997 
4998 	//	writel(qp->rq.db_data.raw, qp->rq.db);
4999 		mmiowb();
5000 	//	if (QLNX_IS_IWARP(dev)) {
5001 	//		writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
5002 	//		mmiowb(); /* for second doorbell */
5003 	//	}
5004 
5005 		reg_addr = (uint32_t)((uint8_t *)qp->rq.db -
5006 				(uint8_t *)ha->cdev.doorbells);
5007 
5008 		bus_write_4(ha->pci_dbells, reg_addr, qp->rq.db_data.raw);
5009 		bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
5010 
5011 		if (QLNX_IS_IWARP(dev)) {
5012 			reg_addr = (uint32_t)((uint8_t *)qp->rq.iwarp_db2 -
5013 						(uint8_t *)ha->cdev.doorbells);
5014 			bus_write_4(ha->pci_dbells, reg_addr, \
5015 				qp->rq.iwarp_db2_data.raw);
5016 			bus_barrier(ha->pci_dbells,  0, 0, \
5017 				BUS_SPACE_BARRIER_READ);
5018 		}
5019 
5020 		wr = wr->next;
5021 	}
5022 
5023 	spin_unlock_irqrestore(&qp->q_lock, flags);
5024 
5025 	QL_DPRINT12(ha, "exit status = 0x%x\n", status);
5026 
5027 	return status;
5028 }
5029 
5030 /* For FMR we need to bump the completed-FMR counter used by the FMR
5031  * algorithm that decides whether a PBL can be freed. This must be done
5032  * whether or not the work request was signaled. For that reason this
5033  * function is called from the condition that checks whether a WR should
5034  * be skipped, to make sure an FMR operation that was not signaled is
5035  * not missed.
5036  */
5037 static inline void
5038 qlnxr_chk_if_fmr(struct qlnxr_qp *qp)
5039 {
5040 #if __FreeBSD_version >= 1102000
5041 
5042 	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
5043 		qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
5044 #else
5045 	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_FAST_REG_MR)
5046 		qp->wqe_wr_id[qp->sq.cons].frmr->info.completed++;
5047 
5048 #endif /* #if __FreeBSD_version >= 1102000 */
5049 }
5050 
5051 static int
5052 process_req(struct qlnxr_dev *dev,
5053 	struct qlnxr_qp *qp,
5054 	struct qlnxr_cq *cq,
5055 	int num_entries,
5056 	struct ib_wc *wc,
5057 	u16 hw_cons,
5058 	enum ib_wc_status status,
5059 	int force)
5060 {
5061 	u16		cnt = 0;
5062 	qlnx_host_t	*ha = dev->ha;
5063 
5064 	QL_DPRINT12(ha, "enter\n");
5065 
5066 	while (num_entries && qp->sq.wqe_cons != hw_cons) {
5067 		if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
5068 			qlnxr_chk_if_fmr(qp);
5069 			/* skip WC */
5070 			goto next_cqe;
5071 		}
5072 
5073 		/* fill WC */
5074 		wc->status = status;
5075 		wc->vendor_err = 0;
5076 		wc->wc_flags = 0;
5077 		wc->src_qp = qp->id;
5078 		wc->qp = &qp->ibqp;
5079 
5080 		// common section
5081 		wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
5082 		wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
5083 
5084 		switch (wc->opcode) {
5085 		case IB_WC_RDMA_WRITE:
5086 
5087 			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
5088 
5089 			QL_DPRINT12(ha,
5090 				"opcode = IB_WC_RDMA_WRITE bytes = %d\n",
5091 				qp->wqe_wr_id[qp->sq.cons].bytes_len);
5092 			break;
5093 
5094 		case IB_WC_COMP_SWAP:
5095 		case IB_WC_FETCH_ADD:
5096 			wc->byte_len = 8;
5097 			break;
5098 
5099 #if __FreeBSD_version >= 1102000
5100 		case IB_WC_REG_MR:
5101 			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
5102 			break;
5103 #else
5104 		case IB_WC_FAST_REG_MR:
5105 			qp->wqe_wr_id[qp->sq.cons].frmr->info.completed++;
5106 			break;
5107 #endif /* #if __FreeBSD_version >= 1102000 */
5108 
5109 		case IB_WC_RDMA_READ:
5110 		case IB_WC_SEND:
5111 
5112 			QL_DPRINT12(ha, "opcode = 0x%x \n", wc->opcode);
5113 			break;
5114 		default:
5115 			;//DP_ERR("TBD ERROR");
5116 		}
5117 
5118 		num_entries--;
5119 		wc++;
5120 		cnt++;
5121 next_cqe:
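		/* a WR may span several chain elements (wqe_size), so
		 * consume one PBL element per WQE element before advancing
		 * the software consumer
		 */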
5122 		while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
5123 			ecore_chain_consume(&qp->sq.pbl);
5124 		qlnxr_inc_sw_cons(&qp->sq);
5125 	}
5126 
5127 	QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5128 	return cnt;
5129 }
5130 
5131 static int
5132 qlnxr_poll_cq_req(struct qlnxr_dev *dev,
5133 	struct qlnxr_qp *qp,
5134 	struct qlnxr_cq *cq,
5135 	int num_entries,
5136 	struct ib_wc *wc,
5137 	struct rdma_cqe_requester *req)
5138 {
5139 	int		cnt = 0;
5140 	qlnx_host_t	*ha = dev->ha;
5141 
5142 	QL_DPRINT12(ha, "enter req->status = 0x%x\n", req->status);
5143 
5144 	switch (req->status) {
5145 	case RDMA_CQE_REQ_STS_OK:
5146 
5147 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
5148 			IB_WC_SUCCESS, 0);
5149 		break;
5150 
5151 	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
5152 
5153 		if (qp->state != ECORE_ROCE_QP_STATE_ERR)
5154 		if (qp->state != ECORE_ROCE_QP_STATE_ERR)
5155 			cnt = process_req(dev, qp, cq, num_entries, wc,
5156 				req->sq_cons, IB_WC_WR_FLUSH_ERR, 1);
5157 
5158 	default: /* other errors case */
5159 
5160 		/* process all WQEs before the consumer */
5161 		qp->state = ECORE_ROCE_QP_STATE_ERR;
5162 		cnt = process_req(dev, qp, cq, num_entries, wc,
5163 				req->sq_cons - 1, IB_WC_SUCCESS, 0);
5164 		wc += cnt;
5165 		/* if we have extra WC fill it with actual error info */
5166 
5167 		if (cnt < num_entries) {
5168 			enum ib_wc_status wc_status;
5169 
5170 			switch (req->status) {
5171 			case 	RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
5172 				wc_status = IB_WC_BAD_RESP_ERR;
5173 				break;
5174 			case 	RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
5175 				wc_status = IB_WC_LOC_LEN_ERR;
5176 				break;
5177 			case    RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
5178 				wc_status = IB_WC_LOC_QP_OP_ERR;
5179 				break;
5180 			case    RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
5181 				wc_status = IB_WC_LOC_PROT_ERR;
5182 				break;
5183 			case    RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
5184 				wc_status = IB_WC_MW_BIND_ERR;
5185 				break;
5186 			case    RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
5187 				wc_status = IB_WC_REM_INV_REQ_ERR;
5188 				break;
5189 			case    RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
5190 				wc_status = IB_WC_REM_ACCESS_ERR;
5191 				break;
5192 			case    RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
5193 				wc_status = IB_WC_REM_OP_ERR;
5194 				break;
5195 			case    RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
5196 				wc_status = IB_WC_RNR_RETRY_EXC_ERR;
5197 				break;
5198 			case    RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
5199 				wc_status = IB_WC_RETRY_EXC_ERR;
5200 				break;
5201 			default:
5202 				wc_status = IB_WC_GENERAL_ERR;
5203 			}
5204 
5205 			cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
5206 					wc_status, 1 /* force use of WC */);
5207 		}
5208 	}
5209 
5210 	QL_DPRINT12(ha, "exit cnt = %d\n", cnt);
5211 	return cnt;
5212 }
5213 
5214 static void
5215 __process_resp_one(struct qlnxr_dev *dev,
5216 	struct qlnxr_qp *qp,
5217 	struct qlnxr_cq *cq,
5218 	struct ib_wc *wc,
5219 	struct rdma_cqe_responder *resp,
5220 	u64 wr_id)
5221 {
5222 	enum ib_wc_status	wc_status = IB_WC_SUCCESS;
5223 #if __FreeBSD_version < 1102000
5224 	u8			flags;
5225 #endif
5226 	qlnx_host_t		*ha = dev->ha;
5227 
5228 	QL_DPRINT12(ha, "enter qp = %p resp->status = 0x%x\n",
5229 		qp, resp->status);
5230 
5231 	wc->opcode = IB_WC_RECV;
5232 	wc->wc_flags = 0;
5233 
5234 	switch (resp->status) {
5235 	case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
5236 		wc_status = IB_WC_LOC_ACCESS_ERR;
5237 		break;
5238 
5239 	case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
5240 		wc_status = IB_WC_LOC_LEN_ERR;
5241 		break;
5242 
5243 	case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
5244 		wc_status = IB_WC_LOC_QP_OP_ERR;
5245 		break;
5246 
5247 	case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
5248 		wc_status = IB_WC_LOC_PROT_ERR;
5249 		break;
5250 
5251 	case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
5252 		wc_status = IB_WC_MW_BIND_ERR;
5253 		break;
5254 
5255 	case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
5256 		wc_status = IB_WC_REM_INV_RD_REQ_ERR;
5257 		break;
5258 
5259 	case RDMA_CQE_RESP_STS_OK:
5260 
5261 #if __FreeBSD_version >= 1102000
5262 		if (resp->flags & QLNXR_RESP_IMM) {
5263 			wc->ex.imm_data =
5264 				le32_to_cpu(resp->imm_data_or_inv_r_Key);
5265 			wc->wc_flags |= IB_WC_WITH_IMM;
5266 
5267 			if (resp->flags & QLNXR_RESP_RDMA)
5268 				wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
5269 
5270 			if (resp->flags & QLNXR_RESP_INV) {
5271 				QL_DPRINT11(ha,
5272 					"Invalid flags QLNXR_RESP_INV [0x%x]"
5273 					"qp = %p qp->id = 0x%x cq = %p"
5274 					" cq->icid = 0x%x\n",
5275 					resp->flags, qp, qp->id, cq, cq->icid );
5276 			}
5277 		} else if (resp->flags & QLNXR_RESP_INV) {
5278 			wc->ex.imm_data =
5279 				le32_to_cpu(resp->imm_data_or_inv_r_Key);
5280 			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
5281 
5282 			if (resp->flags & QLNXR_RESP_RDMA) {
5283 				QL_DPRINT11(ha,
5284 					"Invalid flags QLNXR_RESP_RDMA [0x%x]"
5285 					"qp = %p qp->id = 0x%x cq = %p"
5286 					" cq->icid = 0x%x\n",
5287 					resp->flags, qp, qp->id, cq, cq->icid );
5288 			}
5289 		} else if (resp->flags & QLNXR_RESP_RDMA) {
5290 			QL_DPRINT11(ha, "Invalid flags QLNXR_RESP_RDMA [0x%x]"
5291 				"qp = %p qp->id = 0x%x cq = %p cq->icid = 0x%x\n",
5292 				resp->flags, qp, qp->id, cq, cq->icid );
5293 		}
5294 #else
5295 		wc_status = IB_WC_SUCCESS;
5296 		wc->byte_len = le32_to_cpu(resp->length);
5297 
5298 		flags = resp->flags & QLNXR_RESP_RDMA_IMM;
5299 
5300 		switch (flags) {
5301 		case QLNXR_RESP_RDMA_IMM:
5302 			/* update opcode */
5303 			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
5304 			/* fall through to set imm data */
5305 		case QLNXR_RESP_IMM:
5306 			wc->ex.imm_data =
5307 				le32_to_cpu(resp->imm_data_or_inv_r_Key);
5308 			wc->wc_flags |= IB_WC_WITH_IMM;
5309 			break;
5310 		case QLNXR_RESP_RDMA:
5311 			QL_DPRINT11(ha, "Invalid flags QLNXR_RESP_RDMA [0x%x]"
5312 				"qp = %p qp->id = 0x%x cq = %p cq->icid = 0x%x\n",
5313 				resp->flags, qp, qp->id, cq, cq->icid );
5314 			break;
5315 		default:
5316 			/* valid configuration, but nothing to do here */
5317 			;
5318 		}
5319 #endif /* #if __FreeBSD_version >= 1102000 */
5320 
5321 		break;
5322 	default:
5323 		wc_status = IB_WC_GENERAL_ERR;
5324 	}
5325 
5326 	/* fill WC */
5327 	wc->status = wc_status;
5328 	wc->vendor_err = 0;
5329 	wc->src_qp = qp->id;
5330 	wc->qp = &qp->ibqp;
5331 	wc->wr_id = wr_id;
5332 
5333 	QL_DPRINT12(ha, "exit status = 0x%x\n", wc_status);
5334 
5335 	return;
5336 }
5337 
5338 static int
5339 process_resp_one_srq(struct qlnxr_dev *dev,
5340 	struct qlnxr_qp *qp,
5341 	struct qlnxr_cq *cq,
5342 	struct ib_wc *wc,
5343 	struct rdma_cqe_responder *resp)
5344 {
5345 	struct qlnxr_srq	*srq = qp->srq;
5346 	u64			wr_id;
5347 	qlnx_host_t		*ha = dev->ha;
5348 
5349 	QL_DPRINT12(ha, "enter\n");
5350 
5351 	wr_id = HILO_U64(resp->srq_wr_id.hi, resp->srq_wr_id.lo);
5352 
5353 	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
5354 		wc->status = IB_WC_WR_FLUSH_ERR;
5355 		wc->vendor_err = 0;
5356 		wc->wr_id = wr_id;
5357 		wc->byte_len = 0;
5358 		wc->src_qp = qp->id;
5359 		wc->qp = &qp->ibqp;
5361 	} else {
5362 		__process_resp_one(dev, qp, cq, wc, resp, wr_id);
5363 	}
5364 
5365 	/* The PBL is maintained at WR granularity,
5366 	 * so increment the WR consumer after consuming the WR.
5367 	 */
5368 	srq->hw_srq.wr_cons_cnt++;
5369 
5370 	QL_DPRINT12(ha, "exit\n");
5371 	return 1;
5372 }
5373 
5374 static int
5375 process_resp_one(struct qlnxr_dev *dev,
5376 	struct qlnxr_qp *qp,
5377 	struct qlnxr_cq *cq,
5378 	struct ib_wc *wc,
5379 	struct rdma_cqe_responder *resp)
5380 {
5381 	qlnx_host_t	*ha = dev->ha;
5382 	u64		wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
5383 
5384 	QL_DPRINT12(ha, "enter\n");
5385 
5386 	__process_resp_one(dev, qp, cq, wc, resp, wr_id);
5387 
5388 	while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
5389 		ecore_chain_consume(&qp->rq.pbl);
5390 	qlnxr_inc_sw_cons(&qp->rq);
5391 
5392 	QL_DPRINT12(ha, "exit\n");
5393 	return 1;
5394 }
5395 
5396 static int
5397 process_resp_flush(struct qlnxr_qp *qp,
5398 	int num_entries,
5399 	struct ib_wc *wc,
5400 	u16 hw_cons)
5401 {
5402 	u16		cnt = 0;
5403 	qlnx_host_t	*ha = qp->dev->ha;
5404 
5405 	QL_DPRINT12(ha, "enter\n");
5406 
5407 	while (num_entries && qp->rq.wqe_cons != hw_cons) {
5408 		/* fill WC */
5409 		wc->status = IB_WC_WR_FLUSH_ERR;
5410 		wc->vendor_err = 0;
5411 		wc->wc_flags = 0;
5412 		wc->src_qp = qp->id;
5413 		wc->byte_len = 0;
5414 		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
5415 		wc->qp = &qp->ibqp;
5416 		num_entries--;
5417 		wc++;
5418 		cnt++;
5419 		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
5420 			ecore_chain_consume(&qp->rq.pbl);
5421 		qlnxr_inc_sw_cons(&qp->rq);
5422 	}
5423 
5424 	QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5425 	return cnt;
5426 }
5427 
5428 static void
5429 try_consume_resp_cqe(struct qlnxr_cq *cq,
5430 	struct qlnxr_qp *qp,
5431 	struct rdma_cqe_responder *resp,
5432 	int *update)
5433 {
5434 	if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
5435 		consume_cqe(cq);
5436 		*update |= 1;
5437 	}
5438 }
5439 
5440 static int
5441 qlnxr_poll_cq_resp_srq(struct qlnxr_dev *dev,
5442 	struct qlnxr_qp *qp,
5443 	struct qlnxr_cq *cq,
5444 	int num_entries,
5445 	struct ib_wc *wc,
5446 	struct rdma_cqe_responder *resp,
5447 	int *update)
5448 {
5449 	int		cnt;
5450 	qlnx_host_t	*ha = dev->ha;
5451 
5452 	QL_DPRINT12(ha, "enter\n");
5453 
5454 	cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
5455 	consume_cqe(cq);
5456 	*update |= 1;
5457 
5458 	QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5459 	return cnt;
5460 }
5461 
5462 static int
5463 qlnxr_poll_cq_resp(struct qlnxr_dev *dev,
5464 	struct qlnxr_qp *qp,
5465 	struct qlnxr_cq *cq,
5466 	int num_entries,
5467 	struct ib_wc *wc,
5468 	struct rdma_cqe_responder *resp,
5469 	int *update)
5470 {
5471 	int		cnt;
5472 	qlnx_host_t	*ha = dev->ha;
5473 
5474 	QL_DPRINT12(ha, "enter\n");
5475 
5476 	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
5477 		cnt = process_resp_flush(qp, num_entries, wc,
5478 				resp->rq_cons);
5479 		try_consume_resp_cqe(cq, qp, resp, update);
5480 	} else {
5481 		cnt = process_resp_one(dev, qp, cq, wc, resp);
5482 		consume_cqe(cq);
5483 		*update |= 1;
5484 	}
5485 
5486 	QL_DPRINT12(ha, "exit cnt = 0x%x\n", cnt);
5487 	return cnt;
5488 }
5489 
5490 static void
5491 try_consume_req_cqe(struct qlnxr_cq *cq, struct qlnxr_qp *qp,
5492 	struct rdma_cqe_requester *req, int *update)
5493 {
5494 	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
5495 		consume_cqe(cq);
5496 		*update |= 1;
5497 	}
5498 }
5499 
5500 static void
5501 doorbell_cq(struct qlnxr_dev *dev, struct qlnxr_cq *cq, u32 cons, u8 flags)
5502 {
5503 	uint64_t	reg_addr;
5504 	qlnx_host_t	*ha = dev->ha;
5505 
5506 	QL_DPRINT12(ha, "enter\n");
5507 
5508 	wmb();
5509 	cq->db.data.agg_flags = flags;
5510 	cq->db.data.value = cpu_to_le32(cons);
5511 
5512 	reg_addr = (uint64_t)((uint8_t *)cq->db_addr -
5513 				(uint8_t *)(ha->cdev.doorbells));
5514 
5515 	bus_write_8(ha->pci_dbells, reg_addr, cq->db.raw);
5516 	bus_barrier(ha->pci_dbells,  0, 0, BUS_SPACE_BARRIER_READ);
5517 
5518 	QL_DPRINT12(ha, "exit\n");
5519 	return;
5520 
5521 //#ifdef __LP64__
5522 //	writeq(cq->db.raw, cq->db_addr);
5523 //#else
5524 	/* Note that since the FW allows 64 bit write only, in 32bit systems
5525 	 * the value of db_addr must be low enough. This is currently not
5526 	 * enforced.
5527 	 */
5528 //	writel(cq->db.raw & 0xffffffff, cq->db_addr);
5529 //	mmiowb();
5530 //#endif
5531 }
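
/*
 * Note: cq->db_addr points into the mapped doorbell BAR, so subtracting
 * the doorbells base yields the bus-space offset that bus_write_8()
 * expects; the following bus_barrier() then orders the doorbell write
 * against later accesses to the doorbell region.
 */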
5532 
5533 static int
5534 is_valid_cqe(struct qlnxr_cq *cq, union rdma_cqe *cqe)
5535 {
5536 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
5537 	return (resp_cqe->flags & RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK) ==
5538 			cq->pbl_toggle;
5539 }
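
/*
 * The toggle-bit scheme appears to let the driver detect new CQEs
 * without reading a producer index: firmware flips the toggle bit each
 * time it wraps around the CQ PBL, so a CQE whose toggle bit matches
 * cq->pbl_toggle was written in the current pass and is valid, while a
 * mismatch means the entry is stale.
 */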
5540 
5541 int
5542 qlnxr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
5543 {
5544 	struct qlnxr_cq	*cq = get_qlnxr_cq(ibcq);
5545 	struct qlnxr_dev *dev = get_qlnxr_dev((ibcq->device));
5546 	int		done = 0;
5547 	union rdma_cqe	*cqe = cq->latest_cqe;
5548 	int 		update = 0;
5549 	u32		old_cons, new_cons;
5550 	unsigned long	flags;
5551 	qlnx_host_t	*ha = dev->ha;
5552 
5553 	QL_DPRINT12(ha, "enter\n");
5554 
5555 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
5556 		return -EINVAL;
5557 
5558 	if (cq->destroyed) {
5559 		QL_DPRINT11(ha, "called after destroy for cq %p (icid=%d)\n",
5560 			cq, cq->icid);
5561 		return 0;
5562 	}
5563 
5564 	if (cq->cq_type == QLNXR_CQ_TYPE_GSI)
5565 		return qlnxr_gsi_poll_cq(ibcq, num_entries, wc);
5566 
5567 	spin_lock_irqsave(&cq->cq_lock, flags);
5568 
5569 	old_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
5570 
5571 	while (num_entries && is_valid_cqe(cq, cqe)) {
5572 		int cnt = 0;
5573 		struct qlnxr_qp *qp;
5574 		struct rdma_cqe_requester *resp_cqe;
5575 		enum rdma_cqe_type cqe_type;
5576 
5577 		/* prevent speculative reads of any field of CQE */
5578 		rmb();
5579 
5580 		resp_cqe = &cqe->req;
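		/* the CQE's qp_handle carries the QP's kernel pointer split
		 * into hi/lo 32-bit halves; HILO_U64() reassembles it
		 */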
5581 		qp = (struct qlnxr_qp *)(uintptr_t)HILO_U64(resp_cqe->qp_handle.hi,
5582 						resp_cqe->qp_handle.lo);
5583 
5584 		if (!qp) {
5585 			QL_DPRINT11(ha, "qp = NULL\n");
5586 			break;
5587 		}
5588 
5589 		wc->qp = &qp->ibqp;
5590 
5591 		cqe_type = GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
5592 
5593 		switch (cqe_type) {
5594 		case RDMA_CQE_TYPE_REQUESTER:
5595 			cnt = qlnxr_poll_cq_req(dev, qp, cq, num_entries,
5596 					wc, &cqe->req);
5597 			try_consume_req_cqe(cq, qp, &cqe->req, &update);
5598 			break;
5599 		case RDMA_CQE_TYPE_RESPONDER_RQ:
5600 			cnt = qlnxr_poll_cq_resp(dev, qp, cq, num_entries,
5601 					wc, &cqe->resp, &update);
5602 			break;
5603 		case RDMA_CQE_TYPE_RESPONDER_SRQ:
5604 			cnt = qlnxr_poll_cq_resp_srq(dev, qp, cq, num_entries,
5605 					wc, &cqe->resp, &update);
5606 			break;
5607 		case RDMA_CQE_TYPE_INVALID:
5608 		default:
5609 			QL_DPRINT11(ha, "cqe type [0x%x] invalid\n", cqe_type);
5610 			break;
5611 		}
5612 		num_entries -= cnt;
5613 		wc += cnt;
5614 		done += cnt;
5615 
5616 		cqe = cq->latest_cqe;
5617 	}
5618 	new_cons = ecore_chain_get_cons_idx_u32(&cq->pbl);
5619 
5620 	cq->cq_cons += new_cons - old_cons;
5621 
5622 	if (update) {
5623 		/* the doorbell notifies about the latest VALID entry,
5624 		 * but the chain already points to the next INVALID one
5625 		 */
5626 		doorbell_cq(dev, cq, cq->cq_cons - 1, cq->arm_flags);
5627 		QL_DPRINT12(ha, "cq = %p cons = 0x%x "
5628 			"arm_flags = 0x%x db.icid = 0x%x\n", cq,
5629 			(cq->cq_cons - 1), cq->arm_flags, cq->db.data.icid);
5630 	}
5631 
5632 	spin_unlock_irqrestore(&cq->cq_lock, flags);
5633 
5634 	QL_DPRINT12(ha, "exit\n");
5635 
5636 	return done;
5637 }
5638 
5639 int
5640 qlnxr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
5641 {
5642 	struct qlnxr_cq *cq = get_qlnxr_cq(ibcq);
5643 	unsigned long sflags;
5644 	struct qlnxr_dev *dev;
5645 	qlnx_host_t	*ha;
5646 
5647 	dev = get_qlnxr_dev((ibcq->device));
5648 	ha = dev->ha;
5649 
5650 	QL_DPRINT12(ha, "enter ibcq = %p flags = 0x%x "
5651 		"cq = %p cons = 0x%x cq_type = 0x%x\n", ibcq,
5652 		flags, cq, cq->cq_cons, cq->cq_type);
5653 
5654 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
5655 		return -EINVAL;
5656 
5657 	if (cq->destroyed) {
5658 		QL_DPRINT11(ha, "cq was already destroyed cq = %p icid=%d\n",
5659 			cq, cq->icid);
5660 		return -EINVAL;
5661 	}
5662 
5663 	if (cq->cq_type == QLNXR_CQ_TYPE_GSI) {
5664 		return 0;
5665 	}
5666 
5667 	spin_lock_irqsave(&cq->cq_lock, sflags);
5668 
5669 	cq->arm_flags = 0;
5670 
5671 	if (flags & IB_CQ_SOLICITED) {
5672 		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
5673 	}
5674 	if (flags & IB_CQ_NEXT_COMP) {
5675 		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
5676 	}
5677 
5678 	doorbell_cq(dev, cq, (cq->cq_cons - 1), cq->arm_flags);
5679 
5680 	spin_unlock_irqrestore(&cq->cq_lock, sflags);
5681 
5682 	QL_DPRINT12(ha, "exit ibcq = %p flags = 0x%x\n", ibcq, flags);
5683 	return 0;
5684 }
5685 
5686 static struct qlnxr_mr *
5687 __qlnxr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
5688 {
5689 	struct qlnxr_pd *pd = get_qlnxr_pd(ibpd);
5690 	struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
5691 	struct qlnxr_mr *mr;
5692 	int		rc = -ENOMEM;
5693 	qlnx_host_t	*ha;
5694 
5695 	ha = dev->ha;
5696 
5697 	QL_DPRINT12(ha, "enter ibpd = %p pd = %p "
5698 		" pd_id = %d max_page_list_len = %d\n",
5699 		ibpd, pd, pd->pd_id, max_page_list_len);
5700 
5701 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
5702 	if (!mr) {
5703 		QL_DPRINT11(ha, "kzalloc(mr) failed\n");
5704 		return ERR_PTR(rc);
5705 	}
5706 
5707 	mr->dev = dev;
5708 	mr->type = QLNXR_MR_FRMR;
5709 
5710 	rc = qlnxr_init_mr_info(dev, &mr->info, max_page_list_len,
5711 				  1 /* allow dual layer pbl */);
5712 	if (rc) {
5713 		QL_DPRINT11(ha, "qlnxr_init_mr_info failed\n");
5714 		goto err0;
5715 	}
5716 
5717 	rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
5718 	if (rc) {
5719 		QL_DPRINT11(ha, "ecore_rdma_alloc_tid failed\n");
5720 		goto err0;
5721 	}
5722 
5723 	/* index only, 18 bit long, lkey = itid << 8 | key */
5724 	mr->hw_mr.tid_type = ECORE_RDMA_TID_FMR;
5725 	mr->hw_mr.key = 0;
5726 	mr->hw_mr.pd = pd->pd_id;
5727 	mr->hw_mr.local_read = 1;
5728 	mr->hw_mr.local_write = 0;
5729 	mr->hw_mr.remote_read = 0;
5730 	mr->hw_mr.remote_write = 0;
5731 	mr->hw_mr.remote_atomic = 0;
5732 	mr->hw_mr.mw_bind = false; /* TBD MW BIND */
5733 	mr->hw_mr.pbl_ptr = 0; /* Will be supplied during post */
5734 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
5735 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
5736 	mr->hw_mr.fbo = 0;
5737 	mr->hw_mr.length = 0;
5738 	mr->hw_mr.vaddr = 0;
5739 	mr->hw_mr.zbva = false; /* TBD figure when this should be true */
5740 	mr->hw_mr.phy_mr = true; /* Fast MR - True, Regular Register False */
5741 	mr->hw_mr.dma_mr = false;
5742 
5743 	rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
5744 	if (rc) {
5745 		QL_DPRINT11(ha, "ecore_rdma_register_tid failed\n");
5746 		goto err1;
5747 	}
5748 
5749 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
5750 	mr->ibmr.rkey = mr->ibmr.lkey;
5751 
5752 	QL_DPRINT12(ha, "exit mr = %p mr->ibmr.lkey = 0x%x\n",
5753 		mr, mr->ibmr.lkey);
5754 
5755 	return mr;
5756 
5757 err1:
5758 	ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
5759 err0:
5760 	kfree(mr);
5761 
5762 	QL_DPRINT12(ha, "exit\n");
5763 
5764 	return ERR_PTR(rc);
5765 }
5766 
5767 #if __FreeBSD_version >= 1102000
5768 
5769 struct ib_mr *
5770 qlnxr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
5771     u32 max_num_sg, struct ib_udata *udata)
5772 {
5773 	struct qlnxr_dev *dev;
5774 	struct qlnxr_mr *mr;
5775 	qlnx_host_t     *ha;
5776 
5777 	dev = get_qlnxr_dev(ibpd->device);
5778 	ha = dev->ha;
5779 
5780 	QL_DPRINT12(ha, "enter\n");
5781 
5782 	if (mr_type != IB_MR_TYPE_MEM_REG)
5783 		return ERR_PTR(-EINVAL);
5784 
5785 	mr = __qlnxr_alloc_mr(ibpd, max_num_sg);
5786 
5787 	if (IS_ERR(mr))
5788 		return ERR_PTR(-EINVAL);
5789 
5790 	QL_DPRINT12(ha, "exit mr = %p &mr->ibmr = %p\n", mr, &mr->ibmr);
5791 
5792 	return &mr->ibmr;
5793 }
5794 
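/*
 * qlnxr_set_page - ib_sg_to_pages() callback: store one page address as
 * a little-endian PBE in the next free slot of the MR's PBL table.
 */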
5795 static int
5796 qlnxr_set_page(struct ib_mr *ibmr, u64 addr)
5797 {
5798 	struct qlnxr_mr *mr = get_qlnxr_mr(ibmr);
5799 	struct qlnxr_pbl *pbl_table;
5800 	struct regpair *pbe;
5801 	struct qlnxr_dev *dev;
5802 	qlnx_host_t     *ha;
5803 	u32 pbes_in_page;
5804 
5805 	dev = mr->dev;
5806 	ha = dev->ha;
5807 
5808 	if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
5809 		QL_DPRINT12(ha, "failed, pbl is full: mr->npages = %d\n", mr->npages);
5810 		return -ENOMEM;
5811 	}
5812 
5813 	QL_DPRINT12(ha, "mr->npages %d addr = %p enter\n", mr->npages,
5814 		((void *)addr));
5815 
5816 	pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
5817 	pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
5818 	pbe = (struct regpair *)pbl_table->va;
5819 	pbe +=  mr->npages % pbes_in_page;
5820 	pbe->lo = cpu_to_le32((u32)addr);
5821 	pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
5822 
5823 	mr->npages++;
5824 
5825 	QL_DPRINT12(ha, "mr->npages %d addr = %p exit \n", mr->npages,
5826 		((void *)addr));
5827 	return 0;
5828 }
5829 
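/*
 * qlnxr_map_mr_sg - rebuild the MR page list from a scatterlist: reset
 * npages and let ib_sg_to_pages() invoke qlnxr_set_page() per page.
 */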
5830 int
5831 qlnxr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
5832 	int sg_nents, unsigned int *sg_offset)
5833 {
5834 	int             ret;
5835 	struct qlnxr_mr *mr = get_qlnxr_mr(ibmr);
5836 	qlnx_host_t     *ha;
5837 
5838 	if (mr == NULL)
5839 		return (-1);
5840 
5841 	if (mr->dev == NULL)
5842 		return (-1);
5843 
5844 	ha = mr->dev->ha;
5845 
5846 	QL_DPRINT12(ha, "enter\n");
5847 
5848 	mr->npages = 0;
5849 	qlnx_handle_completed_mrs(mr->dev, &mr->info);
5850 
5851 	ret = ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qlnxr_set_page);
5852 
5853 	QL_DPRINT12(ha, "exit ret = %d\n", ret);
5854 
5855 	return (ret);
5856 }
5857 
5858 #else
5859 
5860 struct ib_mr *
5861 qlnxr_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
5862 {
5863 	struct qlnxr_dev *dev;
5864 	struct qlnxr_mr *mr;
5865 	qlnx_host_t	*ha;
5866 	struct ib_mr *ibmr = NULL;
5867 
5868 	dev = get_qlnxr_dev((ibpd->device));
5869 	ha = dev->ha;
5870 
5871 	QL_DPRINT12(ha, "enter\n");
5872 
5873 	mr = __qlnxr_alloc_mr(ibpd, max_page_list_len);
5874 
5875 	if (IS_ERR(mr)) {
5876 		ibmr = ERR_PTR(-EINVAL);
5877 	} else {
5878 		ibmr = &mr->ibmr;
5879 	}
5880 
5881 	QL_DPRINT12(ha, "exit %p\n", ibmr);
5882 	return (ibmr);
5883 }
5884 
5885 void
5886 qlnxr_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
5887 {
5888 	struct qlnxr_fast_reg_page_list *frmr_list;
5889 
5890 	frmr_list = get_qlnxr_frmr_list(page_list);
5891 
5892 	free_mr_info(frmr_list->dev, &frmr_list->info);
5893 
5894 	kfree(frmr_list->ibfrpl.page_list);
5895 	kfree(frmr_list);
5896 
5897 	return;
5898 }
5899 
5900 struct ib_fast_reg_page_list *
5901 qlnxr_alloc_frmr_page_list(struct ib_device *ibdev, int page_list_len)
5902 {
5903 	struct qlnxr_fast_reg_page_list *frmr_list = NULL;
5904 	struct qlnxr_dev		*dev;
5905 	int				size = page_list_len * sizeof(u64);
5906 	int				rc = -ENOMEM;
5907 	qlnx_host_t			*ha;
5908 
5909 	dev = get_qlnxr_dev(ibdev);
5910 	ha = dev->ha;
5911 
5912 	QL_DPRINT12(ha, "enter\n");
5913 
5914 	frmr_list = kzalloc(sizeof(*frmr_list), GFP_KERNEL);
5915 	if (!frmr_list) {
5916 		QL_DPRINT11(ha, "kzalloc(frmr_list) failed\n");
5917 		goto err;
5918 	}
5919 
5920 	frmr_list->dev = dev;
5921 	frmr_list->ibfrpl.page_list = kzalloc(size, GFP_KERNEL);
5922 	if (!frmr_list->ibfrpl.page_list) {
5923 		QL_DPRINT11(ha, "kzalloc(frmr_list->ibfrpl.page_list) failed\n");
5924 		goto err0;
5925 	}
5926 
5927 	rc = qlnxr_init_mr_info(dev, &frmr_list->info, page_list_len,
5928 			  1 /* allow dual layer pbl */);
5929 	if (rc)
5930 		goto err1;
5931 
5932 	QL_DPRINT12(ha, "exit %p\n", &frmr_list->ibfrpl);
5933 
5934 	return &frmr_list->ibfrpl;
5935 
5936 err1:
5937 	kfree(frmr_list->ibfrpl.page_list);
5938 err0:
5939 	kfree(frmr_list);
5940 err:
5941 	QL_DPRINT12(ha, "exit with error\n");
5942 
5943 	return ERR_PTR(rc);
5944 }
5945 
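/*
 * qlnxr_validate_phys_buf_list - all physical buffers in the list must
 * share the same non-zero size; the summed length of the list is
 * returned in *total_size.
 */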
5946 static int
5947 qlnxr_validate_phys_buf_list(qlnx_host_t *ha, struct ib_phys_buf *buf_list,
5948 	int buf_cnt, uint64_t *total_size)
5949 {
5950 	u64 size = 0;
5951 
5952 	*total_size = 0;
5953 
5954 	if (!buf_cnt || buf_list == NULL) {
5955 		QL_DPRINT11(ha,
5956 			"failed buf_list = %p buf_cnt = %d\n", buf_list, buf_cnt);
5957 		return (-1);
5958 	}
5959 
5960 	size = buf_list->size;
5961 
5962 	if (!size) {
5963 		QL_DPRINT11(ha,
5964 			"failed buf_list = %p buf_cnt = %d"
5965 			" buf_list->size = 0\n", buf_list, buf_cnt);
5966 		return (-1);
5967 	}
5968 
5969 	while (buf_cnt) {
5970 		*total_size += buf_list->size;
5971 
5972 		if (buf_list->size != size) {
5973 			QL_DPRINT11(ha,
5974 				"failed buf_list = %p buf_cnt = %d"
5975 				" all buffers should have same size\n",
5976 				buf_list, buf_cnt);
5977 			return (-1);
5978 		}
5979 
5980 		buf_list++;
5981 		buf_cnt--;
5982 	}
5983 	return (0);
5984 }
5985 
5986 static size_t
5987 qlnxr_get_num_pages(qlnx_host_t *ha, struct ib_phys_buf *buf_list,
5988 	int buf_cnt)
5989 {
5990 	int	i;
5991 	size_t	num_pages = 0;
5992 	u64	size;
5993 
5994 	for (i = 0; i < buf_cnt; i++) {
5995 		size = 0;
5996 		while (size < buf_list->size) {
5997 			size += PAGE_SIZE;
5998 			num_pages++;
5999 		}
6000 		buf_list++;
6001 	}
6002 	return (num_pages);
6003 }
6004 
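/*
 * qlnxr_populate_phys_mem_pbls - write one little-endian PBE per
 * PAGE_SIZE chunk of every buffer, spilling over to the next pbl of the
 * table whenever the current one fills up.
 */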
6005 static void
6006 qlnxr_populate_phys_mem_pbls(struct qlnxr_dev *dev,
6007 	struct ib_phys_buf *buf_list, int buf_cnt,
6008 	struct qlnxr_pbl *pbl, struct qlnxr_pbl_info *pbl_info)
6009 {
6010 	struct regpair		*pbe;
6011 	struct qlnxr_pbl	*pbl_tbl;
6012 	int			pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
6013 	qlnx_host_t		*ha;
6014 	int			i;
6015 	u64			pbe_addr;
6016 
6017 	ha = dev->ha;
6018 
6019 	QL_DPRINT12(ha, "enter\n");
6020 
6021 	if (!pbl_info) {
6022 		QL_DPRINT11(ha, "PBL_INFO not initialized\n");
6023 		return;
6024 	}
6025 
6026 	if (!pbl_info->num_pbes) {
6027 		QL_DPRINT11(ha, "pbl_info->num_pbes == 0\n");
6028 		return;
6029 	}
6030 
6031 	/* If we have a two-layered pbl, the first pbl points to the rest
6032 	 * of the pbls and the first entry lies in the second pbl of the table
6033 	 */
6034 	if (pbl_info->two_layered)
6035 		pbl_tbl = &pbl[1];
6036 	else
6037 		pbl_tbl = pbl;
6038 
6039 	pbe = (struct regpair *)pbl_tbl->va;
6040 	if (!pbe) {
6041 		QL_DPRINT12(ha, "pbe is NULL\n");
6042 		return;
6043 	}
6044 
6045 	pbe_cnt = 0;
6046 
6047 	for (i = 0; i < buf_cnt; i++) {
6048 		pages = buf_list->size >> PAGE_SHIFT;
6049 
6050 		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
6051 			/* store the page address in pbe */
6052 
6053 			pbe_addr = buf_list->addr + (PAGE_SIZE * pg_cnt);
6054 
6055 			pbe->lo = cpu_to_le32((u32)pbe_addr);
6056 			pbe->hi = cpu_to_le32(((u32)(pbe_addr >> 32)));
6057 
6058 			QL_DPRINT12(ha, "Populate pbl table:"
6059 				" pbe->addr=0x%x:0x%x "
6060 				" pbe_cnt = %d total_num_pbes=%d"
6061 				" pbe=%p\n", pbe->lo, pbe->hi, pbe_cnt,
6062 				total_num_pbes, pbe);
6063 
6064 			pbe_cnt++;
6065 			total_num_pbes++;
6066 			pbe++;
6067 
6068 			if (total_num_pbes == pbl_info->num_pbes)
6069 				return;
6070 
6071 			/* if the current pbl is full of pbes,
6072 			 * move on to the next pbl. */
6073 
6074 			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
6075 				pbl_tbl++;
6076 				pbe = (struct regpair *)pbl_tbl->va;
6077 				pbe_cnt = 0;
6078 			}
6079 		}
6080 		buf_list++;
6081 	}
6082 	QL_DPRINT12(ha, "exit\n");
6083 	return;
6084 }
6085 
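/*
 * qlnxr_reg_kernel_mr - register a kernel physical-buffer MR: validate
 * the buffer list, size and populate the PBLs, then allocate and
 * register a TID.  An rkey is exposed only when remote access was
 * requested in 'acc'.
 */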
6086 struct ib_mr *
6087 qlnxr_reg_kernel_mr(struct ib_pd *ibpd,
6088 	struct ib_phys_buf *buf_list,
6089 	int buf_cnt, int acc, u64 *iova_start)
6090 {
6091 	int		rc = -ENOMEM;
6092 	struct qlnxr_dev *dev = get_qlnxr_dev((ibpd->device));
6093 	struct qlnxr_mr *mr;
6094 	struct qlnxr_pd *pd;
6095 	qlnx_host_t	*ha;
6096 	size_t		num_pages = 0;
6097 	uint64_t	length;
6098 
6099 	ha = dev->ha;
6100 
6101 	QL_DPRINT12(ha, "enter\n");
6102 
6103 	pd = get_qlnxr_pd(ibpd);
6104 
6105 	QL_DPRINT12(ha, "pd = %d buf_list = %p, buf_cnt = %d,"
6106 		" iova_start = %p, acc = %d\n",
6107 		pd->pd_id, buf_list, buf_cnt, iova_start, acc);
6108 
6109 	//if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
6110 	//	QL_DPRINT11(ha, "(acc & IB_ACCESS_REMOTE_WRITE &&"
6111 	//		" !(acc & IB_ACCESS_LOCAL_WRITE))\n");
6112 	//	return ERR_PTR(-EINVAL);
6113 	//}
6114 
6115 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
6116 	if (!mr) {
6117 		QL_DPRINT11(ha, "kzalloc(mr) failed\n");
6118 		return ERR_PTR(rc);
6119 	}
6120 
6121 	mr->type = QLNXR_MR_KERNEL;
6122 	mr->iova_start = iova_start;
6123 
6124 	rc = qlnxr_validate_phys_buf_list(ha, buf_list, buf_cnt, &length);
6125 	if (rc)
6126 		goto err0;
6127 
6128 	num_pages = qlnxr_get_num_pages(ha, buf_list, buf_cnt);
6129 	if (!num_pages)
6130 		goto err0;
6131 
6132 	rc = qlnxr_init_mr_info(dev, &mr->info, num_pages, 1);
6133 	if (rc) {
6134 		QL_DPRINT11(ha,
6135 			"qlnxr_init_mr_info failed [%d]\n", rc);
6136 		goto err1;
6137 	}
6138 
6139 	qlnxr_populate_phys_mem_pbls(dev, buf_list, buf_cnt, mr->info.pbl_table,
6140 		   &mr->info.pbl_info);
6141 
6142 	rc = ecore_rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
6143 
6144 	if (rc) {
6145 		QL_DPRINT11(ha, "roce alloc tid returned an error %d\n", rc);
6146 		goto err1;
6147 	}
6148 
6149 	/* index only, 18 bit long, lkey = itid << 8 | key */
6150 	mr->hw_mr.tid_type = ECORE_RDMA_TID_REGISTERED_MR;
6151 	mr->hw_mr.key = 0;
6152 	mr->hw_mr.pd = pd->pd_id;
6153 	mr->hw_mr.local_read = 1;
6154 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
6155 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
6156 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
6157 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
6158 	mr->hw_mr.mw_bind = false; /* TBD MW BIND */
6159 	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
6160 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
6161 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
6162 	mr->hw_mr.page_size_log = ilog2(PAGE_SIZE); /* for the MR pages */
6163 
6164 	mr->hw_mr.fbo = 0;
6165 
6166 	mr->hw_mr.length = length;
6167 	mr->hw_mr.vaddr = *iova_start; /* the requested I/O virtual address */
6168 	mr->hw_mr.zbva = false; /* TBD figure when this should be true */
6169 	mr->hw_mr.phy_mr = false; /* Fast MR - True, Regular Register False */
6170 	mr->hw_mr.dma_mr = false;
6171 
6172 	rc = ecore_rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
6173 	if (rc) {
6174 		QL_DPRINT11(ha, "roce register tid returned an error %d\n", rc);
6175 		goto err2;
6176 	}
6177 
6178 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
6179 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
6180 		mr->hw_mr.remote_atomic)
6181 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
6182 
6183 	QL_DPRINT12(ha, "lkey: %x\n", mr->ibmr.lkey);
6184 
6185 	return (&mr->ibmr);
6186 
6187 err2:
6188 	ecore_rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
6189 err1:
6190 	qlnxr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
6191 err0:
6192 	kfree(mr);
6193 
6194 	QL_DPRINT12(ha, "exit [%d]\n", rc);
6195 	return (ERR_PTR(rc));
6196 }
6197 
6198 #endif /* #if __FreeBSD_version >= 1102000 */
6199 
6200 int
6201 qlnxr_create_ah(struct ib_ah *ibah,
6202 	struct ib_ah_attr *attr, u32 flags,
6203 	struct ib_udata *udata)
6204 {
6205 	struct qlnxr_dev *dev;
6206 	qlnx_host_t	*ha;
6207 	struct qlnxr_ah *ah = get_qlnxr_ah(ibah);
6208 
6209 	dev = get_qlnxr_dev(ibah->device);
6210 	ha = dev->ha;
6211 
6212 	QL_DPRINT12(ha, "in create_ah\n");
6213 
6214 	ah->attr = *attr;
6215 
6216 	return (0);
6217 }
6218 
6219 void
6220 qlnxr_destroy_ah(struct ib_ah *ibah, u32 flags)
6221 {
6222 }
6223 
6224 int
6225 qlnxr_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
6226 {
6227 	struct qlnxr_dev *dev;
6228 	qlnx_host_t     *ha;
6229 
6230 	dev = get_qlnxr_dev((ibah->device));
6231 	ha = dev->ha;
6232 	QL_DPRINT12(ha, "Query AH not supported\n");
6233 	return -EINVAL;
6234 }
6235 
6236 int
6237 qlnxr_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
6238 {
6239 	struct qlnxr_dev *dev;
6240 	qlnx_host_t     *ha;
6241 
6242 	dev = get_qlnxr_dev((ibah->device));
6243 	ha = dev->ha;
6244 	QL_DPRINT12(ha, "Modify AH not supported\n");
6245 	return -ENOSYS;
6246 }
6247 
6248 #if __FreeBSD_version >= 1102000
6249 int
6250 qlnxr_process_mad(struct ib_device *ibdev,
6251 		int process_mad_flags,
6252 		u8 port_num,
6253 		const struct ib_wc *in_wc,
6254 		const struct ib_grh *in_grh,
6255 		const struct ib_mad_hdr *mad_hdr,
6256 		size_t in_mad_size,
6257 		struct ib_mad_hdr *out_mad,
6258 		size_t *out_mad_size,
6259 		u16 *out_mad_pkey_index)
6260 
6261 #else
6262 
6263 int
6264 qlnxr_process_mad(struct ib_device *ibdev,
6265                         int process_mad_flags,
6266                         u8 port_num,
6267                         struct ib_wc *in_wc,
6268                         struct ib_grh *in_grh,
6269                         struct ib_mad *in_mad,
6270                         struct ib_mad *out_mad)
6271 
6272 #endif /* #if __FreeBSD_version >= 1102000 */
6273 {
6274 	struct qlnxr_dev *dev;
6275 	qlnx_host_t	*ha;
6276 
6277 	dev = get_qlnxr_dev(ibdev);
6278 	ha = dev->ha;
6279 	QL_DPRINT12(ha, "process mad not supported\n");
6280 
6281 	return -ENOSYS;
6282 //	QL_DPRINT12(ha, "qlnxr_process_mad in_mad %x %x %x %x %x %x %x %x\n",
6283 //               in_mad->mad_hdr.attr_id, in_mad->mad_hdr.base_version,
6284 //               in_mad->mad_hdr.attr_mod, in_mad->mad_hdr.class_specific,
6285 //               in_mad->mad_hdr.class_version, in_mad->mad_hdr.method,
6286 //               in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.status);
6287 
6288 //	return IB_MAD_RESULT_SUCCESS;
6289 }
6290 
6291 #if __FreeBSD_version >= 1102000
6292 int
6293 qlnxr_get_port_immutable(struct ib_device *ibdev, u8 port_num,
6294 	struct ib_port_immutable *immutable)
6295 {
6296 	struct qlnxr_dev        *dev;
6297 	qlnx_host_t             *ha;
6298 	struct ib_port_attr     attr;
6299 	int                     err;
6300 
6301 	dev = get_qlnxr_dev(ibdev);
6302 	ha = dev->ha;
6303 
6304 	QL_DPRINT12(ha, "enter\n");
6305 
6306 	err = qlnxr_query_port(ibdev, port_num, &attr);
6307 	if (err)
6308 		return err;
6309 
6310 	if (QLNX_IS_IWARP(dev)) {
6311 		immutable->pkey_tbl_len = 1;
6312 		immutable->gid_tbl_len = 1;
6313 		immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
6314 		immutable->max_mad_size = 0;
6315 	} else {
6316 		immutable->pkey_tbl_len = attr.pkey_tbl_len;
6317 		immutable->gid_tbl_len = attr.gid_tbl_len;
6318 		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
6319 		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
6320 	}
6321 
6322 	QL_DPRINT12(ha, "exit\n");
6323 	return 0;
6324 }
6325 #endif /* #if __FreeBSD_version >= 1102000 */
6326 
6327 /***** iWARP related functions *************/
6328 
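/*
 * qlnxr_iw_mpa_request - passive side: an MPA connect request arrived on
 * a listener.  Allocate an endpoint, record the ecore ep context and
 * deliver IW_CM_EVENT_CONNECT_REQUEST upstream.  Only IPv4 is supported.
 */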
6329 static void
6330 qlnxr_iw_mpa_request(void *context,
6331 	struct ecore_iwarp_cm_event_params *params)
6332 {
6333 	struct qlnxr_iw_listener *listener = (struct qlnxr_iw_listener *)context;
6334 	struct qlnxr_dev *dev = listener->dev;
6335 	struct qlnxr_iw_ep *ep;
6336 	struct iw_cm_event event;
6337 	struct sockaddr_in *laddr;
6338 	struct sockaddr_in *raddr;
6339 	qlnx_host_t	*ha;
6340 
6341 	ha = dev->ha;
6342 
6343 	QL_DPRINT12(ha, "enter\n");
6344 
6345 	if (params->cm_info->ip_version != ECORE_TCP_IPV4) {
6346 		QL_DPRINT11(ha, "only IPv4 supported [0x%x]\n",
6347 			params->cm_info->ip_version);
6348 		return;
6349 	}
6350 
6351 	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
6352 
6353 	if (!ep) {
6354 		QL_DPRINT11(ha, "kzalloc(ep) failed\n");
6355 		return;
6356 	}
6357 
6358 	ep->dev = dev;
6359 	ep->ecore_context = params->ep_context;
6360 
6361 	memset(&event, 0, sizeof(event));
6362 
6363 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
6364 	event.status = params->status;
6365 
6366 	laddr = (struct sockaddr_in *)&event.local_addr;
6367 	raddr = (struct sockaddr_in *)&event.remote_addr;
6368 
6369 	laddr->sin_family = AF_INET;
6370 	raddr->sin_family = AF_INET;
6371 
6372 	laddr->sin_port = htons(params->cm_info->local_port);
6373 	raddr->sin_port = htons(params->cm_info->remote_port);
6374 
6375 	laddr->sin_addr.s_addr = htonl(params->cm_info->local_ip[0]);
6376 	raddr->sin_addr.s_addr = htonl(params->cm_info->remote_ip[0]);
6377 
6378 	event.provider_data = (void *)ep;
6379 	event.private_data = (void *)params->cm_info->private_data;
6380 	event.private_data_len = (u8)params->cm_info->private_data_len;
6381 
6382 #if __FreeBSD_version >= 1100000
6383 	event.ord = params->cm_info->ord;
6384 	event.ird = params->cm_info->ird;
6385 #endif /* #if __FreeBSD_version >= 1100000 */
6386 
6387 	listener->cm_id->event_handler(listener->cm_id, &event);
6388 
6389 	QL_DPRINT12(ha, "exit\n");
6390 
6391 	return;
6392 }
6393 
6394 static void
6395 qlnxr_iw_issue_event(void *context,
6396 	 struct ecore_iwarp_cm_event_params *params,
6397 	 enum iw_cm_event_type event_type,
6398 	 char *str)
6399 {
6400 	struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6401 	struct qlnxr_dev *dev = ep->dev;
6402 	struct iw_cm_event event;
6403 	qlnx_host_t	*ha;
6404 
6405 	ha = dev->ha;
6406 
6407 	QL_DPRINT12(ha, "enter\n");
6408 
6409 	memset(&event, 0, sizeof(event));
6410 	event.status = params->status;
6411 	event.event = event_type;
6412 
6413 	if (params->cm_info != NULL) {
6414 #if __FreeBSD_version >= 1100000
6415 		event.ird = params->cm_info->ird;
6416 		event.ord = params->cm_info->ord;
6417 		QL_DPRINT12(ha, "ord=[%d] \n", event.ord);
6418 		QL_DPRINT12(ha, "ird=[%d] \n", event.ird);
6419 #endif /* #if __FreeBSD_version >= 1100000 */
6420 
6421 		event.private_data_len = params->cm_info->private_data_len;
6422 		event.private_data = (void *)params->cm_info->private_data;
6423 		QL_DPRINT12(ha, "private_data_len=[%d] \n",
6424 			event.private_data_len);
6425 	}
6426 
6427 	QL_DPRINT12(ha, "event=[%d] %s\n", event.event, str);
6428 	QL_DPRINT12(ha, "status=[%d] \n", event.status);
6429 
6430 	if (ep) {
6431 		if (ep->cm_id)
6432 			ep->cm_id->event_handler(ep->cm_id, &event);
6433 		else
6434 			QL_DPRINT11(ha, "ep->cm_id == NULL \n");
6435 	} else {
6436 		QL_DPRINT11(ha, "ep == NULL \n");
6437 	}
6438 
6439 	QL_DPRINT12(ha, "exit\n");
6440 
6441 	return;
6442 }
6443 
6444 static void
6445 qlnxr_iw_close_event(void *context,
6446 	 struct ecore_iwarp_cm_event_params *params)
6447 {
6448 	struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6449 	struct qlnxr_dev *dev = ep->dev;
6450 	qlnx_host_t	*ha;
6451 
6452 	ha = dev->ha;
6453 
6454 	QL_DPRINT12(ha, "enter\n");
6455 
6456 	if (ep->cm_id) {
6457 		qlnxr_iw_issue_event(context,
6458 				    params,
6459 				    IW_CM_EVENT_CLOSE,
6460 				    "IW_CM_EVENT_CLOSE");
6461 		ep->cm_id->rem_ref(ep->cm_id);
6462 		ep->cm_id = NULL;
6463 	}
6464 
6465 	QL_DPRINT12(ha, "exit\n");
6466 
6467 	return;
6468 }
6469 
6470 #if __FreeBSD_version >= 1102000
6471 
6472 static void
6473 qlnxr_iw_passive_complete(void *context,
6474         struct ecore_iwarp_cm_event_params *params)
6475 {
6476         struct qlnxr_iw_ep      *ep = (struct qlnxr_iw_ep *)context;
6477         struct qlnxr_dev        *dev = ep->dev;
6478         qlnx_host_t             *ha;
6479 
6480         ha = dev->ha;
6481 
6482         /* We will only reach the following state if MPA_REJECT was called on
6483          * passive. In this case there will be no associated QP.
6484          */
6485         if ((params->status == -ECONNREFUSED) && (ep->qp == NULL)) {
6486                 QL_DPRINT11(ha, "PASSIVE connection refused releasing ep...\n");
6487                 kfree(ep);
6488                 return;
6489         }
6490 
6491         /* We always issue an established event, however, ofed does not look
6492          * at event code for established. So if there was a failure, we follow
6493          * with close...
6494          */
6495         qlnxr_iw_issue_event(context,
6496                 params,
6497                 IW_CM_EVENT_ESTABLISHED,
6498                 "IW_CM_EVENT_ESTABLISHED");
6499 
6500         if (params->status < 0) {
6501                 qlnxr_iw_close_event(context, params);
6502         }
6503 
6504         return;
6505 }
6506 
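/*
 * Disconnect processing is deferred to the dev->iwarp_wq workqueue: the
 * worker reports IW_CM_EVENT_DISCONNECT upstream and then modifies the
 * QP to SQD for a graceful disconnect (status == 0) or to ERR otherwise.
 */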
6507 struct qlnxr_discon_work {
6508         struct work_struct work;
6509         struct qlnxr_iw_ep *ep;
6510         enum ecore_iwarp_event_type event;
6511         int status;
6512 };
6513 
6514 static void
6515 qlnxr_iw_disconnect_worker(struct work_struct *work)
6516 {
6517         struct qlnxr_discon_work *dwork =
6518                 container_of(work, struct qlnxr_discon_work, work);
6519         struct ecore_rdma_modify_qp_in_params qp_params = { 0 };
6520         struct qlnxr_iw_ep *ep = dwork->ep;
6521         struct qlnxr_dev *dev = ep->dev;
6522         struct qlnxr_qp *qp = ep->qp;
6523         struct iw_cm_event event;
6524 
6525         if (qp->destroyed) {
6526                 kfree(dwork);
6527                 qlnxr_iw_qp_rem_ref(&qp->ibqp);
6528                 return;
6529         }
6530 
6531         memset(&event, 0, sizeof(event));
6532         event.status = dwork->status;
6533         event.event = IW_CM_EVENT_DISCONNECT;
6534 
6535         /* Success means a graceful disconnect was requested: modifying to
6536          * SQD is translated to graceful disconnect; otherwise a reset is sent.
6537          */
6538         if (dwork->status)
6539                 qp_params.new_state = ECORE_ROCE_QP_STATE_ERR;
6540         else
6541                 qp_params.new_state = ECORE_ROCE_QP_STATE_SQD;
6542 
6543         kfree(dwork);
6544 
6545         if (ep->cm_id)
6546                 ep->cm_id->event_handler(ep->cm_id, &event);
6547 
6548         SET_FIELD(qp_params.modify_flags,
6549                   ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
6550 
6551         ecore_rdma_modify_qp(dev->rdma_ctx, qp->ecore_qp, &qp_params);
6552 
6553         qlnxr_iw_qp_rem_ref(&qp->ibqp);
6554 
6555         return;
6556 }
6557 
6558 void
6559 qlnxr_iw_disconnect_event(void *context,
6560         struct ecore_iwarp_cm_event_params *params)
6561 {
6562         struct qlnxr_discon_work *work;
6563         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6564         struct qlnxr_dev *dev = ep->dev;
6565         struct qlnxr_qp *qp = ep->qp;
6566 
6567         work = kzalloc(sizeof(*work), GFP_ATOMIC);
6568         if (!work)
6569                 return;
6570 
6571         qlnxr_iw_qp_add_ref(&qp->ibqp);
6572         work->ep = ep;
6573         work->event = params->event;
6574         work->status = params->status;
6575 
6576         INIT_WORK(&work->work, qlnxr_iw_disconnect_worker);
6577         queue_work(dev->iwarp_wq, &work->work);
6578 
6579         return;
6580 }
6581 
6582 #endif /* #if __FreeBSD_version >= 1102000 */
6583 
6584 static int
6585 qlnxr_iw_mpa_reply(void *context,
6586 	struct ecore_iwarp_cm_event_params *params)
6587 {
6588 	struct qlnxr_iw_ep	*ep = (struct qlnxr_iw_ep *)context;
6589 	struct qlnxr_dev	*dev = ep->dev;
6590 	struct ecore_iwarp_send_rtr_in rtr_in;
6591 	int			rc;
6592 	qlnx_host_t		*ha;
6593 
6594 	ha = dev->ha;
6595 
6596 	QL_DPRINT12(ha, "enter\n");
6597 
6598 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
6599 		return -EINVAL;
6600 
6601 	bzero(&rtr_in, sizeof(struct ecore_iwarp_send_rtr_in));
6602 	rtr_in.ep_context = params->ep_context;
6603 
6604 	rc = ecore_iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
6605 
6606 	QL_DPRINT12(ha, "exit rc = %d\n", rc);
6607 	return rc;
6608 }
6609 
6610 void
6611 qlnxr_iw_qp_event(void *context,
6612 	struct ecore_iwarp_cm_event_params *params,
6613 	enum ib_event_type ib_event,
6614 	char *str)
6615 {
6616 	struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6617 	struct qlnxr_dev *dev = ep->dev;
6618 	struct ib_qp *ibqp = &(ep->qp->ibqp);
6619 	struct ib_event event;
6620 	qlnx_host_t	*ha;
6621 
6622 	ha = dev->ha;
6623 
6624 	QL_DPRINT12(ha,
6625 		"[context, event, event_handler] = [%p, 0x%x, %s, %p] enter\n",
6626 		context, params->event, str, ibqp->event_handler);
6627 
6628 	if (ibqp->event_handler) {
6629 		event.event = ib_event;
6630 		event.device = ibqp->device;
6631 		event.element.qp = ibqp;
6632 		ibqp->event_handler(&event, ibqp->qp_context);
6633 	}
6634 
6635 	return;
6636 }
6637 
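/*
 * qlnxr_iw_event_handler - central dispatcher for ecore iWARP CM events;
 * 'context' is the listener for MPA_REQUEST and the endpoint for all
 * other events.
 */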
6638 int
6639 qlnxr_iw_event_handler(void *context,
6640 	struct ecore_iwarp_cm_event_params *params)
6641 {
6642 	struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)context;
6643 	struct qlnxr_dev *dev = ep->dev;
6644 	qlnx_host_t	*ha;
6645 
6646 	ha = dev->ha;
6647 
6648 	QL_DPRINT12(ha, "[context, event] = [%p, 0x%x] "
6649 		"enter\n", context, params->event);
6650 
6651 	switch (params->event) {
6652 	/* Passive side request received */
6653 	case ECORE_IWARP_EVENT_MPA_REQUEST:
6654 		qlnxr_iw_mpa_request(context, params);
6655 		break;
6656 
6657 	case ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY:
6658 		qlnxr_iw_mpa_reply(context, params);
6659 		break;
6660 
6661 	/* Passive side established ( ack on mpa response ) */
6662 	case ECORE_IWARP_EVENT_PASSIVE_COMPLETE:
6663 
6664 #if __FreeBSD_version >= 1102000
6665 
6666 		ep->during_connect = 0;
6667 		qlnxr_iw_passive_complete(context, params);
6668 
6669 #else
6670 		qlnxr_iw_issue_event(context,
6671 				    params,
6672 				    IW_CM_EVENT_ESTABLISHED,
6673 				    "IW_CM_EVENT_ESTABLISHED");
6674 #endif /* #if __FreeBSD_version >= 1102000 */
6675 		break;
6676 
6677 	/* Active side reply received */
6678 	case ECORE_IWARP_EVENT_ACTIVE_COMPLETE:
6679 		ep->during_connect = 0;
6680 		qlnxr_iw_issue_event(context,
6681 				    params,
6682 				    IW_CM_EVENT_CONNECT_REPLY,
6683 				    "IW_CM_EVENT_CONNECT_REPLY");
6684 		if (params->status < 0) {
6685 			/* drop the reference taken on cm_id in qlnxr_iw_connect() */
6686 
6687 			ep->cm_id->rem_ref(ep->cm_id);
6688 			ep->cm_id = NULL;
6689 		}
6690 		break;
6691 
6692 	case ECORE_IWARP_EVENT_DISCONNECT:
6693 
6694 #if __FreeBSD_version >= 1102000
6695 		qlnxr_iw_disconnect_event(context, params);
6696 #else
6697 		qlnxr_iw_issue_event(context,
6698 				    params,
6699 				    IW_CM_EVENT_DISCONNECT,
6700 				    "IW_CM_EVENT_DISCONNECT");
6701 		qlnxr_iw_close_event(context, params);
6702 #endif /* #if __FreeBSD_version >= 1102000 */
6703 		break;
6704 
6705 	case ECORE_IWARP_EVENT_CLOSE:
6706 		ep->during_connect = 0;
6707 		qlnxr_iw_close_event(context, params);
6708 		break;
6709 
6710 	case ECORE_IWARP_EVENT_RQ_EMPTY:
6711 		qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6712 				 "IWARP_EVENT_RQ_EMPTY");
6713 		break;
6714 
6715 	case ECORE_IWARP_EVENT_IRQ_FULL:
6716 		qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6717 				 "IWARP_EVENT_IRQ_FULL");
6718 		break;
6719 
6720 	case ECORE_IWARP_EVENT_LLP_TIMEOUT:
6721 		qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6722 				 "IWARP_EVENT_LLP_TIMEOUT");
6723 		break;
6724 
6725 	case ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
6726 		qlnxr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
6727 				 "IWARP_EVENT_REMOTE_PROTECTION_ERROR");
6728 		break;
6729 
6730 	case ECORE_IWARP_EVENT_CQ_OVERFLOW:
6731 		qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6732 				 "IWARP_EVENT_CQ_OVERFLOW");
6733 		break;
6734 
6735 	case ECORE_IWARP_EVENT_QP_CATASTROPHIC:
6736 		qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6737 				 "IWARP_EVENT_QP_CATASTROPHIC");
6738 		break;
6739 
6740 	case ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR:
6741 		qlnxr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
6742 				 "IWARP_EVENT_LOCAL_ACCESS_ERROR");
6743 		break;
6744 
6745 	case ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR:
6746 		qlnxr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
6747 				 "IWARP_EVENT_REMOTE_OPERATION_ERROR");
6748 		break;
6749 
6750 	case ECORE_IWARP_EVENT_TERMINATE_RECEIVED:
6751 		QL_DPRINT12(ha, "Got terminate message"
6752 			" ECORE_IWARP_EVENT_TERMINATE_RECEIVED\n");
6753 		break;
6754 
6755 	default:
6756 		QL_DPRINT12(ha,
6757 			"Unknown event [0x%x] received \n", params->event);
6758 		break;
6759 	}
6760 
6761 	QL_DPRINT12(ha, "[context, event] = [%p, 0x%x] "
6762 		"exit\n", context, params->event);
6763 	return 0;
6764 }
6765 
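/*
 * qlnxr_addr4_resolve - resolve the destination MAC address for an IPv4
 * peer via arpresolve(); the arpresolve() signature differs across
 * FreeBSD versions, hence the two variants below.
 */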
6766 static int
6767 qlnxr_addr4_resolve(struct qlnxr_dev *dev,
6768 			      struct sockaddr_in *src_in,
6769 			      struct sockaddr_in *dst_in,
6770 			      u8 *dst_mac)
6771 {
6772 	int rc;
6773 
6774 #if __FreeBSD_version >= 1100000
6775 	rc = arpresolve(dev->ha->ifp, 0, NULL, (struct sockaddr *)dst_in,
6776 			dst_mac, NULL, NULL);
6777 #else
6778 	struct llentry *lle;
6779 
6780 	rc = arpresolve(dev->ha->ifp, NULL, NULL, (struct sockaddr *)dst_in,
6781 			dst_mac, &lle);
6782 #endif
6783 
6784 	QL_DPRINT12(dev->ha, "rc = %d "
6785 		"sa_len = 0x%x sa_family = 0x%x IP Address = %d.%d.%d.%d "
6786 		"Dest MAC %02x:%02x:%02x:%02x:%02x:%02x\n", rc,
6787 		dst_in->sin_len, dst_in->sin_family,
6788 		NIPQUAD((dst_in->sin_addr.s_addr)),
6789 		dst_mac[0], dst_mac[1], dst_mac[2],
6790 		dst_mac[3], dst_mac[4], dst_mac[5]);
6791 
6792 	return rc;
6793 }
6794 
6795 int
6796 qlnxr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
6797 {
6798 	struct qlnxr_dev *dev;
6799 	struct ecore_iwarp_connect_out out_params;
6800 	struct ecore_iwarp_connect_in in_params;
6801 	struct qlnxr_iw_ep *ep;
6802 	struct qlnxr_qp *qp;
6803 	struct sockaddr_in *laddr;
6804 	struct sockaddr_in *raddr;
6805 	int rc = 0;
6806 	qlnx_host_t	*ha;
6807 
6808 	dev = get_qlnxr_dev((cm_id->device));
6809 	ha = dev->ha;
6810 
6811 	QL_DPRINT12(ha, "[cm_id, conn_param] = [%p, %p] "
6812 		"enter \n", cm_id, conn_param);
6813 
6814 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
6815 		return -EINVAL;
6816 
6817 	qp = idr_find(&dev->qpidr, conn_param->qpn);
6818 
6819 	laddr = (struct sockaddr_in *)&cm_id->local_addr;
6820 	raddr = (struct sockaddr_in *)&cm_id->remote_addr;
6821 
6822 	QL_DPRINT12(ha,
6823 		"local = [%d.%d.%d.%d, %d] remote = [%d.%d.%d.%d, %d]\n",
6824 		NIPQUAD((laddr->sin_addr.s_addr)), laddr->sin_port,
6825 		NIPQUAD((raddr->sin_addr.s_addr)), raddr->sin_port);
6826 
6827 	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
6828 	if (!ep) {
6829 		QL_DPRINT11(ha, "struct qlnxr_iw_ep "
6830 			"alloc memory failed\n");
6831 		return -ENOMEM;
6832 	}
6833 
6834 	ep->dev = dev;
6835 	ep->qp = qp;
6836 	cm_id->add_ref(cm_id);
6837 	ep->cm_id = cm_id;
6838 
6839 	memset(&in_params, 0, sizeof (struct ecore_iwarp_connect_in));
6840 	memset(&out_params, 0, sizeof (struct ecore_iwarp_connect_out));
6841 
6842 	in_params.event_cb = qlnxr_iw_event_handler;
6843 	in_params.cb_context = ep;
6844 
6845 	in_params.cm_info.ip_version = ECORE_TCP_IPV4;
6846 
6847 	in_params.cm_info.remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
6848 	in_params.cm_info.local_ip[0] = ntohl(laddr->sin_addr.s_addr);
6849 	in_params.cm_info.remote_port = ntohs(raddr->sin_port);
6850 	in_params.cm_info.local_port = ntohs(laddr->sin_port);
6851 	in_params.cm_info.vlan = 0;
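	/* MSS: interface MTU less 40 bytes of IPv4 + TCP headers */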
6852 	in_params.mss = dev->ha->ifp->if_mtu - 40;
6853 
6854 	QL_DPRINT12(ha, "remote_ip = [%d.%d.%d.%d] "
6855 		"local_ip = [%d.%d.%d.%d] remote_port = %d local_port = %d "
6856 		"vlan = %d\n",
6857 		NIPQUAD((in_params.cm_info.remote_ip[0])),
6858 		NIPQUAD((in_params.cm_info.local_ip[0])),
6859 		in_params.cm_info.remote_port, in_params.cm_info.local_port,
6860 		in_params.cm_info.vlan);
6861 
6862 	rc = qlnxr_addr4_resolve(dev, laddr, raddr, (u8 *)in_params.remote_mac_addr);
6863 
6864 	if (rc) {
6865 		QL_DPRINT11(ha, "qlnxr_addr4_resolve failed\n");
6866 		goto err;
6867 	}
6868 
6869 	QL_DPRINT12(ha, "ord = %d ird=%d private_data=%p"
6870 		" private_data_len=%d rq_psn=%d\n",
6871 		conn_param->ord, conn_param->ird, conn_param->private_data,
6872 		conn_param->private_data_len, qp->rq_psn);
6873 
6874 	in_params.cm_info.ord = conn_param->ord;
6875 	in_params.cm_info.ird = conn_param->ird;
6876 	in_params.cm_info.private_data = conn_param->private_data;
6877 	in_params.cm_info.private_data_len = conn_param->private_data_len;
6878 	in_params.qp = qp->ecore_qp;
6879 
6880 	memcpy(in_params.local_mac_addr, dev->ha->primary_mac, ETH_ALEN);
6881 
6882 	rc = ecore_iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
6883 
6884 	if (rc) {
6885 		QL_DPRINT12(ha, "ecore_iwarp_connect failed\n");
6886 		goto err;
6887 	}
6888 
6889 	QL_DPRINT12(ha, "exit\n");
6890 
6891 	return rc;
6892 
6893 err:
6894 	cm_id->rem_ref(cm_id);
6895 	kfree(ep);
6896 
6897 	QL_DPRINT12(ha, "exit [%d]\n", rc);
6898 	return rc;
6899 }
6900 
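/*
 * qlnxr_iw_create_listen - bind an iWARP listener: wrap the iw_cm_id in
 * a qlnxr_iw_listener and hand the IPv4 address, port and backlog down
 * to ecore_iwarp_create_listen().
 */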
6901 int
6902 qlnxr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
6903 {
6904 	struct qlnxr_dev *dev;
6905 	struct qlnxr_iw_listener *listener;
6906 	struct ecore_iwarp_listen_in iparams;
6907 	struct ecore_iwarp_listen_out oparams;
6908 	struct sockaddr_in *laddr;
6909 	qlnx_host_t	*ha;
6910 	int rc;
6911 
6912 	dev = get_qlnxr_dev((cm_id->device));
6913 	ha = dev->ha;
6914 
6915 	QL_DPRINT12(ha, "enter\n");
6916 
6917 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
6918 		return -EINVAL;
6919 
6920 	laddr = (struct sockaddr_in *)&cm_id->local_addr;
6921 
6922 	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
6923 
6924 	if (listener == NULL) {
6925 		QL_DPRINT11(ha, "listener memory alloc failed\n");
6926 		return -ENOMEM;
6927 	}
6928 
6929 	listener->dev = dev;
6930 	cm_id->add_ref(cm_id);
6931 	listener->cm_id = cm_id;
6932 	listener->backlog = backlog;
6933 
6934 	memset(&iparams, 0, sizeof (struct ecore_iwarp_listen_in));
6935 	memset(&oparams, 0, sizeof (struct ecore_iwarp_listen_out));
6936 
6937 	iparams.cb_context = listener;
6938 	iparams.event_cb = qlnxr_iw_event_handler;
6939 	iparams.max_backlog = backlog;
6940 
6941 	iparams.ip_version = ECORE_TCP_IPV4;
6942 
6943 	iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
6944 	iparams.port = ntohs(laddr->sin_port);
6945 	iparams.vlan = 0;
6946 
6947 	QL_DPRINT12(ha, "[%d.%d.%d.%d, %d] iparams.port = %d\n",
6948 		NIPQUAD((laddr->sin_addr.s_addr)),
6949 		laddr->sin_port, iparams.port);
6950 
6951 	rc = ecore_iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
6952 	if (rc) {
6953 		QL_DPRINT11(ha,
6954 			"ecore_iwarp_create_listen failed rc = %d\n", rc);
6955 		goto err;
6956 	}
6957 
6958 	listener->ecore_handle = oparams.handle;
6959 	cm_id->provider_data = listener;
6960 
6961 	QL_DPRINT12(ha, "exit\n");
6962 	return rc;
6963 
6964 err:
6965 	cm_id->rem_ref(cm_id);
6966 	kfree(listener);
6967 
6968 	QL_DPRINT12(ha, "exit [%d]\n", rc);
6969 	return rc;
6970 }
6971 
6972 void
6973 qlnxr_iw_destroy_listen(struct iw_cm_id *cm_id)
6974 {
6975 	struct qlnxr_iw_listener *listener = cm_id->provider_data;
6976 	struct qlnxr_dev *dev = get_qlnxr_dev((cm_id->device));
6977 	int rc = 0;
6978 	qlnx_host_t	*ha;
6979 
6980 	ha = dev->ha;
6981 
6982 	QL_DPRINT12(ha, "enter\n");
6983 
6984 	if (listener->ecore_handle)
6985 		rc = ecore_iwarp_destroy_listen(dev->rdma_ctx,
6986 				listener->ecore_handle);
6987 
6988 	cm_id->rem_ref(cm_id);
6989 
6990 	QL_DPRINT12(ha, "exit [%d]\n", rc);
6991 	return;
6992 }
6993 
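/*
 * qlnxr_iw_accept - passive side: attach the QP chosen by the ULP to the
 * endpoint and complete the MPA exchange via ecore_iwarp_accept().
 */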
6994 int
6995 qlnxr_iw_accept(struct iw_cm_id *cm_id,
6996 	struct iw_cm_conn_param *conn_param)
6997 {
6998 	struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)cm_id->provider_data;
6999 	struct qlnxr_dev *dev = ep->dev;
7000 	struct qlnxr_qp *qp;
7001 	struct ecore_iwarp_accept_in params;
7002 	int rc;
7003 	qlnx_host_t	*ha;
7004 
7005 	ha = dev->ha;
7006 
7007 	QL_DPRINT12(ha, "enter  qpid=%d\n", conn_param->qpn);
7008 
7009 	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
7010 		return -EINVAL;
7011 
7012 	qp = idr_find(&dev->qpidr, conn_param->qpn);
7013 	if (!qp) {
7014 		QL_DPRINT11(ha, "idr_find failed invalid qpn = %d\n",
7015 			conn_param->qpn);
7016 		return -EINVAL;
7017 	}
7018 	ep->qp = qp;
7019 	qp->ep = ep;
7020 	cm_id->add_ref(cm_id);
7021 	ep->cm_id = cm_id;
7022 
7023 	params.ep_context = ep->ecore_context;
7024 	params.cb_context = ep;
7025 	params.qp = ep->qp->ecore_qp;
7026 	params.private_data = conn_param->private_data;
7027 	params.private_data_len = conn_param->private_data_len;
7028 	params.ird = conn_param->ird;
7029 	params.ord = conn_param->ord;
7030 
7031 	rc = ecore_iwarp_accept(dev->rdma_ctx, &params);
7032 	if (rc) {
7033 		QL_DPRINT11(ha, "ecore_iwarp_accept failed %d\n", rc);
7034 		goto err;
7035 	}
7036 
7037 	QL_DPRINT12(ha, "exit\n");
7038 	return 0;
7039 err:
7040 	cm_id->rem_ref(cm_id);
7041 	QL_DPRINT12(ha, "exit rc = %d\n", rc);
7042 	return rc;
7043 }
7044 
7045 int
7046 qlnxr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
7047 {
7048 #if __FreeBSD_version >= 1102000
7049 
7050         struct qlnxr_iw_ep *ep = (struct qlnxr_iw_ep *)cm_id->provider_data;
7051         struct qlnxr_dev *dev = ep->dev;
7052         struct ecore_iwarp_reject_in params;
7053         int rc;
7054 
7055         params.ep_context = ep->ecore_context;
7056         params.cb_context = ep;
7057         params.private_data = pdata;
7058         params.private_data_len = pdata_len;
7059         ep->qp = NULL;
7060 
7061         rc = ecore_iwarp_reject(dev->rdma_ctx, &params);
7062 
7063         return rc;
7064 
7065 #else
7066 
7067 	printf("iWARP reject_cr not implemented\n");
7068 	return -EINVAL;
7069 
7070 #endif /* #if __FreeBSD_version >= 1102000 */
7071 }
7072 
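/*
 * QP reference counting for the iWARP CM: the QP remains visible in the
 * qpidr until the last reference is dropped, at which point it is
 * removed from the idr.
 */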
7073 void
7074 qlnxr_iw_qp_add_ref(struct ib_qp *ibqp)
7075 {
7076 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
7077 	qlnx_host_t	*ha;
7078 
7079 	ha = qp->dev->ha;
7080 
7081 	QL_DPRINT12(ha, "enter ibqp = %p\n", ibqp);
7082 
7083 	atomic_inc(&qp->refcnt);
7084 
7085 	QL_DPRINT12(ha, "exit \n");
7086 	return;
7087 }
7088 
7089 void
7090 qlnxr_iw_qp_rem_ref(struct ib_qp *ibqp)
7091 {
7092 	struct qlnxr_qp *qp = get_qlnxr_qp(ibqp);
7093 	qlnx_host_t	*ha;
7094 
7095 	ha = qp->dev->ha;
7096 
7097 	QL_DPRINT12(ha, "enter ibqp = %p qp = %p\n", ibqp, qp);
7098 
7099 	if (atomic_dec_and_test(&qp->refcnt)) {
7100 		qlnxr_idr_remove(qp->dev, qp->qp_id);
7101 	}
7102 
7103 	QL_DPRINT12(ha, "exit \n");
7104 	return;
7105 }
7106 
7107 struct ib_qp *
7108 qlnxr_iw_get_qp(struct ib_device *ibdev, int qpn)
7109 {
7110 	struct qlnxr_dev *dev = get_qlnxr_dev(ibdev);
7111 	struct ib_qp *qp;
7112 	qlnx_host_t	*ha;
7113 
7114 	ha = dev->ha;
7115 
7116 	QL_DPRINT12(ha, "enter dev = %p ibdev = %p qpn = %d\n", dev, ibdev, qpn);
7117 
7118 	qp = idr_find(&dev->qpidr, qpn);
7119 
7120 	QL_DPRINT12(ha, "exit qp = %p\n", qp);
7121 
7122 	return (qp);
7123 }
7124