xref: /linux/drivers/infiniband/hw/bnxt_re/ib_verbs.c (revision 55f3538c4923e9dfca132e99ebec370e8094afda)
1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: IB Verbs interpreter
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44 
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_mad.h>
50 #include <rdma/ib_cache.h>
51 
52 #include "bnxt_ulp.h"
53 
54 #include "roce_hsi.h"
55 #include "qplib_res.h"
56 #include "qplib_sp.h"
57 #include "qplib_fp.h"
58 #include "qplib_rcfw.h"
59 
60 #include "bnxt_re.h"
61 #include "ib_verbs.h"
62 #include <rdma/bnxt_re-abi.h>
63 
64 static int __from_ib_access_flags(int iflags)
65 {
66 	int qflags = 0;
67 
68 	if (iflags & IB_ACCESS_LOCAL_WRITE)
69 		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 	if (iflags & IB_ACCESS_REMOTE_READ)
71 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 	if (iflags & IB_ACCESS_REMOTE_WRITE)
73 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 	if (iflags & IB_ACCESS_MW_BIND)
77 		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 	if (iflags & IB_ZERO_BASED)
79 		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 	if (iflags & IB_ACCESS_ON_DEMAND)
81 		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82 	return qflags;
83 }
84 
85 static enum ib_access_flags __to_ib_access_flags(int qflags)
86 {
87 	enum ib_access_flags iflags = 0;
88 
89 	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 		iflags |= IB_ACCESS_LOCAL_WRITE;
91 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 		iflags |= IB_ACCESS_REMOTE_WRITE;
93 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 		iflags |= IB_ACCESS_REMOTE_READ;
95 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 		iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 		iflags |= IB_ACCESS_MW_BIND;
99 	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 		iflags |= IB_ZERO_BASED;
101 	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 		iflags |= IB_ACCESS_ON_DEMAND;
103 	return iflags;
104 }
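
/*
 * Illustrative example (not part of the original source): the two helpers
 * above translate between the IB and qplib access-flag encodings and, for
 * the flags they handle, the mapping round-trips, e.g.:
 *
 *	int qflags = __from_ib_access_flags(IB_ACCESS_LOCAL_WRITE |
 *					    IB_ACCESS_REMOTE_READ);
 *	// qflags == (BNXT_QPLIB_ACCESS_LOCAL_WRITE |
 *	//	      BNXT_QPLIB_ACCESS_REMOTE_READ)
 *	// __to_ib_access_flags(qflags) recovers the original IB flags.
 */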
105 
106 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
107 			     struct bnxt_qplib_sge *sg_list, int num)
108 {
109 	int i, total = 0;
110 
111 	for (i = 0; i < num; i++) {
112 		sg_list[i].addr = ib_sg_list[i].addr;
113 		sg_list[i].lkey = ib_sg_list[i].lkey;
114 		sg_list[i].size = ib_sg_list[i].length;
115 		total += sg_list[i].size;
116 	}
117 	return total;
118 }
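
/*
 * Usage sketch (illustrative only, not part of the original source): the
 * receive paths below transcribe each ib_recv_wr into a qplib SWQE with this
 * helper; the return value is the total payload length in bytes across the
 * SG list, e.g.:
 *
 *	struct bnxt_qplib_swqe wqe;
 *
 *	wqe.num_sge = wr->num_sge;
 *	payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
 */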
119 
120 /* Device */
121 struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
122 {
123 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124 	struct net_device *netdev = NULL;
125 
126 	rcu_read_lock();
127 	if (rdev)
128 		netdev = rdev->netdev;
129 	if (netdev)
130 		dev_hold(netdev);
131 
132 	rcu_read_unlock();
133 	return netdev;
134 }
135 
136 int bnxt_re_query_device(struct ib_device *ibdev,
137 			 struct ib_device_attr *ib_attr,
138 			 struct ib_udata *udata)
139 {
140 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
142 
143 	memset(ib_attr, 0, sizeof(*ib_attr));
144 	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
145 	       min(sizeof(dev_attr->fw_ver),
146 		   sizeof(ib_attr->fw_ver)));
147 	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
148 			    (u8 *)&ib_attr->sys_image_guid);
149 	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
150 	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
151 
152 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
153 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
154 	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
155 	ib_attr->max_qp = dev_attr->max_qp;
156 	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
157 	ib_attr->device_cap_flags =
158 				    IB_DEVICE_CURR_QP_STATE_MOD
159 				    | IB_DEVICE_RC_RNR_NAK_GEN
160 				    | IB_DEVICE_SHUTDOWN_PORT
161 				    | IB_DEVICE_SYS_IMAGE_GUID
162 				    | IB_DEVICE_LOCAL_DMA_LKEY
163 				    | IB_DEVICE_RESIZE_MAX_WR
164 				    | IB_DEVICE_PORT_ACTIVE_EVENT
165 				    | IB_DEVICE_N_NOTIFY_CQ
166 				    | IB_DEVICE_MEM_WINDOW
167 				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
168 				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
169 	ib_attr->max_sge = dev_attr->max_qp_sges;
170 	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
171 	ib_attr->max_cq = dev_attr->max_cq;
172 	ib_attr->max_cqe = dev_attr->max_cq_wqes;
173 	ib_attr->max_mr = dev_attr->max_mr;
174 	ib_attr->max_pd = dev_attr->max_pd;
175 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
176 	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
177 	if (dev_attr->is_atomic) {
178 		ib_attr->atomic_cap = IB_ATOMIC_HCA;
179 		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
180 	}
181 
182 	ib_attr->max_ee_rd_atom = 0;
183 	ib_attr->max_res_rd_atom = 0;
184 	ib_attr->max_ee_init_rd_atom = 0;
185 	ib_attr->max_ee = 0;
186 	ib_attr->max_rdd = 0;
187 	ib_attr->max_mw = dev_attr->max_mw;
188 	ib_attr->max_raw_ipv6_qp = 0;
189 	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
190 	ib_attr->max_mcast_grp = 0;
191 	ib_attr->max_mcast_qp_attach = 0;
192 	ib_attr->max_total_mcast_qp_attach = 0;
193 	ib_attr->max_ah = dev_attr->max_ah;
194 
195 	ib_attr->max_fmr = 0;
196 	ib_attr->max_map_per_fmr = 0;
197 
198 	ib_attr->max_srq = dev_attr->max_srq;
199 	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
200 	ib_attr->max_srq_sge = dev_attr->max_srq_sges;
201 
202 	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
203 
204 	ib_attr->max_pkeys = 1;
205 	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
206 	return 0;
207 }
208 
209 int bnxt_re_modify_device(struct ib_device *ibdev,
210 			  int device_modify_mask,
211 			  struct ib_device_modify *device_modify)
212 {
213 	switch (device_modify_mask) {
214 	case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
215 		/* Modifying the GUID requires modifying the GID table */
216 		/* GUID should be treated as read-only */
217 		break;
218 	case IB_DEVICE_MODIFY_NODE_DESC:
219 		/* Node Desc should be treated as read-only */
220 		break;
221 	default:
222 		break;
223 	}
224 	return 0;
225 }
226 
227 /* Port */
228 int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
229 		       struct ib_port_attr *port_attr)
230 {
231 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
232 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
233 
234 	memset(port_attr, 0, sizeof(*port_attr));
235 
236 	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
237 		port_attr->state = IB_PORT_ACTIVE;
238 		port_attr->phys_state = 5;
239 	} else {
240 		port_attr->state = IB_PORT_DOWN;
241 		port_attr->phys_state = 3;
242 	}
243 	port_attr->max_mtu = IB_MTU_4096;
244 	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
245 	port_attr->gid_tbl_len = dev_attr->max_sgid;
246 	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
247 				    IB_PORT_DEVICE_MGMT_SUP |
248 				    IB_PORT_VENDOR_CLASS_SUP |
249 				    IB_PORT_IP_BASED_GIDS;
250 
251 	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
252 	port_attr->bad_pkey_cntr = 0;
253 	port_attr->qkey_viol_cntr = 0;
254 	port_attr->pkey_tbl_len = dev_attr->max_pkey;
255 	port_attr->lid = 0;
256 	port_attr->sm_lid = 0;
257 	port_attr->lmc = 0;
258 	port_attr->max_vl_num = 4;
259 	port_attr->sm_sl = 0;
260 	port_attr->subnet_timeout = 0;
261 	port_attr->init_type_reply = 0;
262 	port_attr->active_speed = rdev->active_speed;
263 	port_attr->active_width = rdev->active_width;
264 
265 	return 0;
266 }
267 
268 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
269 			       struct ib_port_immutable *immutable)
270 {
271 	struct ib_port_attr port_attr;
272 
273 	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
274 		return -EINVAL;
275 
276 	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
277 	immutable->gid_tbl_len = port_attr.gid_tbl_len;
278 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
279 	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
280 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
281 	return 0;
282 }
283 
284 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
285 {
286 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
287 
288 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
289 		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
290 		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
291 }
292 
293 int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
294 		       u16 index, u16 *pkey)
295 {
296 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
297 
298 	/* Ignore port_num */
299 
300 	memset(pkey, 0, sizeof(*pkey));
301 	return bnxt_qplib_get_pkey(&rdev->qplib_res,
302 				   &rdev->qplib_res.pkey_tbl, index, pkey);
303 }
304 
305 int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
306 		      int index, union ib_gid *gid)
307 {
308 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
309 	int rc = 0;
310 
311 	/* Ignore port_num */
312 	memset(gid, 0, sizeof(*gid));
313 	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
314 				 &rdev->qplib_res.sgid_tbl, index,
315 				 (struct bnxt_qplib_gid *)gid);
316 	return rc;
317 }
318 
319 int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
320 		    unsigned int index, void **context)
321 {
322 	int rc = 0;
323 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
324 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
325 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
326 	struct bnxt_qplib_gid *gid_to_del;
327 
328 	/* Delete the entry from the hardware */
329 	ctx = *context;
330 	if (!ctx)
331 		return -EINVAL;
332 
333 	if (sgid_tbl && sgid_tbl->active) {
334 		if (ctx->idx >= sgid_tbl->max)
335 			return -EINVAL;
336 		gid_to_del = &sgid_tbl->tbl[ctx->idx];
337 		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
338 		 * or via the ib_unregister_device path. In the former case QP1
339 		 * may not be destroyed yet; in that case just return, as FW
340 		 * needs that entry to be present and will fail its deletion.
341 		 * We could get invoked again after QP1 is destroyed, OR get an
342 		 * ADD_GID call with a different GID value for the same index,
343 		 * where we would issue a MODIFY_GID cmd to update the GID entry -- TBD
344 		 */
345 		if (ctx->idx == 0 &&
346 		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
347 		    ctx->refcnt == 1 && rdev->qp1_sqp) {
348 			dev_dbg(rdev_to_dev(rdev),
349 				"Trying to delete GID0 while QP1 is alive\n");
350 			return -EFAULT;
351 		}
352 		ctx->refcnt--;
353 		if (!ctx->refcnt) {
354 			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
355 			if (rc) {
356 				dev_err(rdev_to_dev(rdev),
357 					"Failed to remove GID: %#x", rc);
358 			} else {
359 				ctx_tbl = sgid_tbl->ctx;
360 				ctx_tbl[ctx->idx] = NULL;
361 				kfree(ctx);
362 			}
363 		}
364 	} else {
365 		return -EINVAL;
366 	}
367 	return rc;
368 }
369 
370 int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
371 		    unsigned int index, const union ib_gid *gid,
372 		    const struct ib_gid_attr *attr, void **context)
373 {
374 	int rc;
375 	u32 tbl_idx = 0;
376 	u16 vlan_id = 0xFFFF;
377 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
378 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
379 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
380 
381 	if ((attr->ndev) && is_vlan_dev(attr->ndev))
382 		vlan_id = vlan_dev_vlan_id(attr->ndev);
383 
384 	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
385 				 rdev->qplib_res.netdev->dev_addr,
386 				 vlan_id, true, &tbl_idx);
387 	if (rc == -EALREADY) {
388 		ctx_tbl = sgid_tbl->ctx;
389 		ctx_tbl[tbl_idx]->refcnt++;
390 		*context = ctx_tbl[tbl_idx];
391 		return 0;
392 	}
393 
394 	if (rc < 0) {
395 		dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
396 		return rc;
397 	}
398 
399 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
400 	if (!ctx)
401 		return -ENOMEM;
402 	ctx_tbl = sgid_tbl->ctx;
403 	ctx->idx = tbl_idx;
404 	ctx->refcnt = 1;
405 	ctx_tbl[tbl_idx] = ctx;
406 	*context = ctx;
407 
408 	return rc;
409 }
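
/*
 * Illustrative note (not part of the original source): bnxt_re_add_gid() and
 * bnxt_re_del_gid() pair up through the per-index bnxt_re_gid_ctx refcount.
 * A hypothetical sequence for one SGID table index:
 *
 *	add_gid(gid)  -> HW entry created, ctx->refcnt == 1
 *	add_gid(gid)  -> bnxt_qplib_add_sgid() returns -EALREADY,
 *	                 same context reused, ctx->refcnt == 2
 *	del_gid()     -> ctx->refcnt == 1, HW entry retained
 *	del_gid()     -> ctx->refcnt == 0, bnxt_qplib_del_sgid() removes the
 *	                 HW entry and the context is freed
 */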
410 
411 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
412 					    u8 port_num)
413 {
414 	return IB_LINK_LAYER_ETHERNET;
415 }
416 
417 #define	BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
418 
419 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
420 {
421 	struct bnxt_re_fence_data *fence = &pd->fence;
422 	struct ib_mr *ib_mr = &fence->mr->ib_mr;
423 	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
424 
425 	memset(wqe, 0, sizeof(*wqe));
426 	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
427 	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
428 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
429 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
430 	wqe->bind.zero_based = false;
431 	wqe->bind.parent_l_key = ib_mr->lkey;
432 	wqe->bind.va = (u64)(unsigned long)fence->va;
433 	wqe->bind.length = fence->size;
434 	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
435 	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
436 
437 	/* Save the initial rkey in fence structure for now;
438 	 * wqe->bind.r_key will be set at (re)bind time.
439 	 */
440 	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
441 }
442 
443 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
444 {
445 	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
446 					     qplib_qp);
447 	struct ib_pd *ib_pd = qp->ib_qp.pd;
448 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
449 	struct bnxt_re_fence_data *fence = &pd->fence;
450 	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
451 	struct bnxt_qplib_swqe wqe;
452 	int rc;
453 
454 	memcpy(&wqe, fence_wqe, sizeof(wqe));
455 	wqe.bind.r_key = fence->bind_rkey;
456 	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
457 
458 	dev_dbg(rdev_to_dev(qp->rdev),
459 		"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
460 		wqe.bind.r_key, qp->qplib_qp.id, pd);
461 	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
462 	if (rc) {
463 		dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
464 		return rc;
465 	}
466 	bnxt_qplib_post_send_db(&qp->qplib_qp);
467 
468 	return rc;
469 }
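
/*
 * Illustrative note (not part of the original source): each fence bind
 * consumes the rkey saved in fence->bind_rkey and immediately pre-computes
 * the next one with ib_inc_rkey(), so successive binds of the fence MW use
 * a fresh rkey. Assuming R is the rkey saved by bnxt_re_create_fence_wqe():
 *
 *	bind #1: wqe.bind.r_key = R;            bind_rkey becomes ib_inc_rkey(R)
 *	bind #2: wqe.bind.r_key = ib_inc_rkey(R);  and so on for later binds
 */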
470 
471 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
472 {
473 	struct bnxt_re_fence_data *fence = &pd->fence;
474 	struct bnxt_re_dev *rdev = pd->rdev;
475 	struct device *dev = &rdev->en_dev->pdev->dev;
476 	struct bnxt_re_mr *mr = fence->mr;
477 
478 	if (fence->mw) {
479 		bnxt_re_dealloc_mw(fence->mw);
480 		fence->mw = NULL;
481 	}
482 	if (mr) {
483 		if (mr->ib_mr.rkey)
484 			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
485 					     true);
486 		if (mr->ib_mr.lkey)
487 			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
488 		kfree(mr);
489 		fence->mr = NULL;
490 	}
491 	if (fence->dma_addr) {
492 		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
493 				 DMA_BIDIRECTIONAL);
494 		fence->dma_addr = 0;
495 	}
496 }
497 
498 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
499 {
500 	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
501 	struct bnxt_re_fence_data *fence = &pd->fence;
502 	struct bnxt_re_dev *rdev = pd->rdev;
503 	struct device *dev = &rdev->en_dev->pdev->dev;
504 	struct bnxt_re_mr *mr = NULL;
505 	dma_addr_t dma_addr = 0;
506 	struct ib_mw *mw;
507 	u64 pbl_tbl;
508 	int rc;
509 
510 	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
511 				  DMA_BIDIRECTIONAL);
512 	rc = dma_mapping_error(dev, dma_addr);
513 	if (rc) {
514 		dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
515 		rc = -EIO;
516 		fence->dma_addr = 0;
517 		goto fail;
518 	}
519 	fence->dma_addr = dma_addr;
520 
521 	/* Allocate a MR */
522 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
523 	if (!mr) {
524 		rc = -ENOMEM;
525 		goto fail;
526 	}
527 	fence->mr = mr;
528 	mr->rdev = rdev;
529 	mr->qplib_mr.pd = &pd->qplib_pd;
530 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
531 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
532 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
533 	if (rc) {
534 		dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
535 		goto fail;
536 	}
537 
538 	/* Register MR */
539 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
540 	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
541 	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
542 	pbl_tbl = dma_addr;
543 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
544 			       BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
545 	if (rc) {
546 		dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
547 		goto fail;
548 	}
549 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
550 
551 	/* Create a fence MW only for kernel consumers */
552 	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
553 	if (IS_ERR(mw)) {
554 		dev_err(rdev_to_dev(rdev),
555 			"Failed to create fence-MW for PD: %p\n", pd);
556 		rc = PTR_ERR(mw);
557 		goto fail;
558 	}
559 	fence->mw = mw;
560 
561 	bnxt_re_create_fence_wqe(pd);
562 	return 0;
563 
564 fail:
565 	bnxt_re_destroy_fence_mr(pd);
566 	return rc;
567 }
568 
569 /* Protection Domains */
570 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
571 {
572 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
573 	struct bnxt_re_dev *rdev = pd->rdev;
574 	int rc;
575 
576 	bnxt_re_destroy_fence_mr(pd);
577 
578 	if (pd->qplib_pd.id) {
579 		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
580 					   &rdev->qplib_res.pd_tbl,
581 					   &pd->qplib_pd);
582 		if (rc)
583 			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
584 	}
585 
586 	kfree(pd);
587 	return 0;
588 }
589 
590 struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
591 			       struct ib_ucontext *ucontext,
592 			       struct ib_udata *udata)
593 {
594 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
595 	struct bnxt_re_ucontext *ucntx = container_of(ucontext,
596 						      struct bnxt_re_ucontext,
597 						      ib_uctx);
598 	struct bnxt_re_pd *pd;
599 	int rc;
600 
601 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
602 	if (!pd)
603 		return ERR_PTR(-ENOMEM);
604 
605 	pd->rdev = rdev;
606 	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
607 		dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
608 		rc = -ENOMEM;
609 		goto fail;
610 	}
611 
612 	if (udata) {
613 		struct bnxt_re_pd_resp resp;
614 
615 		if (!ucntx->dpi.dbr) {
616 			/* Allocate the DPI in alloc_pd so that ibv_devinfo
617 			 * and related applications do not fail when DPIs
618 			 * are depleted.
619 			 */
620 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
621 						 &ucntx->dpi, ucntx)) {
622 				rc = -ENOMEM;
623 				goto dbfail;
624 			}
625 		}
626 
627 		resp.pdid = pd->qplib_pd.id;
628 		/* Still allow mapping this DBR to the new user PD. */
629 		resp.dpi = ucntx->dpi.dpi;
630 		resp.dbr = (u64)ucntx->dpi.umdbr;
631 
632 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
633 		if (rc) {
634 			dev_err(rdev_to_dev(rdev),
635 				"Failed to copy user response\n");
636 			goto dbfail;
637 		}
638 	}
639 
640 	if (!udata)
641 		if (bnxt_re_create_fence_mr(pd))
642 			dev_warn(rdev_to_dev(rdev),
643 				 "Failed to create Fence-MR\n");
644 	return &pd->ib_pd;
645 dbfail:
646 	(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
647 				    &pd->qplib_pd);
648 fail:
649 	kfree(pd);
650 	return ERR_PTR(rc);
651 }
652 
653 /* Address Handles */
654 int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
655 {
656 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
657 	struct bnxt_re_dev *rdev = ah->rdev;
658 	int rc;
659 
660 	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
661 	if (rc) {
662 		dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
663 		return rc;
664 	}
665 	kfree(ah);
666 	return 0;
667 }
668 
669 struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
670 				struct rdma_ah_attr *ah_attr,
671 				struct ib_udata *udata)
672 {
673 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
674 	struct bnxt_re_dev *rdev = pd->rdev;
675 	struct bnxt_re_ah *ah;
676 	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
677 	int rc;
678 	u8 nw_type;
679 
680 	struct ib_gid_attr sgid_attr;
681 
682 	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
683 		dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
684 		return ERR_PTR(-EINVAL);
685 	}
686 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
687 	if (!ah)
688 		return ERR_PTR(-ENOMEM);
689 
690 	ah->rdev = rdev;
691 	ah->qplib_ah.pd = &pd->qplib_pd;
692 
693 	/* Supply the configuration for the HW */
694 	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
695 	       sizeof(union ib_gid));
696 	/*
697 	 * If RoCE V2 is enabled, the stack will have two entries for
698 	 * each GID entry. Avoid this duplicate entry in HW by dividing
699 	 * the GID index by 2 for RoCE V2.
700 	 */
701 	ah->qplib_ah.sgid_index = grh->sgid_index / 2;
702 	ah->qplib_ah.host_sgid_index = grh->sgid_index;
703 	ah->qplib_ah.traffic_class = grh->traffic_class;
704 	ah->qplib_ah.flow_label = grh->flow_label;
705 	ah->qplib_ah.hop_limit = grh->hop_limit;
706 	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
707 	if (ib_pd->uobject &&
708 	    !rdma_is_multicast_addr((struct in6_addr *)
709 				    grh->dgid.raw) &&
710 	    !rdma_link_local_addr((struct in6_addr *)
711 				  grh->dgid.raw)) {
712 		union ib_gid sgid;
713 
714 		rc = ib_get_cached_gid(&rdev->ibdev, 1,
715 				       grh->sgid_index, &sgid,
716 				       &sgid_attr);
717 		if (rc) {
718 			dev_err(rdev_to_dev(rdev),
719 				"Failed to query gid at index %d",
720 				grh->sgid_index);
721 			goto fail;
722 		}
723 		if (sgid_attr.ndev)
724 			dev_put(sgid_attr.ndev);
725 		/* Get network header type for this GID */
726 		nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
727 		switch (nw_type) {
728 		case RDMA_NETWORK_IPV4:
729 			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
730 			break;
731 		case RDMA_NETWORK_IPV6:
732 			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
733 			break;
734 		default:
735 			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
736 			break;
737 		}
738 	}
739 
740 	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
741 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
742 	if (rc) {
743 		dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
744 		goto fail;
745 	}
746 
747 	/* Write AVID to shared page. */
748 	if (ib_pd->uobject) {
749 		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
750 		struct bnxt_re_ucontext *uctx;
751 		unsigned long flag;
752 		u32 *wrptr;
753 
754 		uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
755 		spin_lock_irqsave(&uctx->sh_lock, flag);
756 		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
757 		*wrptr = ah->qplib_ah.id;
758 		wmb(); /* make sure cache is updated. */
759 		spin_unlock_irqrestore(&uctx->sh_lock, flag);
760 	}
761 
762 	return &ah->ib_ah;
763 
764 fail:
765 	kfree(ah);
766 	return ERR_PTR(rc);
767 }
768 
769 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
770 {
771 	return 0;
772 }
773 
774 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
775 {
776 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
777 
778 	ah_attr->type = ib_ah->type;
779 	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
780 	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
781 	rdma_ah_set_grh(ah_attr, NULL, 0,
782 			ah->qplib_ah.host_sgid_index,
783 			0, ah->qplib_ah.traffic_class);
784 	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
785 	rdma_ah_set_port_num(ah_attr, 1);
786 	rdma_ah_set_static_rate(ah_attr, 0);
787 	return 0;
788 }
789 
790 /* Queue Pairs */
791 int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
792 {
793 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
794 	struct bnxt_re_dev *rdev = qp->rdev;
795 	int rc;
796 
797 	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
798 	bnxt_qplib_del_flush_qp(&qp->qplib_qp);
799 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
800 	if (rc) {
801 		dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
802 		return rc;
803 	}
804 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
805 		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
806 					   &rdev->sqp_ah->qplib_ah);
807 		if (rc) {
808 			dev_err(rdev_to_dev(rdev),
809 				"Failed to destroy HW AH for shadow QP");
810 			return rc;
811 		}
812 
813 		bnxt_qplib_del_flush_qp(&qp->qplib_qp);
814 		rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
815 					   &rdev->qp1_sqp->qplib_qp);
816 		if (rc) {
817 			dev_err(rdev_to_dev(rdev),
818 				"Failed to destroy Shadow QP");
819 			return rc;
820 		}
821 		mutex_lock(&rdev->qp_lock);
822 		list_del(&rdev->qp1_sqp->list);
823 		atomic_dec(&rdev->qp_count);
824 		mutex_unlock(&rdev->qp_lock);
825 
826 		kfree(rdev->sqp_ah);
827 		kfree(rdev->qp1_sqp);
828 		rdev->qp1_sqp = NULL;
829 		rdev->sqp_ah = NULL;
830 	}
831 
832 	if (!IS_ERR_OR_NULL(qp->rumem))
833 		ib_umem_release(qp->rumem);
834 	if (!IS_ERR_OR_NULL(qp->sumem))
835 		ib_umem_release(qp->sumem);
836 
837 	mutex_lock(&rdev->qp_lock);
838 	list_del(&qp->list);
839 	atomic_dec(&rdev->qp_count);
840 	mutex_unlock(&rdev->qp_lock);
841 	kfree(qp);
842 	return 0;
843 }
844 
845 static u8 __from_ib_qp_type(enum ib_qp_type type)
846 {
847 	switch (type) {
848 	case IB_QPT_GSI:
849 		return CMDQ_CREATE_QP1_TYPE_GSI;
850 	case IB_QPT_RC:
851 		return CMDQ_CREATE_QP_TYPE_RC;
852 	case IB_QPT_UD:
853 		return CMDQ_CREATE_QP_TYPE_UD;
854 	default:
855 		return IB_QPT_MAX;
856 	}
857 }
858 
859 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
860 				struct bnxt_re_qp *qp, struct ib_udata *udata)
861 {
862 	struct bnxt_re_qp_req ureq;
863 	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
864 	struct ib_umem *umem;
865 	int bytes = 0;
866 	struct ib_ucontext *context = pd->ib_pd.uobject->context;
867 	struct bnxt_re_ucontext *cntx = container_of(context,
868 						     struct bnxt_re_ucontext,
869 						     ib_uctx);
870 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
871 		return -EFAULT;
872 
873 	bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
874 	/* Consider mapping PSN search memory only for RC QPs. */
875 	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
876 		bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
877 	bytes = PAGE_ALIGN(bytes);
878 	umem = ib_umem_get(context, ureq.qpsva, bytes,
879 			   IB_ACCESS_LOCAL_WRITE, 1);
880 	if (IS_ERR(umem))
881 		return PTR_ERR(umem);
882 
883 	qp->sumem = umem;
884 	qplib_qp->sq.sglist = umem->sg_head.sgl;
885 	qplib_qp->sq.nmap = umem->nmap;
886 	qplib_qp->qp_handle = ureq.qp_handle;
887 
888 	if (!qp->qplib_qp.srq) {
889 		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
890 		bytes = PAGE_ALIGN(bytes);
891 		umem = ib_umem_get(context, ureq.qprva, bytes,
892 				   IB_ACCESS_LOCAL_WRITE, 1);
893 		if (IS_ERR(umem))
894 			goto rqfail;
895 		qp->rumem = umem;
896 		qplib_qp->rq.sglist = umem->sg_head.sgl;
897 		qplib_qp->rq.nmap = umem->nmap;
898 	}
899 
900 	qplib_qp->dpi = &cntx->dpi;
901 	return 0;
902 rqfail:
903 	ib_umem_release(qp->sumem);
904 	qp->sumem = NULL;
905 	qplib_qp->sq.sglist = NULL;
906 	qplib_qp->sq.nmap = 0;
907 
908 	return PTR_ERR(umem);
909 }
910 
911 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
912 				(struct bnxt_re_pd *pd,
913 				 struct bnxt_qplib_res *qp1_res,
914 				 struct bnxt_qplib_qp *qp1_qp)
915 {
916 	struct bnxt_re_dev *rdev = pd->rdev;
917 	struct bnxt_re_ah *ah;
918 	union ib_gid sgid;
919 	int rc;
920 
921 	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
922 	if (!ah)
923 		return NULL;
924 
925 	ah->rdev = rdev;
926 	ah->qplib_ah.pd = &pd->qplib_pd;
927 
928 	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
929 	if (rc)
930 		goto fail;
931 
932 	/* Supply the same data for the dgid as the sgid */
933 	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
934 	       sizeof(union ib_gid));
935 	ah->qplib_ah.sgid_index = 0;
936 
937 	ah->qplib_ah.traffic_class = 0;
938 	ah->qplib_ah.flow_label = 0;
939 	ah->qplib_ah.hop_limit = 1;
940 	ah->qplib_ah.sl = 0;
941 	/* Use the same DMAC as the SMAC */
942 	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
943 
944 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
945 	if (rc) {
946 		dev_err(rdev_to_dev(rdev),
947 			"Failed to allocate HW AH for Shadow QP");
948 		goto fail;
949 	}
950 
951 	return ah;
952 
953 fail:
954 	kfree(ah);
955 	return NULL;
956 }
957 
958 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
959 				(struct bnxt_re_pd *pd,
960 				 struct bnxt_qplib_res *qp1_res,
961 				 struct bnxt_qplib_qp *qp1_qp)
962 {
963 	struct bnxt_re_dev *rdev = pd->rdev;
964 	struct bnxt_re_qp *qp;
965 	int rc;
966 
967 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
968 	if (!qp)
969 		return NULL;
970 
971 	qp->rdev = rdev;
972 
973 	/* Initialize the shadow QP structure from the QP1 values */
974 	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
975 
976 	qp->qplib_qp.pd = &pd->qplib_pd;
977 	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
978 	qp->qplib_qp.type = IB_QPT_UD;
979 
980 	qp->qplib_qp.max_inline_data = 0;
981 	qp->qplib_qp.sig_type = true;
982 
983 	/* Shadow QP SQ depth should be the same as the QP1 RQ depth */
984 	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
985 	qp->qplib_qp.sq.max_sge = 2;
986 	/* Q full delta can be 1 since this is an internal QP */
987 	qp->qplib_qp.sq.q_full_delta = 1;
988 
989 	qp->qplib_qp.scq = qp1_qp->scq;
990 	qp->qplib_qp.rcq = qp1_qp->rcq;
991 
992 	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
993 	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
994 	/* Q full delta can be 1 since this is an internal QP */
995 	qp->qplib_qp.rq.q_full_delta = 1;
996 
997 	qp->qplib_qp.mtu = qp1_qp->mtu;
998 
999 	qp->qplib_qp.sq_hdr_buf_size = 0;
1000 	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1001 	qp->qplib_qp.dpi = &rdev->dpi_privileged;
1002 
1003 	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1004 	if (rc)
1005 		goto fail;
1006 
1007 	rdev->sqp_id = qp->qplib_qp.id;
1008 
1009 	spin_lock_init(&qp->sq_lock);
1010 	INIT_LIST_HEAD(&qp->list);
1011 	mutex_lock(&rdev->qp_lock);
1012 	list_add_tail(&qp->list, &rdev->qp_list);
1013 	atomic_inc(&rdev->qp_count);
1014 	mutex_unlock(&rdev->qp_lock);
1015 	return qp;
1016 fail:
1017 	kfree(qp);
1018 	return NULL;
1019 }
1020 
1021 struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1022 				struct ib_qp_init_attr *qp_init_attr,
1023 				struct ib_udata *udata)
1024 {
1025 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1026 	struct bnxt_re_dev *rdev = pd->rdev;
1027 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1028 	struct bnxt_re_qp *qp;
1029 	struct bnxt_re_cq *cq;
1030 	struct bnxt_re_srq *srq;
1031 	int rc, entries;
1032 
1033 	if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1034 	    (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1035 	    (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1036 	    (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1037 	    (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1038 		return ERR_PTR(-EINVAL);
1039 
1040 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1041 	if (!qp)
1042 		return ERR_PTR(-ENOMEM);
1043 
1044 	qp->rdev = rdev;
1045 	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1046 	qp->qplib_qp.pd = &pd->qplib_pd;
1047 	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1048 	qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1049 	if (qp->qplib_qp.type == IB_QPT_MAX) {
1050 		dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1051 			qp->qplib_qp.type);
1052 		rc = -EINVAL;
1053 		goto fail;
1054 	}
1055 	qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1056 	qp->qplib_qp.sig_type = (qp_init_attr->sq_sig_type ==
1057 				 IB_SIGNAL_ALL_WR);
1058 
1059 	qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1060 	if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1061 		qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1062 
1063 	if (qp_init_attr->send_cq) {
1064 		cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1065 				  ib_cq);
1066 		if (!cq) {
1067 			dev_err(rdev_to_dev(rdev), "Send CQ not found");
1068 			rc = -EINVAL;
1069 			goto fail;
1070 		}
1071 		qp->qplib_qp.scq = &cq->qplib_cq;
1072 	}
1073 
1074 	if (qp_init_attr->recv_cq) {
1075 		cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1076 				  ib_cq);
1077 		if (!cq) {
1078 			dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1079 			rc = -EINVAL;
1080 			goto fail;
1081 		}
1082 		qp->qplib_qp.rcq = &cq->qplib_cq;
1083 	}
1084 
1085 	if (qp_init_attr->srq) {
1086 		srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
1087 				   ib_srq);
1088 		if (!srq) {
1089 			dev_err(rdev_to_dev(rdev), "SRQ not found");
1090 			rc = -EINVAL;
1091 			goto fail;
1092 		}
1093 		qp->qplib_qp.srq = &srq->qplib_srq;
1094 		qp->qplib_qp.rq.max_wqe = 0;
1095 	} else {
1096 		/* Allocate 1 more than what's provided so that a queue holding
1097 		 * the maximum number of WRs is not mistaken for an empty one
1098 		 */
1099 		entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1100 		qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1101 						dev_attr->max_qp_wqes + 1);
1102 
1103 		qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1104 						qp_init_attr->cap.max_recv_wr;
1105 
1106 		qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1107 		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1108 			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1109 	}
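
	/*
	 * Worked example (illustrative, not part of the original source):
	 * with cap.max_recv_wr = 100 and a sufficiently large
	 * dev_attr->max_qp_wqes, entries = roundup_pow_of_two(100 + 1) = 128,
	 * so rq.max_wqe = 128 and rq.q_full_delta = 128 - 100 = 28 slots
	 * remain in reserve.
	 */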
1110 
1111 	qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1112 
1113 	if (qp_init_attr->qp_type == IB_QPT_GSI) {
1114 		/* Allocate 1 more than what's provided */
1115 		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1116 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1117 						dev_attr->max_qp_wqes + 1);
1118 		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1119 						qp_init_attr->cap.max_send_wr;
1120 		qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1121 		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1122 			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1123 		qp->qplib_qp.sq.max_sge++;
1124 		if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1125 			qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1126 
1127 		qp->qplib_qp.rq_hdr_buf_size =
1128 					BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1129 
1130 		qp->qplib_qp.sq_hdr_buf_size =
1131 					BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1132 		qp->qplib_qp.dpi = &rdev->dpi_privileged;
1133 		rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1134 		if (rc) {
1135 			dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1136 			goto fail;
1137 		}
1138 		/* Create a shadow QP to handle the QP1 traffic */
1139 		rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1140 							 &qp->qplib_qp);
1141 		if (!rdev->qp1_sqp) {
1142 			rc = -EINVAL;
1143 			dev_err(rdev_to_dev(rdev),
1144 				"Failed to create Shadow QP for QP1");
1145 			goto qp_destroy;
1146 		}
1147 		rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1148 							   &qp->qplib_qp);
1149 		if (!rdev->sqp_ah) {
1150 			bnxt_qplib_destroy_qp(&rdev->qplib_res,
1151 					      &rdev->qp1_sqp->qplib_qp);
1152 			rc = -EINVAL;
1153 			dev_err(rdev_to_dev(rdev),
1154 				"Failed to create AH entry for ShadowQP");
1155 			goto qp_destroy;
1156 		}
1157 
1158 	} else {
1159 		/* Allocate 128 + 1 more than what's provided */
1160 		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1161 					     BNXT_QPLIB_RESERVED_QP_WRS + 1);
1162 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1163 						dev_attr->max_qp_wqes +
1164 						BNXT_QPLIB_RESERVED_QP_WRS + 1);
1165 		qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1166 
1167 		/*
1168 		 * Reserve one slot for the phantom WQE. The application can
1169 		 * post one extra entry in this case, but allowing this avoids
1170 		 * an unexpected queue-full condition.
1171 		 */
1172 
1173 		qp->qplib_qp.sq.q_full_delta -= 1;
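
		/*
		 * Worked example (illustrative, not part of the original
		 * source): with cap.max_send_wr = 100 and the 128 reserved
		 * WRs noted above (BNXT_QPLIB_RESERVED_QP_WRS),
		 * entries = roundup_pow_of_two(100 + 128 + 1) = 256, so
		 * sq.max_wqe = 256 (if within the device limit) and
		 * sq.q_full_delta = (128 + 1) - 1 = 128.
		 */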
1174 
1175 		qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1176 		qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1177 		if (udata) {
1178 			rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1179 			if (rc)
1180 				goto fail;
1181 		} else {
1182 			qp->qplib_qp.dpi = &rdev->dpi_privileged;
1183 		}
1184 
1185 		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1186 		if (rc) {
1187 			dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1188 			goto fail;
1189 		}
1190 	}
1191 
1192 	qp->ib_qp.qp_num = qp->qplib_qp.id;
1193 	spin_lock_init(&qp->sq_lock);
1194 	spin_lock_init(&qp->rq_lock);
1195 
1196 	if (udata) {
1197 		struct bnxt_re_qp_resp resp;
1198 
1199 		resp.qpid = qp->ib_qp.qp_num;
1200 		resp.rsvd = 0;
1201 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1202 		if (rc) {
1203 			dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1204 			goto qp_destroy;
1205 		}
1206 	}
1207 	INIT_LIST_HEAD(&qp->list);
1208 	mutex_lock(&rdev->qp_lock);
1209 	list_add_tail(&qp->list, &rdev->qp_list);
1210 	atomic_inc(&rdev->qp_count);
1211 	mutex_unlock(&rdev->qp_lock);
1212 
1213 	return &qp->ib_qp;
1214 qp_destroy:
1215 	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1216 fail:
1217 	kfree(qp);
1218 	return ERR_PTR(rc);
1219 }
1220 
1221 static u8 __from_ib_qp_state(enum ib_qp_state state)
1222 {
1223 	switch (state) {
1224 	case IB_QPS_RESET:
1225 		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1226 	case IB_QPS_INIT:
1227 		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1228 	case IB_QPS_RTR:
1229 		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1230 	case IB_QPS_RTS:
1231 		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1232 	case IB_QPS_SQD:
1233 		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1234 	case IB_QPS_SQE:
1235 		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1236 	case IB_QPS_ERR:
1237 	default:
1238 		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1239 	}
1240 }
1241 
1242 static enum ib_qp_state __to_ib_qp_state(u8 state)
1243 {
1244 	switch (state) {
1245 	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1246 		return IB_QPS_RESET;
1247 	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1248 		return IB_QPS_INIT;
1249 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1250 		return IB_QPS_RTR;
1251 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1252 		return IB_QPS_RTS;
1253 	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1254 		return IB_QPS_SQD;
1255 	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1256 		return IB_QPS_SQE;
1257 	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1258 	default:
1259 		return IB_QPS_ERR;
1260 	}
1261 }
1262 
1263 static u32 __from_ib_mtu(enum ib_mtu mtu)
1264 {
1265 	switch (mtu) {
1266 	case IB_MTU_256:
1267 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1268 	case IB_MTU_512:
1269 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1270 	case IB_MTU_1024:
1271 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1272 	case IB_MTU_2048:
1273 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1274 	case IB_MTU_4096:
1275 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1276 	default:
1277 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1278 	}
1279 }
1280 
1281 static enum ib_mtu __to_ib_mtu(u32 mtu)
1282 {
1283 	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1284 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1285 		return IB_MTU_256;
1286 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1287 		return IB_MTU_512;
1288 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1289 		return IB_MTU_1024;
1290 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1291 		return IB_MTU_2048;
1292 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1293 		return IB_MTU_4096;
1294 	default:
1295 		return IB_MTU_2048;
1296 	}
1297 }
1298 
1299 /* Shared Receive Queues */
1300 int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
1301 {
1302 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1303 					       ib_srq);
1304 	struct bnxt_re_dev *rdev = srq->rdev;
1305 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1306 	struct bnxt_qplib_nq *nq = NULL;
1307 	int rc;
1308 
1309 	if (qplib_srq->cq)
1310 		nq = qplib_srq->cq->nq;
1311 	rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1312 	if (rc) {
1313 		dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
1314 		return rc;
1315 	}
1316 
1317 	if (srq->umem && !IS_ERR(srq->umem))
1318 		ib_umem_release(srq->umem);
1319 	kfree(srq);
1320 	atomic_dec(&rdev->srq_count);
1321 	if (nq)
1322 		nq->budget--;
1323 	return 0;
1324 }
1325 
1326 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1327 				 struct bnxt_re_pd *pd,
1328 				 struct bnxt_re_srq *srq,
1329 				 struct ib_udata *udata)
1330 {
1331 	struct bnxt_re_srq_req ureq;
1332 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1333 	struct ib_umem *umem;
1334 	int bytes = 0;
1335 	struct ib_ucontext *context = pd->ib_pd.uobject->context;
1336 	struct bnxt_re_ucontext *cntx = container_of(context,
1337 						     struct bnxt_re_ucontext,
1338 						     ib_uctx);
1339 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1340 		return -EFAULT;
1341 
1342 	bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1343 	bytes = PAGE_ALIGN(bytes);
1344 	umem = ib_umem_get(context, ureq.srqva, bytes,
1345 			   IB_ACCESS_LOCAL_WRITE, 1);
1346 	if (IS_ERR(umem))
1347 		return PTR_ERR(umem);
1348 
1349 	srq->umem = umem;
1350 	qplib_srq->nmap = umem->nmap;
1351 	qplib_srq->sglist = umem->sg_head.sgl;
1352 	qplib_srq->srq_handle = ureq.srq_handle;
1353 	qplib_srq->dpi = &cntx->dpi;
1354 
1355 	return 0;
1356 }
1357 
1358 struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
1359 				  struct ib_srq_init_attr *srq_init_attr,
1360 				  struct ib_udata *udata)
1361 {
1362 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1363 	struct bnxt_re_dev *rdev = pd->rdev;
1364 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1365 	struct bnxt_re_srq *srq;
1366 	struct bnxt_qplib_nq *nq = NULL;
1367 	int rc, entries;
1368 
1369 	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1370 		dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded");
1371 		rc = -EINVAL;
1372 		goto exit;
1373 	}
1374 
1375 	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1376 		rc = -ENOTSUPP;
1377 		goto exit;
1378 	}
1379 
1380 	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1381 	if (!srq) {
1382 		rc = -ENOMEM;
1383 		goto exit;
1384 	}
1385 	srq->rdev = rdev;
1386 	srq->qplib_srq.pd = &pd->qplib_pd;
1387 	srq->qplib_srq.dpi = &rdev->dpi_privileged;
1388 	/* Allocate 1 more than what's provided so that a queue holding
1389 	 * the maximum number of WRs is not mistaken for an empty one
1390 	 */
1391 	entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1392 	if (entries > dev_attr->max_srq_wqes + 1)
1393 		entries = dev_attr->max_srq_wqes + 1;
1394 
1395 	srq->qplib_srq.max_wqe = entries;
1396 	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1397 	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1398 	srq->srq_limit = srq_init_attr->attr.srq_limit;
1399 	srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1400 	nq = &rdev->nq[0];
1401 
1402 	if (udata) {
1403 		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1404 		if (rc)
1405 			goto fail;
1406 	}
1407 
1408 	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1409 	if (rc) {
1410 		dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
1411 		goto fail;
1412 	}
1413 
1414 	if (udata) {
1415 		struct bnxt_re_srq_resp resp;
1416 
1417 		resp.srqid = srq->qplib_srq.id;
1418 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1419 		if (rc) {
1420 			dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
1421 			bnxt_qplib_destroy_srq(&rdev->qplib_res,
1422 					       &srq->qplib_srq);
1423 			goto exit;
1424 		}
1425 	}
1426 	if (nq)
1427 		nq->budget++;
1428 	atomic_inc(&rdev->srq_count);
1429 
1430 	return &srq->ib_srq;
1431 
1432 fail:
1433 	if (udata && srq->umem && !IS_ERR(srq->umem)) {
1434 		ib_umem_release(srq->umem);
1435 		srq->umem = NULL;
1436 	}
1437 
1438 	kfree(srq);
1439 exit:
1440 	return ERR_PTR(rc);
1441 }
1442 
1443 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1444 		       enum ib_srq_attr_mask srq_attr_mask,
1445 		       struct ib_udata *udata)
1446 {
1447 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1448 					       ib_srq);
1449 	struct bnxt_re_dev *rdev = srq->rdev;
1450 	int rc;
1451 
1452 	switch (srq_attr_mask) {
1453 	case IB_SRQ_MAX_WR:
1454 		/* SRQ resize is not supported */
1455 		break;
1456 	case IB_SRQ_LIMIT:
1457 		/* Change the SRQ threshold */
1458 		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1459 			return -EINVAL;
1460 
1461 		srq->qplib_srq.threshold = srq_attr->srq_limit;
1462 		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1463 		if (rc) {
1464 			dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
1465 			return rc;
1466 		}
1467 		/* On success, update the shadow */
1468 		srq->srq_limit = srq_attr->srq_limit;
1469 		/* No need to build and send a response back to udata */
1470 		break;
1471 	default:
1472 		dev_err(rdev_to_dev(rdev),
1473 			"Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1474 		return -EINVAL;
1475 	}
1476 	return 0;
1477 }
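
/*
 * Minimal caller sketch (illustrative only, not part of the original source):
 * a kernel consumer arming the SRQ limit through the core verbs API, which
 * lands in bnxt_re_modify_srq() above with srq_attr_mask == IB_SRQ_LIMIT:
 *
 *	struct ib_srq_attr attr = { .srq_limit = 16 };
 *
 *	rc = ib_modify_srq(ib_srq, &attr, IB_SRQ_LIMIT);
 *	// On success the SRQ is armed; an IB_EVENT_SRQ_LIMIT_REACHED async
 *	// event is generated once the SRQ falls below the threshold.
 */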
1478 
1479 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1480 {
1481 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1482 					       ib_srq);
1483 	struct bnxt_re_srq tsrq;
1484 	struct bnxt_re_dev *rdev = srq->rdev;
1485 	int rc;
1486 
1487 	/* Get live SRQ attr */
1488 	tsrq.qplib_srq.id = srq->qplib_srq.id;
1489 	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1490 	if (rc) {
1491 		dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
1492 		return rc;
1493 	}
1494 	srq_attr->max_wr = srq->qplib_srq.max_wqe;
1495 	srq_attr->max_sge = srq->qplib_srq.max_sge;
1496 	srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1497 
1498 	return 0;
1499 }
1500 
1501 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
1502 			  struct ib_recv_wr **bad_wr)
1503 {
1504 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1505 					       ib_srq);
1506 	struct bnxt_qplib_swqe wqe;
1507 	unsigned long flags;
1508 	int rc = 0, payload_sz = 0;
1509 
1510 	spin_lock_irqsave(&srq->lock, flags);
1511 	while (wr) {
1512 		/* Transcribe each ib_recv_wr to qplib_swqe */
1513 		wqe.num_sge = wr->num_sge;
1514 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
1515 					       wr->num_sge);
1516 		wqe.wr_id = wr->wr_id;
1517 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1518 
1519 		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1520 		if (rc) {
1521 			*bad_wr = wr;
1522 			break;
1523 		}
1524 		wr = wr->next;
1525 	}
1526 	spin_unlock_irqrestore(&srq->lock, flags);
1527 
1528 	return rc;
1529 }
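
/*
 * Minimal caller sketch (illustrative only, not part of the original source;
 * dma_addr, buf_len, lkey and cookie are placeholders): posting one receive
 * buffer to the SRQ through the core verbs API, which dispatches to
 * bnxt_re_post_srq_recv() above:
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,	// DMA address of the receive buffer
 *		.length	= buf_len,
 *		.lkey	= lkey,		// e.g. the PD's local_dma_lkey
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id = cookie, .sg_list = &sge, .num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *
 *	rc = ib_post_srq_recv(ib_srq, &wr, &bad_wr);
 */
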
1530 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1531 				    struct bnxt_re_qp *qp1_qp,
1532 				    int qp_attr_mask)
1533 {
1534 	struct bnxt_re_qp *qp = rdev->qp1_sqp;
1535 	int rc = 0;
1536 
1537 	if (qp_attr_mask & IB_QP_STATE) {
1538 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1539 		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1540 	}
1541 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1542 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1543 		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1544 	}
1545 
1546 	if (qp_attr_mask & IB_QP_QKEY) {
1547 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1548 		/* Use a random QKEY */
1549 		qp->qplib_qp.qkey = 0x81818181;
1550 	}
1551 	if (qp_attr_mask & IB_QP_SQ_PSN) {
1552 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1553 		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1554 	}
1555 
1556 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1557 	if (rc)
1558 		dev_err(rdev_to_dev(rdev),
1559 			"Failed to modify Shadow QP for QP1");
1560 	return rc;
1561 }
1562 
1563 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1564 		      int qp_attr_mask, struct ib_udata *udata)
1565 {
1566 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1567 	struct bnxt_re_dev *rdev = qp->rdev;
1568 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1569 	enum ib_qp_state curr_qp_state, new_qp_state;
1570 	int rc, entries;
1571 	int status;
1572 	union ib_gid sgid;
1573 	struct ib_gid_attr sgid_attr;
1574 	u8 nw_type;
1575 
1576 	qp->qplib_qp.modify_flags = 0;
1577 	if (qp_attr_mask & IB_QP_STATE) {
1578 		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1579 		new_qp_state = qp_attr->qp_state;
1580 		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1581 					ib_qp->qp_type, qp_attr_mask,
1582 					IB_LINK_LAYER_ETHERNET)) {
1583 			dev_err(rdev_to_dev(rdev),
1584 				"Invalid attribute mask: %#x specified ",
1585 				qp_attr_mask);
1586 			dev_err(rdev_to_dev(rdev),
1587 				"for qpn: %#x type: %#x",
1588 				ib_qp->qp_num, ib_qp->qp_type);
1589 			dev_err(rdev_to_dev(rdev),
1590 				"curr_qp_state=0x%x, new_qp_state=0x%x\n",
1591 				curr_qp_state, new_qp_state);
1592 			return -EINVAL;
1593 		}
1594 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1595 		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1596 
1597 		if (!qp->sumem &&
1598 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1599 			dev_dbg(rdev_to_dev(rdev),
1600 				"Move QP = %p to flush list\n",
1601 				qp);
1602 			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1603 		}
1604 		if (!qp->sumem &&
1605 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1606 			dev_dbg(rdev_to_dev(rdev),
1607 				"Move QP = %p out of flush list\n",
1608 				qp);
1609 			bnxt_qplib_del_flush_qp(&qp->qplib_qp);
1610 		}
1611 	}
1612 	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1613 		qp->qplib_qp.modify_flags |=
1614 				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1615 		qp->qplib_qp.en_sqd_async_notify = true;
1616 	}
1617 	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1618 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1619 		qp->qplib_qp.access =
1620 			__from_ib_access_flags(qp_attr->qp_access_flags);
1621 		/* LOCAL_WRITE access must be set to allow RC receive */
1622 		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1623 	}
1624 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1625 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1626 		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1627 	}
1628 	if (qp_attr_mask & IB_QP_QKEY) {
1629 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1630 		qp->qplib_qp.qkey = qp_attr->qkey;
1631 	}
1632 	if (qp_attr_mask & IB_QP_AV) {
1633 		const struct ib_global_route *grh =
1634 			rdma_ah_read_grh(&qp_attr->ah_attr);
1635 
1636 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1637 				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1638 				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1639 				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1640 				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1641 				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1642 				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1643 		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1644 		       sizeof(qp->qplib_qp.ah.dgid.data));
1645 		qp->qplib_qp.ah.flow_label = grh->flow_label;
1646 		/* If RoCE V2 is enabled, the stack will have two entries for
1647 		 * each GID entry. Avoid this duplicate entry in HW by dividing
1648 		 * the GID index by 2 for RoCE V2.
1649 		 */
1650 		qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1651 		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1652 		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1653 		qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1654 		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1655 		ether_addr_copy(qp->qplib_qp.ah.dmac,
1656 				qp_attr->ah_attr.roce.dmac);
1657 
1658 		status = ib_get_cached_gid(&rdev->ibdev, 1,
1659 					   grh->sgid_index,
1660 					   &sgid, &sgid_attr);
1661 		if (!status && sgid_attr.ndev) {
1662 			memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
1663 			       ETH_ALEN);
1664 			dev_put(sgid_attr.ndev);
1665 			nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
1666 							 &sgid);
1667 			switch (nw_type) {
1668 			case RDMA_NETWORK_IPV4:
1669 				qp->qplib_qp.nw_type =
1670 					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1671 				break;
1672 			case RDMA_NETWORK_IPV6:
1673 				qp->qplib_qp.nw_type =
1674 					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1675 				break;
1676 			default:
1677 				qp->qplib_qp.nw_type =
1678 					CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1679 				break;
1680 			}
1681 		}
1682 	}
1683 
1684 	if (qp_attr_mask & IB_QP_PATH_MTU) {
1685 		qp->qplib_qp.modify_flags |=
1686 				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1687 		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1688 		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1689 	} else if (qp_attr->qp_state == IB_QPS_RTR) {
1690 		qp->qplib_qp.modify_flags |=
1691 			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1692 		qp->qplib_qp.path_mtu =
1693 			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1694 		qp->qplib_qp.mtu =
1695 			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1696 	}
1697 
1698 	if (qp_attr_mask & IB_QP_TIMEOUT) {
1699 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1700 		qp->qplib_qp.timeout = qp_attr->timeout;
1701 	}
1702 	if (qp_attr_mask & IB_QP_RETRY_CNT) {
1703 		qp->qplib_qp.modify_flags |=
1704 				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1705 		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1706 	}
1707 	if (qp_attr_mask & IB_QP_RNR_RETRY) {
1708 		qp->qplib_qp.modify_flags |=
1709 				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1710 		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1711 	}
1712 	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1713 		qp->qplib_qp.modify_flags |=
1714 				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1715 		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1716 	}
1717 	if (qp_attr_mask & IB_QP_RQ_PSN) {
1718 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1719 		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1720 	}
1721 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1722 		qp->qplib_qp.modify_flags |=
1723 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1724 		/* Cap the max_rd_atomic to device max */
1725 		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1726 						   dev_attr->max_qp_rd_atom);
1727 	}
1728 	if (qp_attr_mask & IB_QP_SQ_PSN) {
1729 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1730 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1731 	}
1732 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1733 		if (qp_attr->max_dest_rd_atomic >
1734 		    dev_attr->max_qp_init_rd_atom) {
1735 			dev_err(rdev_to_dev(rdev),
1736 				"max_dest_rd_atomic requested%d is > dev_max%d",
1737 				"max_dest_rd_atomic requested %d is > dev_max %d",
1738 				dev_attr->max_qp_init_rd_atom);
1739 			return -EINVAL;
1740 		}
1741 
1742 		qp->qplib_qp.modify_flags |=
1743 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1744 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1745 	}
1746 	if (qp_attr_mask & IB_QP_CAP) {
1747 		qp->qplib_qp.modify_flags |=
1748 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1749 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1750 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1751 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1752 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1753 		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1754 		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1755 		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1756 		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1757 		    (qp_attr->cap.max_inline_data >=
1758 						dev_attr->max_inline_data)) {
1759 			dev_err(rdev_to_dev(rdev),
1760 				"Modify QP failed - max exceeded");
1761 			return -EINVAL;
1762 		}
1763 		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1764 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1765 						dev_attr->max_qp_wqes + 1);
1766 		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1767 						qp_attr->cap.max_send_wr;
1768 		/*
1769 		 * Reserve one slot for the phantom WQE. Some applications can
1770 		 * post one extra entry in this case; allow it to avoid an
1771 		 * unexpected queue-full condition.
1772 		 */
1773 		qp->qplib_qp.sq.q_full_delta -= 1;
1774 		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1775 		if (qp->qplib_qp.rq.max_wqe) {
1776 			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1777 			qp->qplib_qp.rq.max_wqe =
1778 				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1779 			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1780 						       qp_attr->cap.max_recv_wr;
1781 			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1782 		} else {
1783 			/* An SRQ was used earlier; just ignore the RQ caps */
1784 		}
1785 	}
1786 	if (qp_attr_mask & IB_QP_DEST_QPN) {
1787 		qp->qplib_qp.modify_flags |=
1788 				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1789 		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1790 	}
1791 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1792 	if (rc) {
1793 		dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1794 		return rc;
1795 	}
1796 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1797 		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1798 	return rc;
1799 }
1800 
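/* Query the HW for the current state of the QP and translate it back into
 * the ib_qp_attr / ib_qp_init_attr expected by the verbs consumer.
 */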
1801 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1802 		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1803 {
1804 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1805 	struct bnxt_re_dev *rdev = qp->rdev;
1806 	struct bnxt_qplib_qp *qplib_qp;
1807 	int rc;
1808 
1809 	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1810 	if (!qplib_qp)
1811 		return -ENOMEM;
1812 
1813 	qplib_qp->id = qp->qplib_qp.id;
1814 	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1815 
1816 	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
1817 	if (rc) {
1818 		dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1819 		goto out;
1820 	}
1821 	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1822 	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1823 	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1824 	qp_attr->pkey_index = qplib_qp->pkey_index;
1825 	qp_attr->qkey = qplib_qp->qkey;
1826 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1827 	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1828 			qplib_qp->ah.host_sgid_index,
1829 			qplib_qp->ah.hop_limit,
1830 			qplib_qp->ah.traffic_class);
1831 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1832 	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1833 	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1834 	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1835 	qp_attr->timeout = qplib_qp->timeout;
1836 	qp_attr->retry_cnt = qplib_qp->retry_cnt;
1837 	qp_attr->rnr_retry = qplib_qp->rnr_retry;
1838 	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1839 	qp_attr->rq_psn = qplib_qp->rq.psn;
1840 	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1841 	qp_attr->sq_psn = qplib_qp->sq.psn;
1842 	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1843 	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1844 							 IB_SIGNAL_REQ_WR;
1845 	qp_attr->dest_qp_num = qplib_qp->dest_qpn;
1846 
1847 	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1848 	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1849 	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1850 	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1851 	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1852 	qp_init_attr->cap = qp_attr->cap;
1853 
1854 out:
1855 	kfree(qplib_qp);
1856 	return rc;
1857 }
1858 
1859 /* Routine for sending QP1 packets for RoCE V1 and V2
1860  */
1861 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1862 				     struct ib_send_wr *wr,
1863 				     struct bnxt_qplib_swqe *wqe,
1864 				     int payload_size)
1865 {
1866 	struct ib_device *ibdev = &qp->rdev->ibdev;
1867 	struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1868 					     ib_ah);
1869 	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1870 	struct bnxt_qplib_sge sge;
1871 	union ib_gid sgid;
1872 	u8 nw_type;
1873 	u16 ether_type;
1874 	struct ib_gid_attr sgid_attr;
1875 	union ib_gid dgid;
1876 	bool is_eth = false;
1877 	bool is_vlan = false;
1878 	bool is_grh = false;
1879 	bool is_udp = false;
1880 	u8 ip_version = 0;
1881 	u16 vlan_id = 0xFFFF;
1882 	void *buf;
1883 	int i, rc = 0;
1884 
1885 	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1886 
1887 	rc = ib_get_cached_gid(ibdev, 1,
1888 			       qplib_ah->host_sgid_index, &sgid,
1889 			       &sgid_attr);
1890 	if (rc) {
1891 		dev_err(rdev_to_dev(qp->rdev),
1892 			"Failed to query gid at index %d",
1893 			qplib_ah->host_sgid_index);
1894 		return rc;
1895 	}
1896 	if (sgid_attr.ndev) {
1897 		if (is_vlan_dev(sgid_attr.ndev))
1898 			vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1899 		dev_put(sgid_attr.ndev);
1900 	}
1901 	/* Get network header type for this GID */
1902 	nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1903 	switch (nw_type) {
1904 	case RDMA_NETWORK_IPV4:
1905 		nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1906 		break;
1907 	case RDMA_NETWORK_IPV6:
1908 		nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1909 		break;
1910 	default:
1911 		nw_type = BNXT_RE_ROCE_V1_PACKET;
1912 		break;
1913 	}
1914 	memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1915 	is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1916 	if (is_udp) {
1917 		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1918 			ip_version = 4;
1919 			ether_type = ETH_P_IP;
1920 		} else {
1921 			ip_version = 6;
1922 			ether_type = ETH_P_IPV6;
1923 		}
1924 		is_grh = false;
1925 	} else {
1926 		ether_type = ETH_P_IBOE;
1927 		is_grh = true;
1928 	}
1929 
1930 	is_eth = true;
1931 	is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1932 
1933 	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1934 			  ip_version, is_udp, 0, &qp->qp1_hdr);
1935 
1936 	/* ETH */
1937 	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1938 	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1939 
1940 	/* For vlan, check the sgid for vlan existence */
1941 
1942 	if (!is_vlan) {
1943 		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1944 	} else {
1945 		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1946 		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1947 	}
1948 
1949 	if (is_grh || (ip_version == 6)) {
1950 		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1951 		memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1952 		       sizeof(sgid));
1953 		qp->qp1_hdr.grh.hop_limit     = qplib_ah->hop_limit;
1954 	}
1955 
1956 	if (ip_version == 4) {
1957 		qp->qp1_hdr.ip4.tos = 0;
1958 		qp->qp1_hdr.ip4.id = 0;
1959 		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1960 		qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1961 
1962 		memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1963 		memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1964 		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1965 	}
1966 
1967 	if (is_udp) {
1968 		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1969 		qp->qp1_hdr.udp.sport = htons(0x8CD1);
1970 		qp->qp1_hdr.udp.csum = 0;
1971 	}
1972 
1973 	/* BTH */
1974 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1975 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1976 		qp->qp1_hdr.immediate_present = 1;
1977 	} else {
1978 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1979 	}
1980 	if (wr->send_flags & IB_SEND_SOLICITED)
1981 		qp->qp1_hdr.bth.solicited_event = 1;
1982 	/* pad_count: pad the payload out to a 4-byte boundary */
1983 	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1984 
1985 	/* P_key for QP1 is for all members */
1986 	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1987 	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1988 	qp->qp1_hdr.bth.ack_req = 0;
1989 	qp->send_psn++;
1990 	qp->send_psn &= BTH_PSN_MASK;
1991 	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1992 	/* DETH */
1993 	/* Use the privileged Q_Key for QP1 */
1994 	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1995 	qp->qp1_hdr.deth.source_qpn = IB_QP1;
1996 
1997 	/* Pack the QP1 to the transmit buffer */
1998 	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1999 	if (buf) {
2000 		ib_ud_header_pack(&qp->qp1_hdr, buf);
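		/* Shift the data SGEs down by one slot; sg_list[0] will carry
		 * the QP1 header that was just packed into the header buffer.
		 */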
2001 		for (i = wqe->num_sge; i; i--) {
2002 			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2003 			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2004 			wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2005 		}
2006 
2007 		/*
2008 		 * Max header buf size for IPv6 RoCE V2 is 86,
2009 		 * which is the same as the QP1 SQ header buffer.
2010 		 * Header buf size for IPv4 RoCE V2 can be 66:
2011 		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
2012 		 * Subtract 20 bytes from the QP1 SQ header buf size.
2013 		 */
2014 		if (is_udp && ip_version == 4)
2015 			sge.size -= 20;
2016 		/*
2017 		 * Max header buf size for RoCE V1 is 78:
2018 		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2019 		 * Subtract 8 bytes from the QP1 SQ header buf size.
2020 		 */
2021 		if (!is_udp)
2022 			sge.size -= 8;
2023 
2024 		/* Subtract 4 bytes for non vlan packets */
2025 		if (!is_vlan)
2026 			sge.size -= 4;
2027 
2028 		wqe->sg_list[0].addr = sge.addr;
2029 		wqe->sg_list[0].lkey = sge.lkey;
2030 		wqe->sg_list[0].size = sge.size;
2031 		wqe->num_sge++;
2032 
2033 	} else {
2034 		dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
2035 		rc = -ENOMEM;
2036 	}
2037 	return rc;
2038 }
2039 
2040 /* The MAD layer only provides a recv SGE large enough for the
2041  * ib_grh + MAD datagram: no Ethernet headers, Ethertype, BTH, DETH,
2042  * nor RoCE iCRC. The Cu+ solution must provide a buffer for the entire
2043  * receive packet (334 bytes, no VLAN) and then copy the GRH
2044  * and the MAD datagram out to the provided SGE.
2045  */
2046 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2047 					    struct ib_recv_wr *wr,
2048 					    struct bnxt_qplib_swqe *wqe,
2049 					    int payload_size)
2050 {
2051 	struct bnxt_qplib_sge ref, sge;
2052 	u32 rq_prod_index;
2053 	struct bnxt_re_sqp_entries *sqp_entry;
2054 
2055 	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2056 
2057 	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2058 		return -ENOMEM;
2059 
2060 	/* Create 1 SGE to receive the entire
2061 	 * ethernet packet
2062 	 */
2063 	/* Save the reference from ULP */
2064 	ref.addr = wqe->sg_list[0].addr;
2065 	ref.lkey = wqe->sg_list[0].lkey;
2066 	ref.size = wqe->sg_list[0].size;
2067 
2068 	sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
2069 
2070 	/* SGE 1 */
2071 	wqe->sg_list[0].addr = sge.addr;
2072 	wqe->sg_list[0].lkey = sge.lkey;
2073 	wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2074 	sge.size -= wqe->sg_list[0].size;
2075 
2076 	sqp_entry->sge.addr = ref.addr;
2077 	sqp_entry->sge.lkey = ref.lkey;
2078 	sqp_entry->sge.size = ref.size;
2079 	/* Store the wrid for reporting completion */
2080 	sqp_entry->wrid = wqe->wr_id;
2081 	/* Change wqe->wr_id to the table index */
2082 	wqe->wr_id = rq_prod_index;
2083 	return 0;
2084 }
2085 
2086 static int is_ud_qp(struct bnxt_re_qp *qp)
2087 {
2088 	return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
2089 }
2090 
2091 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2092 				  struct ib_send_wr *wr,
2093 				  struct bnxt_qplib_swqe *wqe)
2094 {
2095 	struct bnxt_re_ah *ah = NULL;
2096 
2097 	if (is_ud_qp(qp)) {
2098 		ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2099 		wqe->send.q_key = ud_wr(wr)->remote_qkey;
2100 		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2101 		wqe->send.avid = ah->qplib_ah.id;
2102 	}
2103 	switch (wr->opcode) {
2104 	case IB_WR_SEND:
2105 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2106 		break;
2107 	case IB_WR_SEND_WITH_IMM:
2108 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2109 		wqe->send.imm_data = wr->ex.imm_data;
2110 		break;
2111 	case IB_WR_SEND_WITH_INV:
2112 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2113 		wqe->send.inv_key = wr->ex.invalidate_rkey;
2114 		break;
2115 	default:
2116 		return -EINVAL;
2117 	}
2118 	if (wr->send_flags & IB_SEND_SIGNALED)
2119 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2120 	if (wr->send_flags & IB_SEND_FENCE)
2121 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2122 	if (wr->send_flags & IB_SEND_SOLICITED)
2123 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2124 	if (wr->send_flags & IB_SEND_INLINE)
2125 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2126 
2127 	return 0;
2128 }
2129 
2130 static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
2131 				  struct bnxt_qplib_swqe *wqe)
2132 {
2133 	switch (wr->opcode) {
2134 	case IB_WR_RDMA_WRITE:
2135 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2136 		break;
2137 	case IB_WR_RDMA_WRITE_WITH_IMM:
2138 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2139 		wqe->rdma.imm_data = wr->ex.imm_data;
2140 		break;
2141 	case IB_WR_RDMA_READ:
2142 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2143 		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2144 		break;
2145 	default:
2146 		return -EINVAL;
2147 	}
2148 	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2149 	wqe->rdma.r_key = rdma_wr(wr)->rkey;
2150 	if (wr->send_flags & IB_SEND_SIGNALED)
2151 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2152 	if (wr->send_flags & IB_SEND_FENCE)
2153 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2154 	if (wr->send_flags & IB_SEND_SOLICITED)
2155 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2156 	if (wr->send_flags & IB_SEND_INLINE)
2157 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2158 
2159 	return 0;
2160 }
2161 
2162 static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
2163 				    struct bnxt_qplib_swqe *wqe)
2164 {
2165 	switch (wr->opcode) {
2166 	case IB_WR_ATOMIC_CMP_AND_SWP:
2167 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2168 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2169 		wqe->atomic.swap_data = atomic_wr(wr)->swap;
2170 		break;
2171 	case IB_WR_ATOMIC_FETCH_AND_ADD:
2172 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2173 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2174 		break;
2175 	default:
2176 		return -EINVAL;
2177 	}
2178 	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2179 	wqe->atomic.r_key = atomic_wr(wr)->rkey;
2180 	if (wr->send_flags & IB_SEND_SIGNALED)
2181 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2182 	if (wr->send_flags & IB_SEND_FENCE)
2183 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2184 	if (wr->send_flags & IB_SEND_SOLICITED)
2185 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2186 	return 0;
2187 }
2188 
2189 static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
2190 				 struct bnxt_qplib_swqe *wqe)
2191 {
2192 	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2193 	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2194 
2195 	if (wr->send_flags & IB_SEND_SIGNALED)
2196 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2197 	if (wr->send_flags & IB_SEND_FENCE)
2198 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2199 	if (wr->send_flags & IB_SEND_SOLICITED)
2200 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2201 
2202 	return 0;
2203 }
2204 
2205 static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
2206 				 struct bnxt_qplib_swqe *wqe)
2207 {
2208 	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2209 	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2210 	int access = wr->access;
2211 
2212 	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2213 	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2214 	wqe->frmr.page_list = mr->pages;
2215 	wqe->frmr.page_list_len = mr->npages;
2216 	wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2217 	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2218 
2219 	if (wr->wr.send_flags & IB_SEND_FENCE)
2220 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2221 	if (wr->wr.send_flags & IB_SEND_SIGNALED)
2222 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2223 
2224 	if (access & IB_ACCESS_LOCAL_WRITE)
2225 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2226 	if (access & IB_ACCESS_REMOTE_READ)
2227 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2228 	if (access & IB_ACCESS_REMOTE_WRITE)
2229 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2230 	if (access & IB_ACCESS_REMOTE_ATOMIC)
2231 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2232 	if (access & IB_ACCESS_MW_BIND)
2233 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2234 
2235 	wqe->frmr.l_key = wr->key;
2236 	wqe->frmr.length = wr->mr->length;
2237 	wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2238 	wqe->frmr.va = wr->mr->iova;
2239 	return 0;
2240 }
2241 
2242 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2243 				    struct ib_send_wr *wr,
2244 				    struct bnxt_qplib_swqe *wqe)
2245 {
2246 	/* Copy the inline data into the WQE's inline data buffer */
2247 	u8 *in_data;
2248 	u32 i, sge_len;
2249 	void *sge_addr;
2250 
2251 	in_data = wqe->inline_data;
2252 	for (i = 0; i < wr->num_sge; i++) {
2253 		sge_addr = (void *)(unsigned long)
2254 				wr->sg_list[i].addr;
2255 		sge_len = wr->sg_list[i].length;
2256 
2257 		if ((sge_len + wqe->inline_len) >
2258 		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2259 			dev_err(rdev_to_dev(rdev),
2260 				"Inline data size requested > supported value");
2261 			return -EINVAL;
2262 		}
2263 		sge_len = wr->sg_list[i].length;
2264 
2265 		memcpy(in_data, sge_addr, sge_len);
2266 		in_data += wr->sg_list[i].length;
2267 		wqe->inline_len += wr->sg_list[i].length;
2268 	}
2269 	return wqe->inline_len;
2270 }
2271 
2272 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2273 				   struct ib_send_wr *wr,
2274 				   struct bnxt_qplib_swqe *wqe)
2275 {
2276 	int payload_sz = 0;
2277 
2278 	if (wr->send_flags & IB_SEND_INLINE)
2279 		payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2280 	else
2281 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2282 					       wqe->num_sge);
2283 
2284 	return payload_sz;
2285 }
2286 
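/* Workaround for a HW stall on UD/GSI/raw-Ethertype QPs: once wqe_cnt
 * reaches BNXT_RE_UD_QP_HW_STALL, re-program the QP to RTS via modify_qp
 * and reset the counter so the send queue keeps making progress.
 */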
2287 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2288 {
2289 	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2290 	     qp->ib_qp.qp_type == IB_QPT_GSI ||
2291 	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2292 	     qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2293 		int qp_attr_mask;
2294 		struct ib_qp_attr qp_attr;
2295 
2296 		qp_attr_mask = IB_QP_STATE;
2297 		qp_attr.qp_state = IB_QPS_RTS;
2298 		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2299 		qp->qplib_qp.wqe_cnt = 0;
2300 	}
2301 }
2302 
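
/* Post send WRs on the shadow GSI QP; used when re-injecting received raw
 * QP1 packets toward the consumer, see bnxt_re_process_raw_qp_pkt_rx()
 * below.
 */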
2303 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2304 				       struct bnxt_re_qp *qp,
2305 				struct ib_send_wr *wr)
2306 {
2307 	struct bnxt_qplib_swqe wqe;
2308 	int rc = 0, payload_sz = 0;
2309 	unsigned long flags;
2310 
2311 	spin_lock_irqsave(&qp->sq_lock, flags);
2312 	memset(&wqe, 0, sizeof(wqe));
2313 	while (wr) {
2314 		/* House keeping */
2315 		memset(&wqe, 0, sizeof(wqe));
2316 
2317 		/* Common */
2318 		wqe.num_sge = wr->num_sge;
2319 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2320 			dev_err(rdev_to_dev(rdev),
2321 				"Limit exceeded for Send SGEs");
2322 			rc = -EINVAL;
2323 			goto bad;
2324 		}
2325 
2326 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2327 		if (payload_sz < 0) {
2328 			rc = -EINVAL;
2329 			goto bad;
2330 		}
2331 		wqe.wr_id = wr->wr_id;
2332 
2333 		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2334 
2335 		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2336 		if (!rc)
2337 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2338 bad:
2339 		if (rc) {
2340 			dev_err(rdev_to_dev(rdev),
2341 				"Post send failed opcode = %#x rc = %d",
2342 				wr->opcode, rc);
2343 			break;
2344 		}
2345 		wr = wr->next;
2346 	}
2347 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2348 	bnxt_ud_qp_hw_stall_workaround(qp);
2349 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2350 	return rc;
2351 }
2352 
2353 int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
2354 		      struct ib_send_wr **bad_wr)
2355 {
2356 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2357 	struct bnxt_qplib_swqe wqe;
2358 	int rc = 0, payload_sz = 0;
2359 	unsigned long flags;
2360 
2361 	spin_lock_irqsave(&qp->sq_lock, flags);
2362 	while (wr) {
2363 		/* House keeping */
2364 		memset(&wqe, 0, sizeof(wqe));
2365 
2366 		/* Common */
2367 		wqe.num_sge = wr->num_sge;
2368 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2369 			dev_err(rdev_to_dev(qp->rdev),
2370 				"Limit exceeded for Send SGEs");
2371 			rc = -EINVAL;
2372 			goto bad;
2373 		}
2374 
2375 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2376 		if (payload_sz < 0) {
2377 			rc = -EINVAL;
2378 			goto bad;
2379 		}
2380 		wqe.wr_id = wr->wr_id;
2381 
2382 		switch (wr->opcode) {
2383 		case IB_WR_SEND:
2384 		case IB_WR_SEND_WITH_IMM:
2385 			if (ib_qp->qp_type == IB_QPT_GSI) {
2386 				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2387 							       payload_sz);
2388 				if (rc)
2389 					goto bad;
2390 				wqe.rawqp1.lflags |=
2391 					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2392 			}
2393 			switch (wr->send_flags) {
2394 			case IB_SEND_IP_CSUM:
2395 				wqe.rawqp1.lflags |=
2396 					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2397 				break;
2398 			default:
2399 				break;
2400 			}
2401 			/* Fall through to build the wqe */
2402 		case IB_WR_SEND_WITH_INV:
2403 			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2404 			break;
2405 		case IB_WR_RDMA_WRITE:
2406 		case IB_WR_RDMA_WRITE_WITH_IMM:
2407 		case IB_WR_RDMA_READ:
2408 			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2409 			break;
2410 		case IB_WR_ATOMIC_CMP_AND_SWP:
2411 		case IB_WR_ATOMIC_FETCH_AND_ADD:
2412 			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2413 			break;
2414 		case IB_WR_RDMA_READ_WITH_INV:
2415 			dev_err(rdev_to_dev(qp->rdev),
2416 				"RDMA Read with Invalidate is not supported");
2417 			rc = -EINVAL;
2418 			goto bad;
2419 		case IB_WR_LOCAL_INV:
2420 			rc = bnxt_re_build_inv_wqe(wr, &wqe);
2421 			break;
2422 		case IB_WR_REG_MR:
2423 			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2424 			break;
2425 		default:
2426 			/* Unsupported WRs */
2427 			dev_err(rdev_to_dev(qp->rdev),
2428 				"WR (%#x) is not supported", wr->opcode);
2429 			rc = -EINVAL;
2430 			goto bad;
2431 		}
2432 		if (!rc)
2433 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2434 bad:
2435 		if (rc) {
2436 			dev_err(rdev_to_dev(qp->rdev),
2437 				"post_send failed op:%#x qps = %#x rc = %d\n",
2438 				wr->opcode, qp->qplib_qp.state, rc);
2439 			*bad_wr = wr;
2440 			break;
2441 		}
2442 		wr = wr->next;
2443 	}
2444 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2445 	bnxt_ud_qp_hw_stall_workaround(qp);
2446 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2447 
2448 	return rc;
2449 }
2450 
2451 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2452 				       struct bnxt_re_qp *qp,
2453 				       struct ib_recv_wr *wr)
2454 {
2455 	struct bnxt_qplib_swqe wqe;
2456 	int rc = 0;
2457 
2458 	memset(&wqe, 0, sizeof(wqe));
2459 	while (wr) {
2460 		/* House keeping */
2461 		memset(&wqe, 0, sizeof(wqe));
2462 
2463 		/* Common */
2464 		wqe.num_sge = wr->num_sge;
2465 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2466 			dev_err(rdev_to_dev(rdev),
2467 				"Limit exceeded for Receive SGEs");
2468 			rc = -EINVAL;
2469 			break;
2470 		}
2471 		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2472 		wqe.wr_id = wr->wr_id;
2473 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2474 
2475 		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2476 		if (rc)
2477 			break;
2478 
2479 		wr = wr->next;
2480 	}
2481 	if (!rc)
2482 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
2483 	return rc;
2484 }
2485 
2486 int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2487 		      struct ib_recv_wr **bad_wr)
2488 {
2489 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2490 	struct bnxt_qplib_swqe wqe;
2491 	int rc = 0, payload_sz = 0;
2492 	unsigned long flags;
2493 	u32 count = 0;
2494 
2495 	spin_lock_irqsave(&qp->rq_lock, flags);
2496 	while (wr) {
2497 		/* House keeping */
2498 		memset(&wqe, 0, sizeof(wqe));
2499 
2500 		/* Common */
2501 		wqe.num_sge = wr->num_sge;
2502 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2503 			dev_err(rdev_to_dev(qp->rdev),
2504 				"Limit exceeded for Receive SGEs");
2505 			rc = -EINVAL;
2506 			*bad_wr = wr;
2507 			break;
2508 		}
2509 
2510 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2511 					       wr->num_sge);
2512 		wqe.wr_id = wr->wr_id;
2513 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2514 
2515 		if (ib_qp->qp_type == IB_QPT_GSI)
2516 			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2517 							      payload_sz);
2518 		if (!rc)
2519 			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2520 		if (rc) {
2521 			*bad_wr = wr;
2522 			break;
2523 		}
2524 
2525 		/* Ring the DB if the number of posted RQEs reaches a threshold */
2526 		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2527 			bnxt_qplib_post_recv_db(&qp->qplib_qp);
2528 			count = 0;
2529 		}
2530 
2531 		wr = wr->next;
2532 	}
2533 
2534 	if (count)
2535 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
2536 
2537 	spin_unlock_irqrestore(&qp->rq_lock, flags);
2538 
2539 	return rc;
2540 }
2541 
2542 /* Completion Queues */
2543 int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2544 {
2545 	int rc;
2546 	struct bnxt_re_cq *cq;
2547 	struct bnxt_qplib_nq *nq;
2548 	struct bnxt_re_dev *rdev;
2549 
2550 	cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2551 	rdev = cq->rdev;
2552 	nq = cq->qplib_cq.nq;
2553 
2554 	rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2555 	if (rc) {
2556 		dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2557 		return rc;
2558 	}
2559 	if (!IS_ERR_OR_NULL(cq->umem))
2560 		ib_umem_release(cq->umem);
2561 
2562 	atomic_dec(&rdev->cq_count);
2563 	nq->budget--;
2564 	kfree(cq->cql);
2565 	kfree(cq);
2566 
2567 	return 0;
2568 }
2569 
2570 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2571 				const struct ib_cq_init_attr *attr,
2572 				struct ib_ucontext *context,
2573 				struct ib_udata *udata)
2574 {
2575 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2576 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2577 	struct bnxt_re_cq *cq = NULL;
2578 	int rc, entries;
2579 	int cqe = attr->cqe;
2580 	struct bnxt_qplib_nq *nq = NULL;
2581 	unsigned int nq_alloc_cnt;
2582 
2583 	/* Validate CQ fields */
2584 	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2585 		dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
2586 		return ERR_PTR(-EINVAL);
2587 	}
2588 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2589 	if (!cq)
2590 		return ERR_PTR(-ENOMEM);
2591 
2592 	cq->rdev = rdev;
2593 	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2594 
2595 	entries = roundup_pow_of_two(cqe + 1);
2596 	if (entries > dev_attr->max_cq_wqes + 1)
2597 		entries = dev_attr->max_cq_wqes + 1;
2598 
2599 	if (context) {
2600 		struct bnxt_re_cq_req req;
2601 		struct bnxt_re_ucontext *uctx = container_of
2602 						(context,
2603 						 struct bnxt_re_ucontext,
2604 						 ib_uctx);
2605 		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2606 			rc = -EFAULT;
2607 			goto fail;
2608 		}
2609 
2610 		cq->umem = ib_umem_get(context, req.cq_va,
2611 				       entries * sizeof(struct cq_base),
2612 				       IB_ACCESS_LOCAL_WRITE, 1);
2613 		if (IS_ERR(cq->umem)) {
2614 			rc = PTR_ERR(cq->umem);
2615 			goto fail;
2616 		}
2617 		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2618 		cq->qplib_cq.nmap = cq->umem->nmap;
2619 		cq->qplib_cq.dpi = &uctx->dpi;
2620 	} else {
2621 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2622 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2623 				  GFP_KERNEL);
2624 		if (!cq->cql) {
2625 			rc = -ENOMEM;
2626 			goto fail;
2627 		}
2628 
2629 		cq->qplib_cq.dpi = &rdev->dpi_privileged;
2630 		cq->qplib_cq.sghead = NULL;
2631 		cq->qplib_cq.nmap = 0;
2632 	}
2633 	/*
2634 	 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
2635 	 * used to derive the NQ index.
2636 	 */
2637 	nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2638 	nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2639 	cq->qplib_cq.max_wqe = entries;
2640 	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2641 	cq->qplib_cq.nq	= nq;
2642 
2643 	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2644 	if (rc) {
2645 		dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2646 		goto fail;
2647 	}
2648 
2649 	cq->ib_cq.cqe = entries;
2650 	cq->cq_period = cq->qplib_cq.period;
2651 	nq->budget++;
2652 
2653 	atomic_inc(&rdev->cq_count);
2654 
2655 	if (context) {
2656 		struct bnxt_re_cq_resp resp;
2657 
2658 		resp.cqid = cq->qplib_cq.id;
2659 		resp.tail = cq->qplib_cq.hwq.cons;
2660 		resp.phase = cq->qplib_cq.period;
2661 		resp.rsvd = 0;
2662 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2663 		if (rc) {
2664 			dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2665 			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2666 			goto c2fail;
2667 		}
2668 	}
2669 
2670 	return &cq->ib_cq;
2671 
2672 c2fail:
2673 	if (context)
2674 		ib_umem_release(cq->umem);
2675 fail:
2676 	kfree(cq->cql);
2677 	kfree(cq);
2678 	return ERR_PTR(rc);
2679 }
2680 
2681 static u8 __req_to_ib_wc_status(u8 qstatus)
2682 {
2683 	switch (qstatus) {
2684 	case CQ_REQ_STATUS_OK:
2685 		return IB_WC_SUCCESS;
2686 	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2687 		return IB_WC_BAD_RESP_ERR;
2688 	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2689 		return IB_WC_LOC_LEN_ERR;
2690 	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2691 		return IB_WC_LOC_QP_OP_ERR;
2692 	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2693 		return IB_WC_LOC_PROT_ERR;
2694 	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2695 		return IB_WC_GENERAL_ERR;
2696 	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2697 		return IB_WC_REM_INV_REQ_ERR;
2698 	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2699 		return IB_WC_REM_ACCESS_ERR;
2700 	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2701 		return IB_WC_REM_OP_ERR;
2702 	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2703 		return IB_WC_RNR_RETRY_EXC_ERR;
2704 	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2705 		return IB_WC_RETRY_EXC_ERR;
2706 	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2707 		return IB_WC_WR_FLUSH_ERR;
2708 	default:
2709 		return IB_WC_GENERAL_ERR;
2710 	}
2711 	return 0;
2712 }
2713 
2714 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2715 {
2716 	switch (qstatus) {
2717 	case CQ_RES_RAWETH_QP1_STATUS_OK:
2718 		return IB_WC_SUCCESS;
2719 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2720 		return IB_WC_LOC_ACCESS_ERR;
2721 	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2722 		return IB_WC_LOC_LEN_ERR;
2723 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2724 		return IB_WC_LOC_PROT_ERR;
2725 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2726 		return IB_WC_LOC_QP_OP_ERR;
2727 	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2728 		return IB_WC_GENERAL_ERR;
2729 	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2730 		return IB_WC_WR_FLUSH_ERR;
2731 	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2732 		return IB_WC_WR_FLUSH_ERR;
2733 	default:
2734 		return IB_WC_GENERAL_ERR;
2735 	}
2736 }
2737 
2738 static u8 __rc_to_ib_wc_status(u8 qstatus)
2739 {
2740 	switch (qstatus) {
2741 	case CQ_RES_RC_STATUS_OK:
2742 		return IB_WC_SUCCESS;
2743 	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2744 		return IB_WC_LOC_ACCESS_ERR;
2745 	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2746 		return IB_WC_LOC_LEN_ERR;
2747 	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2748 		return IB_WC_LOC_PROT_ERR;
2749 	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2750 		return IB_WC_LOC_QP_OP_ERR;
2751 	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2752 		return IB_WC_GENERAL_ERR;
2753 	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2754 		return IB_WC_REM_INV_REQ_ERR;
2755 	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2756 		return IB_WC_WR_FLUSH_ERR;
2757 	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2758 		return IB_WC_WR_FLUSH_ERR;
2759 	default:
2760 		return IB_WC_GENERAL_ERR;
2761 	}
2762 }
2763 
2764 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2765 {
2766 	switch (cqe->type) {
2767 	case BNXT_QPLIB_SWQE_TYPE_SEND:
2768 		wc->opcode = IB_WC_SEND;
2769 		break;
2770 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2771 		wc->opcode = IB_WC_SEND;
2772 		wc->wc_flags |= IB_WC_WITH_IMM;
2773 		break;
2774 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2775 		wc->opcode = IB_WC_SEND;
2776 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2777 		break;
2778 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2779 		wc->opcode = IB_WC_RDMA_WRITE;
2780 		break;
2781 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2782 		wc->opcode = IB_WC_RDMA_WRITE;
2783 		wc->wc_flags |= IB_WC_WITH_IMM;
2784 		break;
2785 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2786 		wc->opcode = IB_WC_RDMA_READ;
2787 		break;
2788 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2789 		wc->opcode = IB_WC_COMP_SWAP;
2790 		break;
2791 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2792 		wc->opcode = IB_WC_FETCH_ADD;
2793 		break;
2794 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2795 		wc->opcode = IB_WC_LOCAL_INV;
2796 		break;
2797 	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2798 		wc->opcode = IB_WC_REG_MR;
2799 		break;
2800 	default:
2801 		wc->opcode = IB_WC_SEND;
2802 		break;
2803 	}
2804 
2805 	wc->status = __req_to_ib_wc_status(cqe->status);
2806 }
2807 
2808 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2809 				     u16 raweth_qp1_flags2)
2810 {
2811 	bool is_ipv6 = false, is_ipv4 = false;
2812 
2813 	/* raweth_qp1_flags Bit 9-6 indicates itype */
2814 	/* raweth_qp1_flags bits 9-6 indicate itype */
2815 	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2816 		return -1;
2817 
2818 	if (raweth_qp1_flags2 &
2819 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2820 	    raweth_qp1_flags2 &
2821 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2822 		/* raweth_qp1_flags2 bit 8 indicates ip_type: 0 - v4, 1 - v6 */
2823 		(raweth_qp1_flags2 &
2824 		 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2825 			(is_ipv6 = true) : (is_ipv4 = true);
2826 		return ((is_ipv6) ?
2827 			 BNXT_RE_ROCEV2_IPV6_PACKET :
2828 			 BNXT_RE_ROCEV2_IPV4_PACKET);
2829 	} else {
2830 		return BNXT_RE_ROCE_V1_PACKET;
2831 	}
2832 }
2833 
2834 static int bnxt_re_to_ib_nw_type(int nw_type)
2835 {
2836 	u8 nw_hdr_type = 0xFF;
2837 
2838 	switch (nw_type) {
2839 	case BNXT_RE_ROCE_V1_PACKET:
2840 		nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2841 		break;
2842 	case BNXT_RE_ROCEV2_IPV4_PACKET:
2843 		nw_hdr_type = RDMA_NETWORK_IPV4;
2844 		break;
2845 	case BNXT_RE_ROCEV2_IPV6_PACKET:
2846 		nw_hdr_type = RDMA_NETWORK_IPV6;
2847 		break;
2848 	}
2849 	return nw_hdr_type;
2850 }
2851 
2852 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2853 				       void *rq_hdr_buf)
2854 {
2855 	u8 *tmp_buf = NULL;
2856 	struct ethhdr *eth_hdr;
2857 	u16 eth_type;
2858 	bool rc = false;
2859 
2860 	tmp_buf = (u8 *)rq_hdr_buf;
2861 	/*
2862 	 * If the destination MAC is not the same as the I/F MAC, this could
2863 	 * be a loopback or multicast address; check whether it is a
2864 	 * loopback packet.
2865 	 */
2866 	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2867 		tmp_buf += 4;
2868 		/* Check the ether type */
2869 		eth_hdr = (struct ethhdr *)tmp_buf;
2870 		eth_type = ntohs(eth_hdr->h_proto);
2871 		switch (eth_type) {
2872 		case ETH_P_IBOE:
2873 			rc = true;
2874 			break;
2875 		case ETH_P_IP:
2876 		case ETH_P_IPV6: {
2877 			u32 len;
2878 			struct udphdr *udp_hdr;
2879 
2880 			len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2881 						      sizeof(struct ipv6hdr));
2882 			tmp_buf += sizeof(struct ethhdr) + len;
2883 			udp_hdr = (struct udphdr *)tmp_buf;
2884 			if (ntohs(udp_hdr->dest) ==
2885 				    ROCE_V2_UDP_DPORT)
2886 				rc = true;
2887 			break;
2888 			}
2889 		default:
2890 			break;
2891 		}
2892 	}
2893 
2894 	return rc;
2895 }
2896 
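/* A raw packet has arrived on the GSI QP. Build send SGEs that skip the
 * Ethernet header and loop the remaining GRH + MAD payload through the
 * shadow QP so that it lands in the buffer originally posted by the ULP.
 */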
2897 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2898 					 struct bnxt_qplib_cqe *cqe)
2899 {
2900 	struct bnxt_re_dev *rdev = qp1_qp->rdev;
2901 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
2902 	struct bnxt_re_qp *qp = rdev->qp1_sqp;
2903 	struct ib_send_wr *swr;
2904 	struct ib_ud_wr udwr;
2905 	struct ib_recv_wr rwr;
2906 	int pkt_type = 0;
2907 	u32 tbl_idx;
2908 	void *rq_hdr_buf;
2909 	dma_addr_t rq_hdr_buf_map;
2910 	dma_addr_t shrq_hdr_buf_map;
2911 	u32 offset = 0;
2912 	u32 skip_bytes = 0;
2913 	struct ib_sge s_sge[2];
2914 	struct ib_sge r_sge[2];
2915 	int rc;
2916 
2917 	memset(&udwr, 0, sizeof(udwr));
2918 	memset(&rwr, 0, sizeof(rwr));
2919 	memset(&s_sge, 0, sizeof(s_sge));
2920 	memset(&r_sge, 0, sizeof(r_sge));
2921 
2922 	swr = &udwr.wr;
2923 	tbl_idx = cqe->wr_id;
2924 
2925 	rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2926 			(tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2927 	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2928 							  tbl_idx);
2929 
2930 	/* Shadow QP header buffer */
2931 	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2932 							    tbl_idx);
2933 	sqp_entry = &rdev->sqp_tbl[tbl_idx];
2934 
2935 	/* Store this cqe */
2936 	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2937 	sqp_entry->qp1_qp = qp1_qp;
2938 
2939 	/* Find packet type from the cqe */
2940 
2941 	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2942 					     cqe->raweth_qp1_flags2);
2943 	if (pkt_type < 0) {
2944 		dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2945 		return -EINVAL;
2946 	}
2947 
2948 	/* Adjust the offset for the user buffer and post in the rq */
2949 
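	/* An IPv4 header is 20 bytes shorter than the 40-byte GRH/IPv6
	 * header, so shift the payload start accordingly for RoCE v2 IPv4
	 * packets.
	 */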
2950 	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2951 		offset = 20;
2952 
2953 	/*
2954 	 * QP1 loopback packet has 4 bytes of internal header before
2955 	 * ether header. Skip these four bytes.
2956 	 */
2957 	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2958 		skip_bytes = 4;
2959 
2960 	/* First send SGE. Skip the ether header */
2961 	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2962 			+ skip_bytes;
2963 	s_sge[0].lkey = 0xFFFFFFFF;
2964 	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2965 				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2966 
2967 	/* Second Send SGE */
2968 	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2969 			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2970 	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2971 		s_sge[1].addr += 8;
2972 	s_sge[1].lkey = 0xFFFFFFFF;
2973 	s_sge[1].length = 256;
2974 
2975 	/* First recv SGE */
2976 
2977 	r_sge[0].addr = shrq_hdr_buf_map;
2978 	r_sge[0].lkey = 0xFFFFFFFF;
2979 	r_sge[0].length = 40;
2980 
2981 	r_sge[1].addr = sqp_entry->sge.addr + offset;
2982 	r_sge[1].lkey = sqp_entry->sge.lkey;
2983 	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2984 
2985 	/* Create receive work request */
2986 	rwr.num_sge = 2;
2987 	rwr.sg_list = r_sge;
2988 	rwr.wr_id = tbl_idx;
2989 	rwr.next = NULL;
2990 
2991 	rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2992 	if (rc) {
2993 		dev_err(rdev_to_dev(rdev),
2994 			"Failed to post Rx buffers to shadow QP");
2995 		return -ENOMEM;
2996 	}
2997 
2998 	swr->num_sge = 2;
2999 	swr->sg_list = s_sge;
3000 	swr->wr_id = tbl_idx;
3001 	swr->opcode = IB_WR_SEND;
3002 	swr->next = NULL;
3003 
3004 	udwr.ah = &rdev->sqp_ah->ib_ah;
3005 	udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
3006 	udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
3007 
3008 	/* Post the received data on the send queue */
3009 	rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
3010 
3011 	return 0;
3012 }
3013 
3014 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3015 					  struct bnxt_qplib_cqe *cqe)
3016 {
3017 	wc->opcode = IB_WC_RECV;
3018 	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3019 	wc->wc_flags |= IB_WC_GRH;
3020 }
3021 
3022 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3023 				u16 *vid, u8 *sl)
3024 {
3025 	bool ret = false;
3026 	u32 metadata;
3027 	u16 tpid;
3028 
3029 	metadata = orig_cqe->raweth_qp1_metadata;
3030 	if (orig_cqe->raweth_qp1_flags2 &
3031 		CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3032 		tpid = ((metadata &
3033 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3034 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3035 		if (tpid == ETH_P_8021Q) {
3036 			*vid = metadata &
3037 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3038 			*sl = (metadata &
3039 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3040 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3041 			ret = true;
3042 		}
3043 	}
3044 
3045 	return ret;
3046 }
3047 
3048 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3049 				      struct bnxt_qplib_cqe *cqe)
3050 {
3051 	wc->opcode = IB_WC_RECV;
3052 	wc->status = __rc_to_ib_wc_status(cqe->status);
3053 
3054 	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3055 		wc->wc_flags |= IB_WC_WITH_IMM;
3056 	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3057 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3058 	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3059 	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3060 		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3061 }
3062 
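/* A completion arrived on the shadow GSI QP. Report it against the original
 * QP1 using the CQE and wr_id stashed in the sqp_tbl entry when the raw
 * packet was first received.
 */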
3063 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
3064 					     struct ib_wc *wc,
3065 					     struct bnxt_qplib_cqe *cqe)
3066 {
3067 	struct bnxt_re_dev *rdev = qp->rdev;
3068 	struct bnxt_re_qp *qp1_qp = NULL;
3069 	struct bnxt_qplib_cqe *orig_cqe = NULL;
3070 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3071 	int nw_type;
3072 	u32 tbl_idx;
3073 	u16 vlan_id;
3074 	u8 sl;
3075 
3076 	tbl_idx = cqe->wr_id;
3077 
3078 	sqp_entry = &rdev->sqp_tbl[tbl_idx];
3079 	qp1_qp = sqp_entry->qp1_qp;
3080 	orig_cqe = &sqp_entry->cqe;
3081 
3082 	wc->wr_id = sqp_entry->wrid;
3083 	wc->byte_len = orig_cqe->length;
3084 	wc->qp = &qp1_qp->ib_qp;
3085 
3086 	wc->ex.imm_data = orig_cqe->immdata;
3087 	wc->src_qp = orig_cqe->src_qp;
3088 	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3089 	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3090 		wc->vlan_id = vlan_id;
3091 		wc->sl = sl;
3092 		wc->wc_flags |= IB_WC_WITH_VLAN;
3093 	}
3094 	wc->port_num = 1;
3095 	wc->vendor_err = orig_cqe->status;
3096 
3097 	wc->opcode = IB_WC_RECV;
3098 	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3099 	wc->wc_flags |= IB_WC_GRH;
3100 
3101 	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3102 					    orig_cqe->raweth_qp1_flags2);
3103 	if (nw_type >= 0) {
3104 		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3105 		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3106 	}
3107 }
3108 
3109 static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
3110 				      struct bnxt_qplib_cqe *cqe)
3111 {
3112 	wc->opcode = IB_WC_RECV;
3113 	wc->status = __rc_to_ib_wc_status(cqe->status);
3114 
3115 	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3116 		wc->wc_flags |= IB_WC_WITH_IMM;
3117 	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3118 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3119 	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3120 	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3121 		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3122 }
3123 
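/* Post the fence memory-window bind as a phantom WQE on the send queue;
 * it uses the SQ slot reserved for the phantom WQE when q_full_delta was
 * computed for this QP.
 */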
3124 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3125 {
3126 	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3127 	unsigned long flags;
3128 	int rc = 0;
3129 
3130 	spin_lock_irqsave(&qp->sq_lock, flags);
3131 
3132 	rc = bnxt_re_bind_fence_mw(lib_qp);
3133 	if (!rc) {
3134 		lib_qp->sq.phantom_wqe_cnt++;
3135 		dev_dbg(&lib_qp->sq.hwq.pdev->dev,
3136 			"qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3137 			lib_qp->id, lib_qp->sq.hwq.prod,
3138 			HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3139 			lib_qp->sq.phantom_wqe_cnt);
3140 	}
3141 
3142 	spin_unlock_irqrestore(&qp->sq_lock, flags);
3143 	return rc;
3144 }
3145 
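/* Poll the CQ: drain HW CQEs into the per-CQ cql[] array (plus any flushed
 * entries), handle the phantom-WQE and shadow-GSI special cases, and
 * transcribe each qplib CQE into an ib_wc for the consumer.
 */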
3146 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3147 {
3148 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3149 	struct bnxt_re_qp *qp;
3150 	struct bnxt_qplib_cqe *cqe;
3151 	int i, ncqe, budget;
3152 	struct bnxt_qplib_q *sq;
3153 	struct bnxt_qplib_qp *lib_qp;
3154 	u32 tbl_idx;
3155 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3156 	unsigned long flags;
3157 
3158 	spin_lock_irqsave(&cq->cq_lock, flags);
3159 	budget = min_t(u32, num_entries, cq->max_cql);
3160 	num_entries = budget;
3161 	if (!cq->cql) {
3162 		dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
3163 		goto exit;
3164 	}
3165 	cqe = &cq->cql[0];
3166 	while (budget) {
3167 		lib_qp = NULL;
3168 		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3169 		if (lib_qp) {
3170 			sq = &lib_qp->sq;
3171 			if (sq->send_phantom) {
3172 				qp = container_of(lib_qp,
3173 						  struct bnxt_re_qp, qplib_qp);
3174 				if (send_phantom_wqe(qp) == -ENOMEM)
3175 					dev_err(rdev_to_dev(cq->rdev),
3176 						"Phantom failed! Scheduled to send again\n");
3177 				else
3178 					sq->send_phantom = false;
3179 			}
3180 		}
3181 		if (ncqe < budget)
3182 			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3183 							      cqe + ncqe,
3184 							      budget - ncqe);
3185 
3186 		if (!ncqe)
3187 			break;
3188 
3189 		for (i = 0; i < ncqe; i++, cqe++) {
3190 			/* Transcribe each qplib_wqe back to ib_wc */
3191 			memset(wc, 0, sizeof(*wc));
3192 
3193 			wc->wr_id = cqe->wr_id;
3194 			wc->byte_len = cqe->length;
3195 			qp = container_of
3196 				((struct bnxt_qplib_qp *)
3197 				 (unsigned long)(cqe->qp_handle),
3198 				 struct bnxt_re_qp, qplib_qp);
3199 			if (!qp) {
3200 				dev_err(rdev_to_dev(cq->rdev),
3201 					"POLL CQ : bad QP handle");
3202 				continue;
3203 			}
3204 			wc->qp = &qp->ib_qp;
3205 			wc->ex.imm_data = cqe->immdata;
3206 			wc->src_qp = cqe->src_qp;
3207 			memcpy(wc->smac, cqe->smac, ETH_ALEN);
3208 			wc->port_num = 1;
3209 			wc->vendor_err = cqe->status;
3210 
3211 			switch (cqe->opcode) {
3212 			case CQ_BASE_CQE_TYPE_REQ:
3213 				if (qp->qplib_qp.id ==
3214 				    qp->rdev->qp1_sqp->qplib_qp.id) {
3215 					/* Handle this completion with
3216 					 * the stored completion
3217 					 */
3218 					memset(wc, 0, sizeof(*wc));
3219 					continue;
3220 				}
3221 				bnxt_re_process_req_wc(wc, cqe);
3222 				break;
3223 			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3224 				if (!cqe->status) {
3225 					int rc = 0;
3226 
3227 					rc = bnxt_re_process_raw_qp_pkt_rx
3228 								(qp, cqe);
3229 					if (!rc) {
3230 						memset(wc, 0, sizeof(*wc));
3231 						continue;
3232 					}
3233 					cqe->status = -1;
3234 				}
3235 				/* Errors need not be looped back.
3236 				 * But change the wr_id to the one
3237 				 * stored in the table
3238 				 */
3239 				tbl_idx = cqe->wr_id;
3240 				sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
3241 				wc->wr_id = sqp_entry->wrid;
3242 				bnxt_re_process_res_rawqp1_wc(wc, cqe);
3243 				break;
3244 			case CQ_BASE_CQE_TYPE_RES_RC:
3245 				bnxt_re_process_res_rc_wc(wc, cqe);
3246 				break;
3247 			case CQ_BASE_CQE_TYPE_RES_UD:
3248 				if (qp->qplib_qp.id ==
3249 				    qp->rdev->qp1_sqp->qplib_qp.id) {
3250 					/* Handle this completion with
3251 					 * the stored completion
3252 					 */
3253 					if (cqe->status) {
3254 						continue;
3255 					} else {
3256 						bnxt_re_process_res_shadow_qp_wc
3257 								(qp, wc, cqe);
3258 						break;
3259 					}
3260 				}
3261 				bnxt_re_process_res_ud_wc(wc, cqe);
3262 				break;
3263 			default:
3264 				dev_err(rdev_to_dev(cq->rdev),
3265 					"POLL CQ : type 0x%x not handled",
3266 					cqe->opcode);
3267 				continue;
3268 			}
3269 			wc++;
3270 			budget--;
3271 		}
3272 	}
3273 exit:
3274 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3275 	return num_entries - budget;
3276 }
3277 
3278 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3279 			  enum ib_cq_notify_flags ib_cqn_flags)
3280 {
3281 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3282 	int type = 0, rc = 0;
3283 	unsigned long flags;
3284 
3285 	spin_lock_irqsave(&cq->cq_lock, flags);
3286 	/* Trigger on the very next completion */
3287 	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3288 		type = DBR_DBR_TYPE_CQ_ARMALL;
3289 	/* Trigger on the next solicited completion */
3290 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
3291 		type = DBR_DBR_TYPE_CQ_ARMSE;
3292 
3293 	/* Poll to see if there are missed events */
3294 	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3295 	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3296 		rc = 1;
3297 		goto exit;
3298 	}
3299 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3300 
3301 exit:
3302 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3303 	return rc;
3304 }
3305 
3306 /* Memory Regions */
3307 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3308 {
3309 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3310 	struct bnxt_re_dev *rdev = pd->rdev;
3311 	struct bnxt_re_mr *mr;
3312 	u64 pbl = 0;
3313 	int rc;
3314 
3315 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3316 	if (!mr)
3317 		return ERR_PTR(-ENOMEM);
3318 
3319 	mr->rdev = rdev;
3320 	mr->qplib_mr.pd = &pd->qplib_pd;
3321 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3322 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3323 
3324 	/* Allocate and register 0 as the address */
3325 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3326 	if (rc)
3327 		goto fail;
3328 
3329 	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3330 	mr->qplib_mr.total_size = -1; /* Infinite length */
3331 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3332 			       PAGE_SIZE);
3333 	if (rc)
3334 		goto fail_mr;
3335 
3336 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3337 	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3338 			       IB_ACCESS_REMOTE_ATOMIC))
3339 		mr->ib_mr.rkey = mr->ib_mr.lkey;
3340 	atomic_inc(&rdev->mr_count);
3341 
3342 	return &mr->ib_mr;
3343 
3344 fail_mr:
3345 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3346 fail:
3347 	kfree(mr);
3348 	return ERR_PTR(rc);
3349 }
3350 
3351 int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3352 {
3353 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3354 	struct bnxt_re_dev *rdev = mr->rdev;
3355 	int rc;
3356 
3357 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3358 	if (rc)
3359 		dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3360 
3361 	if (mr->pages) {
3362 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3363 							&mr->qplib_frpl);
3364 		kfree(mr->pages);
3365 		mr->npages = 0;
3366 		mr->pages = NULL;
3367 	}
3368 	if (!IS_ERR_OR_NULL(mr->ib_umem))
3369 		ib_umem_release(mr->ib_umem);
3370 
3371 	kfree(mr);
3372 	atomic_dec(&rdev->mr_count);
3373 	return rc;
3374 }
3375 
3376 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3377 {
3378 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3379 
3380 	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3381 		return -ENOMEM;
3382 
3383 	mr->pages[mr->npages++] = addr;
3384 	return 0;
3385 }
3386 
3387 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3388 		      unsigned int *sg_offset)
3389 {
3390 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3391 
3392 	mr->npages = 0;
3393 	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3394 }
3395 
3396 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3397 			       u32 max_num_sg)
3398 {
3399 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3400 	struct bnxt_re_dev *rdev = pd->rdev;
3401 	struct bnxt_re_mr *mr = NULL;
3402 	int rc;
3403 
3404 	if (type != IB_MR_TYPE_MEM_REG) {
3405 		dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3406 		return ERR_PTR(-EINVAL);
3407 	}
3408 	if (max_num_sg > MAX_PBL_LVL_1_PGS)
3409 		return ERR_PTR(-EINVAL);
3410 
3411 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3412 	if (!mr)
3413 		return ERR_PTR(-ENOMEM);
3414 
3415 	mr->rdev = rdev;
3416 	mr->qplib_mr.pd = &pd->qplib_pd;
3417 	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3418 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3419 
3420 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3421 	if (rc)
3422 		goto bail;
3423 
3424 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3425 	mr->ib_mr.rkey = mr->ib_mr.lkey;
3426 
3427 	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3428 	if (!mr->pages) {
3429 		rc = -ENOMEM;
3430 		goto fail;
3431 	}
3432 	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3433 						 &mr->qplib_frpl, max_num_sg);
3434 	if (rc) {
3435 		dev_err(rdev_to_dev(rdev),
3436 			"Failed to allocate HW FR page list");
3437 		goto fail_mr;
3438 	}
3439 
3440 	atomic_inc(&rdev->mr_count);
3441 	return &mr->ib_mr;
3442 
3443 fail_mr:
3444 	kfree(mr->pages);
3445 fail:
3446 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3447 bail:
3448 	kfree(mr);
3449 	return ERR_PTR(rc);
3450 }
3451 
3452 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3453 			       struct ib_udata *udata)
3454 {
3455 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3456 	struct bnxt_re_dev *rdev = pd->rdev;
3457 	struct bnxt_re_mw *mw;
3458 	int rc;
3459 
3460 	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3461 	if (!mw)
3462 		return ERR_PTR(-ENOMEM);
3463 	mw->rdev = rdev;
3464 	mw->qplib_mw.pd = &pd->qplib_pd;
3465 
3466 	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3467 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3468 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3469 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3470 	if (rc) {
3471 		dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3472 		goto fail;
3473 	}
3474 	mw->ib_mw.rkey = mw->qplib_mw.rkey;
3475 
3476 	atomic_inc(&rdev->mw_count);
3477 	return &mw->ib_mw;
3478 
3479 fail:
3480 	kfree(mw);
3481 	return ERR_PTR(rc);
3482 }
3483 
3484 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3485 {
3486 	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3487 	struct bnxt_re_dev *rdev = mw->rdev;
3488 	int rc;
3489 
3490 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3491 	if (rc) {
3492 		dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3493 		return rc;
3494 	}
3495 
3496 	kfree(mw);
3497 	atomic_dec(&rdev->mw_count);
3498 	return rc;
3499 }
3500 
3501 static int bnxt_re_page_size_ok(int page_shift)
3502 {
3503 	switch (page_shift) {
3504 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
3505 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
3506 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
3507 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
3508 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
3509 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
3510 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
3511 	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
3512 		return 1;
3513 	default:
3514 		return 0;
3515 	}
3516 }
3517 
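/*
 * fill_umem_pbl_tbl - translate a umem's SG list into PBL entries.
 * The SG list is walked in PAGE_SIZE steps: the very first address is always
 * recorded (masked down to the PBL page boundary) and every later address
 * that is aligned to the PBL page size (1 << page_shift) starts a new entry.
 * Returns the number of PBL entries written.
 */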
3518 static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
3519 			     int page_shift)
3520 {
3521 	u64 *pbl_tbl = pbl_tbl_orig;
3522 	u64 paddr;
3523 	u64 page_mask = (1ULL << page_shift) - 1;
3524 	int i, pages;
3525 	struct scatterlist *sg;
3526 	int entry;
3527 
3528 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
3529 		pages = sg_dma_len(sg) >> PAGE_SHIFT;
3530 		for (i = 0; i < pages; i++) {
3531 			paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
3532 			if (pbl_tbl == pbl_tbl_orig)
3533 				*pbl_tbl++ = paddr & ~page_mask;
3534 			else if ((paddr & page_mask) == 0)
3535 				*pbl_tbl++ = paddr;
3536 		}
3537 	}
3538 	return pbl_tbl - pbl_tbl_orig;
3539 }
3540 
3541 /* uverbs */
3542 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3543 				  u64 virt_addr, int mr_access_flags,
3544 				  struct ib_udata *udata)
3545 {
3546 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3547 	struct bnxt_re_dev *rdev = pd->rdev;
3548 	struct bnxt_re_mr *mr;
3549 	struct ib_umem *umem;
3550 	u64 *pbl_tbl = NULL;
3551 	int umem_pgs, page_shift, rc;
3552 
3553 	if (length > BNXT_RE_MAX_MR_SIZE) {
3554 		dev_err(rdev_to_dev(rdev), "MR Size: %llu > Max supported: %llu\n",
3555 			length, (u64)BNXT_RE_MAX_MR_SIZE);
3556 		return ERR_PTR(-ENOMEM);
3557 	}
3558 
3559 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3560 	if (!mr)
3561 		return ERR_PTR(-ENOMEM);
3562 
3563 	mr->rdev = rdev;
3564 	mr->qplib_mr.pd = &pd->qplib_pd;
3565 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3566 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3567 
3568 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3569 	if (rc) {
3570 		dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3571 		goto free_mr;
3572 	}
3573 	/* The fixed portion of the rkey is the same as the lkey */
3574 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
3575 
3576 	umem = ib_umem_get(ib_pd->uobject->context, start, length,
3577 			   mr_access_flags, 0);
3578 	if (IS_ERR(umem)) {
3579 		dev_err(rdev_to_dev(rdev), "Failed to get umem");
3580 		rc = PTR_ERR(umem);
3581 		goto free_mrw;
3582 	}
3583 	mr->ib_umem = umem;
3584 
3585 	mr->qplib_mr.va = virt_addr;
3586 	umem_pgs = ib_umem_page_count(umem);
3587 	if (!umem_pgs) {
3588 		dev_err(rdev_to_dev(rdev), "umem is invalid!");
3589 		rc = -EINVAL;
3590 		goto free_umem;
3591 	}
3592 	mr->qplib_mr.total_size = length;
3593 
3594 	pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
3595 	if (!pbl_tbl) {
3596 		rc = -ENOMEM;
3597 		goto free_umem;
3598 	}
3599 
3600 	page_shift = umem->page_shift;
3601 
3602 	if (!bnxt_re_page_size_ok(page_shift)) {
3603 		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
3604 		rc = -EFAULT;
3605 		goto fail;
3606 	}
3607 
3608 	if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
3609 		dev_err(rdev_to_dev(rdev), "Requested MR size %llu > max supported %llu",
3610 			length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
3611 		rc = -EINVAL;
3612 		goto fail;
3613 	}
3614 	if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
3615 		page_shift = BNXT_RE_PAGE_SHIFT_2M;
3616 		dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
3617 			 1 << page_shift);
3618 	}
3619 
3620 	/* Map umem buf ptrs to the PBL */
3621 	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
3622 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
3623 			       umem_pgs, false, 1 << page_shift);
3624 	if (rc) {
3625 		dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3626 		goto fail;
3627 	}
3628 
3629 	kfree(pbl_tbl);
3630 
3631 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
3632 	mr->ib_mr.rkey = mr->qplib_mr.lkey;
3633 	atomic_inc(&rdev->mr_count);
3634 
3635 	return &mr->ib_mr;
3636 fail:
3637 	kfree(pbl_tbl);
3638 free_umem:
3639 	ib_umem_release(umem);
3640 free_mrw:
3641 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3642 free_mr:
3643 	kfree(mr);
3644 	return ERR_PTR(rc);
3645 }
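
/*
 * Illustrative sketch only: the user-space path that ends up in
 * bnxt_re_reg_user_mr() goes through libibverbs, roughly as below.
 * The buffer, length and access mask are hypothetical.
 *
 *	buf = memalign(sysconf(_SC_PAGESIZE), len);
 *	mr  = ibv_reg_mr(pd, buf, len,
 *			 IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ |
 *			 IBV_ACCESS_REMOTE_WRITE);
 *	...use mr->lkey / mr->rkey in work requests...
 *	ibv_dereg_mr(mr);
 */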
3646 
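/*
 * bnxt_re_alloc_ucontext - set up the per-process verbs context.
 * Validates the user library's ABI version, allocates the context and the
 * shared page, and returns the device limits (struct bnxt_re_uctx_resp)
 * through udata so the library can size its resources.
 */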
3647 struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3648 					   struct ib_udata *udata)
3649 {
3650 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3651 	struct bnxt_re_uctx_resp resp;
3652 	struct bnxt_re_ucontext *uctx;
3653 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3654 	int rc;
3655 
3656 	dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3657 		ibdev->uverbs_abi_ver);
3658 
3659 	if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3660 		dev_dbg(rdev_to_dev(rdev), "Requested ABI version differs from the supported version %d",
3661 			BNXT_RE_ABI_VERSION);
3662 		return ERR_PTR(-EPERM);
3663 	}
3664 
3665 	uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3666 	if (!uctx)
3667 		return ERR_PTR(-ENOMEM);
3668 
3669 	uctx->rdev = rdev;
3670 
3671 	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3672 	if (!uctx->shpg) {
3673 		rc = -ENOMEM;
3674 		goto fail;
3675 	}
3676 	spin_lock_init(&uctx->sh_lock);
3677 
3678 	resp.dev_id = rdev->en_dev->pdev->devfn; /* Temporary; use idr_alloc instead */
3679 	resp.max_qp = rdev->qplib_ctx.qpc_count;
3680 	resp.pg_size = PAGE_SIZE;
3681 	resp.cqe_sz = sizeof(struct cq_base);
3682 	resp.max_cqd = dev_attr->max_cq_wqes;
3683 	resp.rsvd    = 0;
3684 
3685 	rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3686 	if (rc) {
3687 		dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3688 		rc = -EFAULT;
3689 		goto cfail;
3690 	}
3691 
3692 	return &uctx->ib_uctx;
3693 cfail:
3694 	free_page((unsigned long)uctx->shpg);
3695 	uctx->shpg = NULL;
3696 fail:
3697 	kfree(uctx);
3698 	return ERR_PTR(rc);
3699 }
3700 
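/*
 * bnxt_re_dealloc_ucontext - tear down the per-process verbs context.
 * Releases the shared page and, if the application had been given a DPI
 * (allocated with its first PD), returns that DPI to the device.
 */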
3701 int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3702 {
3703 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3704 						   struct bnxt_re_ucontext,
3705 						   ib_uctx);
3706 
3707 	struct bnxt_re_dev *rdev = uctx->rdev;
3708 	int rc = 0;
3709 
3710 	if (uctx->shpg)
3711 		free_page((unsigned long)uctx->shpg);
3712 
3713 	if (uctx->dpi.dbr) {
3714 		/* Free the DPI that was allocated when the application
3715 		 * created its first PD, and mark the context dpi as unused.
3716 		 */
3717 		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3718 					    &rdev->qplib_res.dpi_tbl,
3719 					    &uctx->dpi);
3720 		if (rc)
3721 			dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
3722 		/* Don't fail; continue */
3723 		uctx->dpi.dbr = NULL;
3724 	}
3725 
3726 	kfree(uctx);
3727 	return 0;
3728 }
3729 
3730 /* Helper function to mmap the virtual memory from user app */
3731 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3732 {
3733 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3734 						   struct bnxt_re_ucontext,
3735 						   ib_uctx);
3736 	struct bnxt_re_dev *rdev = uctx->rdev;
3737 	u64 pfn;
3738 
3739 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3740 		return -EINVAL;
3741 
3742 	if (vma->vm_pgoff) {
3743 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3744 		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3745 				       PAGE_SIZE, vma->vm_page_prot)) {
3746 			dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3747 			return -EAGAIN;
3748 		}
3749 	} else {
3750 		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3751 		if (remap_pfn_range(vma, vma->vm_start,
3752 				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
3753 			dev_err(rdev_to_dev(rdev),
3754 				"Failed to map shared page");
3755 			return -EAGAIN;
3756 		}
3757 	}
3758 
3759 	return 0;
3760 }
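
/*
 * Illustrative sketch only: the user library maps these pages with mmap(2)
 * on the verbs context fd, following the convention implemented above
 * (offset 0 -> shared page, non-zero offset -> the DPI doorbell page whose
 * page-aligned physical address was returned earlier).  Variable names are
 * hypothetical.
 *
 *	shpg = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, cmd_fd, 0);
 *	dbr  = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, cmd_fd, dpi_pfn * page_size);
 */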
3761