xref: /linux/drivers/infiniband/hw/bnxt_re/ib_verbs.c (revision 4b6b6233f50f72353b54295ba594990b19f33223)
1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: IB Verbs interpreter
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44 #include <net/addrconf.h>
45 
46 #include <rdma/ib_verbs.h>
47 #include <rdma/ib_user_verbs.h>
48 #include <rdma/ib_umem.h>
49 #include <rdma/ib_addr.h>
50 #include <rdma/ib_mad.h>
51 #include <rdma/ib_cache.h>
52 #include <rdma/ib_pma.h>
53 #include <rdma/uverbs_ioctl.h>
54 #include <linux/hashtable.h>
55 
56 #include "roce_hsi.h"
57 #include "qplib_res.h"
58 #include "qplib_sp.h"
59 #include "qplib_fp.h"
60 #include "qplib_rcfw.h"
61 
62 #include "bnxt_re.h"
63 #include "ib_verbs.h"
64 #include "debugfs.h"
65 
66 #include <rdma/uverbs_types.h>
67 #include <rdma/uverbs_std_types.h>
68 
69 #include <rdma/ib_user_ioctl_cmds.h>
70 
71 #define UVERBS_MODULE_NAME bnxt_re
72 #include <rdma/uverbs_named_ioctl.h>
73 
74 #include <rdma/bnxt_re-abi.h>
75 
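/* Convert IB verbs access flags to the bnxt_qplib access flags used in
 * MR/MW registration commands.
 */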
76 static int __from_ib_access_flags(int iflags)
77 {
78 	int qflags = 0;
79 
80 	if (iflags & IB_ACCESS_LOCAL_WRITE)
81 		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
82 	if (iflags & IB_ACCESS_REMOTE_READ)
83 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
84 	if (iflags & IB_ACCESS_REMOTE_WRITE)
85 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
86 	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
87 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
88 	if (iflags & IB_ACCESS_MW_BIND)
89 		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
90 	if (iflags & IB_ZERO_BASED)
91 		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
92 	if (iflags & IB_ACCESS_ON_DEMAND)
93 		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
94 	return qflags;
95 }
96 
97 static int __to_ib_access_flags(int qflags)
98 {
99 	int iflags = 0;
100 
101 	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
102 		iflags |= IB_ACCESS_LOCAL_WRITE;
103 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
104 		iflags |= IB_ACCESS_REMOTE_WRITE;
105 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
106 		iflags |= IB_ACCESS_REMOTE_READ;
107 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
108 		iflags |= IB_ACCESS_REMOTE_ATOMIC;
109 	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
110 		iflags |= IB_ACCESS_MW_BIND;
111 	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
112 		iflags |= IB_ZERO_BASED;
113 	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
114 		iflags |= IB_ACCESS_ON_DEMAND;
115 	return iflags;
116 }
117 
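/* QP access flags are encoded differently on Wh+ (gen P4) and on P5/P7
 * and newer chips; select the mapping based on the chip context.
 */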
118 static u8 __qp_access_flags_from_ib(struct bnxt_qplib_chip_ctx *cctx, int iflags)
119 {
120 	u8 qflags = 0;
121 
122 	if (!bnxt_qplib_is_chip_gen_p5_p7(cctx))
123 		/* For Wh+ */
124 		return (u8)__from_ib_access_flags(iflags);
125 
126 	/* For P5, P7 and later chips */
127 	if (iflags & IB_ACCESS_LOCAL_WRITE)
128 		qflags |= CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE;
129 	if (iflags & IB_ACCESS_REMOTE_WRITE)
130 		qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
131 	if (iflags & IB_ACCESS_REMOTE_READ)
132 		qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
133 	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
134 		qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC;
135 
136 	return qflags;
137 }
138 
139 static int __qp_access_flags_to_ib(struct bnxt_qplib_chip_ctx *cctx, u8 qflags)
140 {
141 	int iflags = 0;
142 
143 	if (!bnxt_qplib_is_chip_gen_p5_p7(cctx))
144 		/* For Wh+ */
145 		return __to_ib_access_flags(qflags);
146 
147 	/* For P5, P7 and later chips */
148 	if (qflags & CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE)
149 		iflags |= IB_ACCESS_LOCAL_WRITE;
150 	if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE)
151 		iflags |= IB_ACCESS_REMOTE_WRITE;
152 	if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_READ)
153 		iflags |= IB_ACCESS_REMOTE_READ;
154 	if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC)
155 		iflags |= IB_ACCESS_REMOTE_ATOMIC;
156 
157 	return iflags;
158 }
159 
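/* Turn on relaxed ordering for this MR only when both the device
 * capability and PCIe relaxed ordering are enabled.
 */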
160 static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
161 						   struct bnxt_qplib_mrw *qplib_mr)
162 {
163 	if (_is_relaxed_ordering_supported(rdev->dev_attr->dev_cap_flags2) &&
164 	    pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
165 		qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
166 }
167 
168 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
169 			     struct bnxt_qplib_sge *sg_list, int num)
170 {
171 	int i, total = 0;
172 
173 	for (i = 0; i < num; i++) {
174 		sg_list[i].addr = ib_sg_list[i].addr;
175 		sg_list[i].lkey = ib_sg_list[i].lkey;
176 		sg_list[i].size = ib_sg_list[i].length;
177 		total += sg_list[i].size;
178 	}
179 	return total;
180 }
181 
182 /* Device */
183 int bnxt_re_query_device(struct ib_device *ibdev,
184 			 struct ib_device_attr *ib_attr,
185 			 struct ib_udata *udata)
186 {
187 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
188 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
189 
190 	memset(ib_attr, 0, sizeof(*ib_attr));
191 	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
192 	       min(sizeof(dev_attr->fw_ver),
193 		   sizeof(ib_attr->fw_ver)));
194 	addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
195 			    rdev->netdev->dev_addr);
196 	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
197 	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;
198 
199 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
200 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
201 	ib_attr->hw_ver = rdev->en_dev->pdev->revision;
202 	ib_attr->max_qp = dev_attr->max_qp;
203 	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
204 	ib_attr->device_cap_flags =
205 				    IB_DEVICE_CURR_QP_STATE_MOD
206 				    | IB_DEVICE_RC_RNR_NAK_GEN
207 				    | IB_DEVICE_SHUTDOWN_PORT
208 				    | IB_DEVICE_SYS_IMAGE_GUID
209 				    | IB_DEVICE_RESIZE_MAX_WR
210 				    | IB_DEVICE_PORT_ACTIVE_EVENT
211 				    | IB_DEVICE_N_NOTIFY_CQ
212 				    | IB_DEVICE_MEM_WINDOW
213 				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
214 				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
215 	ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
216 	ib_attr->max_send_sge = dev_attr->max_qp_sges;
217 	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
218 	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
219 	ib_attr->max_cq = dev_attr->max_cq;
220 	ib_attr->max_cqe = dev_attr->max_cq_wqes;
221 	ib_attr->max_mr = dev_attr->max_mr;
222 	ib_attr->max_pd = dev_attr->max_pd;
223 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
224 	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
225 	ib_attr->atomic_cap = IB_ATOMIC_NONE;
226 	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
227 	if (dev_attr->is_atomic) {
228 		ib_attr->atomic_cap = IB_ATOMIC_GLOB;
229 		ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
230 	}
231 
232 	ib_attr->max_ee_rd_atom = 0;
233 	ib_attr->max_res_rd_atom = 0;
234 	ib_attr->max_ee_init_rd_atom = 0;
235 	ib_attr->max_ee = 0;
236 	ib_attr->max_rdd = 0;
237 	ib_attr->max_mw = dev_attr->max_mw;
238 	ib_attr->max_raw_ipv6_qp = 0;
239 	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
240 	ib_attr->max_mcast_grp = 0;
241 	ib_attr->max_mcast_qp_attach = 0;
242 	ib_attr->max_total_mcast_qp_attach = 0;
243 	ib_attr->max_ah = dev_attr->max_ah;
244 
245 	ib_attr->max_srq = dev_attr->max_srq;
246 	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
247 	ib_attr->max_srq_sge = dev_attr->max_srq_sges;
248 
249 	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
250 
251 	ib_attr->max_pkeys = 1;
252 	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
253 	return 0;
254 }
255 
256 int bnxt_re_modify_device(struct ib_device *ibdev,
257 			  int device_modify_mask,
258 			  struct ib_device_modify *device_modify)
259 {
260 	ibdev_dbg(ibdev, "Modify device with mask 0x%x", device_modify_mask);
261 
262 	if (device_modify_mask & ~IB_DEVICE_MODIFY_NODE_DESC)
263 		return -EOPNOTSUPP;
264 
265 	if (!(device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC))
266 		return 0;
267 
268 	memcpy(ibdev->node_desc, device_modify->node_desc, IB_DEVICE_NODE_DESC_MAX);
269 	return 0;
270 }
271 
272 /* Port */
273 int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
274 		       struct ib_port_attr *port_attr)
275 {
276 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
277 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
278 	int rc;
279 
280 	memset(port_attr, 0, sizeof(*port_attr));
281 
282 	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
283 		port_attr->state = IB_PORT_ACTIVE;
284 		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
285 	} else {
286 		port_attr->state = IB_PORT_DOWN;
287 		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
288 	}
289 	port_attr->max_mtu = IB_MTU_4096;
290 	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
291 	/* One GID is reserved for RawEth QP. Report one less */
292 	port_attr->gid_tbl_len = (rdev->rcfw.roce_mirror ? (dev_attr->max_sgid - 1) :
293 				  dev_attr->max_sgid);
294 	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
295 				    IB_PORT_DEVICE_MGMT_SUP |
296 				    IB_PORT_VENDOR_CLASS_SUP;
297 	port_attr->ip_gids = true;
298 
299 	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
300 	port_attr->bad_pkey_cntr = 0;
301 	port_attr->qkey_viol_cntr = 0;
302 	port_attr->pkey_tbl_len = dev_attr->max_pkey;
303 	port_attr->lid = 0;
304 	port_attr->sm_lid = 0;
305 	port_attr->lmc = 0;
306 	port_attr->max_vl_num = 4;
307 	port_attr->sm_sl = 0;
308 	port_attr->subnet_timeout = 0;
309 	port_attr->init_type_reply = 0;
310 	rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
311 			      &port_attr->active_width);
312 
313 	return rc;
314 }
315 
316 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
317 			       struct ib_port_immutable *immutable)
318 {
319 	struct ib_port_attr port_attr;
320 
321 	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
322 		return -EINVAL;
323 
324 	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
325 	immutable->gid_tbl_len = port_attr.gid_tbl_len;
326 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
327 	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
328 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
329 	return 0;
330 }
331 
332 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
333 {
334 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
335 
336 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
337 		 rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1],
338 		 rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]);
339 }
340 
341 int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
342 		       u16 index, u16 *pkey)
343 {
344 	if (index > 0)
345 		return -EINVAL;
346 
347 	*pkey = IB_DEFAULT_PKEY_FULL;
348 
349 	return 0;
350 }
351 
352 int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
353 		      int index, union ib_gid *gid)
354 {
355 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
356 	int rc;
357 
358 	/* Ignore port_num */
359 	memset(gid, 0, sizeof(*gid));
360 	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
361 				 &rdev->qplib_res.sgid_tbl, index,
362 				 (struct bnxt_qplib_gid *)gid);
363 	return rc;
364 }
365 
366 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
367 {
368 	int rc = 0;
369 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
370 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
371 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
372 	struct bnxt_qplib_gid *gid_to_del;
373 	u16 vlan_id = 0xFFFF;
374 
375 	/* Delete the entry from the hardware */
376 	ctx = *context;
377 	if (!ctx)
378 		return -EINVAL;
379 
380 	if (sgid_tbl->active) {
381 		if (ctx->idx >= sgid_tbl->max)
382 			return -EINVAL;
383 		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
384 		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
385 		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
386 		 * or via the ib_unregister_device path. In the former case QP1
387 		 * may not be destroyed yet; in that case just return, as FW
388 		 * needs that entry to be present and will fail its deletion.
389 		 * We could get invoked again after QP1 is destroyed, OR get an
390 		 * ADD_GID call with a different GID value for the same index,
391 		 * in which case we issue a MODIFY_GID cmd to update the GID entry -- TBD
392 		 */
393 		if (ctx->idx == 0 &&
394 		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
395 		    ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
396 			ibdev_dbg(&rdev->ibdev,
397 				  "Trying to delete GID0 while QP1 is alive\n");
398 			return -EFAULT;
399 		}
400 		ctx->refcnt--;
401 		if (!ctx->refcnt) {
402 			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
403 						 vlan_id,  true);
404 			if (rc) {
405 				ibdev_err(&rdev->ibdev,
406 					  "Failed to remove GID: %#x", rc);
407 			} else {
408 				ctx_tbl = sgid_tbl->ctx;
409 				ctx_tbl[ctx->idx] = NULL;
410 				kfree(ctx);
411 			}
412 		}
413 	} else {
414 		return -EINVAL;
415 	}
416 	return rc;
417 }
418 
419 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
420 {
421 	int rc;
422 	u32 tbl_idx = 0;
423 	u16 vlan_id = 0xFFFF;
424 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
425 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
426 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
427 
428 	rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
429 	if (rc)
430 		return rc;
431 
432 	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
433 				 rdev->qplib_res.netdev->dev_addr,
434 				 vlan_id, true, &tbl_idx, false, 0);
435 	if (rc == -EALREADY) {
436 		ctx_tbl = sgid_tbl->ctx;
437 		ctx_tbl[tbl_idx]->refcnt++;
438 		*context = ctx_tbl[tbl_idx];
439 		return 0;
440 	}
441 
442 	if (rc < 0) {
443 		ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
444 		return rc;
445 	}
446 
447 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
448 	if (!ctx)
449 		return -ENOMEM;
450 	ctx_tbl = sgid_tbl->ctx;
451 	ctx->idx = tbl_idx;
452 	ctx->refcnt = 1;
453 	ctx_tbl[tbl_idx] = ctx;
454 	*context = ctx;
455 
456 	return rc;
457 }
458 
459 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
460 					    u32 port_num)
461 {
462 	return IB_LINK_LAYER_ETHERNET;
463 }
464 
465 #define	BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
466 
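/* Pre-build the type-1 memory-window bind WQE used to implement fencing
 * on Wh+ (gen P4) adapters; P5/P7 and newer chips return early and do
 * not use it.
 */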
467 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
468 {
469 	struct bnxt_re_fence_data *fence = &pd->fence;
470 	struct ib_mr *ib_mr = &fence->mr->ib_mr;
471 	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
472 	struct bnxt_re_dev *rdev = pd->rdev;
473 
474 	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
475 		return;
476 
477 	memset(wqe, 0, sizeof(*wqe));
478 	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
479 	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
480 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
481 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
482 	wqe->bind.zero_based = false;
483 	wqe->bind.parent_l_key = ib_mr->lkey;
484 	wqe->bind.va = (u64)(unsigned long)fence->va;
485 	wqe->bind.length = fence->size;
486 	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
487 	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
488 
489 	/* Save the initial rkey in fence structure for now;
490 	 * wqe->bind.r_key will be set at (re)bind time.
491 	 */
492 	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
493 }
494 
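/* Post the pre-built bind WQE on the QP's send queue with a fresh rkey
 * and ring the SQ doorbell.
 */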
495 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
496 {
497 	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
498 					     qplib_qp);
499 	struct ib_pd *ib_pd = qp->ib_qp.pd;
500 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
501 	struct bnxt_re_fence_data *fence = &pd->fence;
502 	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
503 	struct bnxt_qplib_swqe wqe;
504 	int rc;
505 
506 	memcpy(&wqe, fence_wqe, sizeof(wqe));
507 	wqe.bind.r_key = fence->bind_rkey;
508 	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
509 
510 	ibdev_dbg(&qp->rdev->ibdev,
511 		  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
512 		wqe.bind.r_key, qp->qplib_qp.id, pd);
513 	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
514 	if (rc) {
515 		ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
516 		return rc;
517 	}
518 	bnxt_qplib_post_send_db(&qp->qplib_qp);
519 
520 	return rc;
521 }
522 
523 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
524 {
525 	struct bnxt_re_fence_data *fence = &pd->fence;
526 	struct bnxt_re_dev *rdev = pd->rdev;
527 	struct device *dev = &rdev->en_dev->pdev->dev;
528 	struct bnxt_re_mr *mr = fence->mr;
529 
530 	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
531 		return;
532 
533 	if (fence->mw) {
534 		bnxt_re_dealloc_mw(fence->mw);
535 		fence->mw = NULL;
536 	}
537 	if (mr) {
538 		if (mr->ib_mr.rkey)
539 			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
540 					     true);
541 		if (mr->ib_mr.lkey)
542 			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
543 		kfree(mr);
544 		fence->mr = NULL;
545 	}
546 	if (fence->dma_addr) {
547 		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
548 				 DMA_BIDIRECTIONAL);
549 		fence->dma_addr = 0;
550 	}
551 }
552 
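/* Set up the fence resources for a PD on Wh+ (gen P4) adapters: DMA-map
 * the fence buffer, allocate and register a fence MR over it, and create
 * the type-1 MW that the bind WQE targets.
 */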
553 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
554 {
555 	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
556 	struct bnxt_re_fence_data *fence = &pd->fence;
557 	struct bnxt_re_dev *rdev = pd->rdev;
558 	struct device *dev = &rdev->en_dev->pdev->dev;
559 	struct bnxt_re_mr *mr = NULL;
560 	dma_addr_t dma_addr = 0;
561 	struct ib_mw *mw;
562 	int rc;
563 
564 	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
565 		return 0;
566 
567 	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
568 				  DMA_BIDIRECTIONAL);
569 	rc = dma_mapping_error(dev, dma_addr);
570 	if (rc) {
571 		ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
572 		rc = -EIO;
573 		fence->dma_addr = 0;
574 		goto fail;
575 	}
576 	fence->dma_addr = dma_addr;
577 
578 	/* Allocate a MR */
579 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
580 	if (!mr) {
581 		rc = -ENOMEM;
582 		goto fail;
583 	}
584 	fence->mr = mr;
585 	mr->rdev = rdev;
586 	mr->qplib_mr.pd = &pd->qplib_pd;
587 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
588 	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
589 	if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
590 		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
591 		if (rc) {
592 			ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
593 			goto fail;
594 		}
595 
596 		/* Register MR */
597 		mr->ib_mr.lkey = mr->qplib_mr.lkey;
598 	} else {
599 		mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
600 	}
601 	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
602 	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
603 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
604 			       BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
605 	if (rc) {
606 		ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
607 		goto fail;
608 	}
609 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
610 
611 	/* Create a fence MW only for kernel consumers */
612 	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
613 	if (IS_ERR(mw)) {
614 		ibdev_err(&rdev->ibdev,
615 			  "Failed to create fence-MW for PD: %p\n", pd);
616 		rc = PTR_ERR(mw);
617 		goto fail;
618 	}
619 	fence->mw = mw;
620 
621 	bnxt_re_create_fence_wqe(pd);
622 	return 0;
623 
624 fail:
625 	bnxt_re_destroy_fence_mr(pd);
626 	return rc;
627 }
628 
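/* Insert an rdma_user_mmap entry for the requested mapping type. The
 * shared page is pinned at offset 0 (exact insert); all other mappings
 * get a dynamically assigned offset, returned through *offset.
 */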
629 static struct bnxt_re_user_mmap_entry*
630 bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
631 			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
632 {
633 	struct bnxt_re_user_mmap_entry *entry;
634 	int ret;
635 
636 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
637 	if (!entry)
638 		return NULL;
639 
640 	entry->mem_offset = mem_offset;
641 	entry->mmap_flag = mmap_flag;
642 	entry->uctx = uctx;
643 
644 	switch (mmap_flag) {
645 	case BNXT_RE_MMAP_SH_PAGE:
646 		ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
647 							&entry->rdma_entry, PAGE_SIZE, 0);
648 		break;
649 	case BNXT_RE_MMAP_UC_DB:
650 	case BNXT_RE_MMAP_WC_DB:
651 	case BNXT_RE_MMAP_DBR_BAR:
652 	case BNXT_RE_MMAP_DBR_PAGE:
653 	case BNXT_RE_MMAP_TOGGLE_PAGE:
654 		ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
655 						  &entry->rdma_entry, PAGE_SIZE);
656 		break;
657 	default:
658 		ret = -EINVAL;
659 		break;
660 	}
661 
662 	if (ret) {
663 		kfree(entry);
664 		return NULL;
665 	}
666 	if (offset)
667 		*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
668 
669 	return entry;
670 }
671 
672 /* Protection Domains */
673 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
674 {
675 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
676 	struct bnxt_re_dev *rdev = pd->rdev;
677 
678 	if (udata) {
679 		rdma_user_mmap_entry_remove(pd->pd_db_mmap);
680 		pd->pd_db_mmap = NULL;
681 	}
682 
683 	bnxt_re_destroy_fence_mr(pd);
684 
685 	if (pd->qplib_pd.id) {
686 		if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
687 					   &rdev->qplib_res.pd_tbl,
688 					   &pd->qplib_pd))
689 			atomic_dec(&rdev->stats.res.pd_count);
690 	}
691 	return 0;
692 }
693 
694 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
695 {
696 	struct ib_device *ibdev = ibpd->device;
697 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
698 	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
699 		udata, struct bnxt_re_ucontext, ib_uctx);
700 	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
701 	struct bnxt_re_user_mmap_entry *entry = NULL;
702 	u32 active_pds;
703 	int rc = 0;
704 
705 	pd->rdev = rdev;
706 	if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
707 		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
708 		rc = -ENOMEM;
709 		goto fail;
710 	}
711 
712 	if (udata) {
713 		struct bnxt_re_pd_resp resp = {};
714 
715 		if (!ucntx->dpi.dbr) {
716 			/* Allocate the DPI here in alloc_pd so that
717 			 * ibv_devinfo and similar applications do not
718 			 * fail when DPIs are depleted.
719 			 */
720 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
721 						 &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
722 				rc = -ENOMEM;
723 				goto dbfail;
724 			}
725 		}
726 
727 		resp.pdid = pd->qplib_pd.id;
728 		/* Still allow mapping this DBR to the new user PD. */
729 		resp.dpi = ucntx->dpi.dpi;
730 
731 		entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
732 						  BNXT_RE_MMAP_UC_DB, &resp.dbr);
733 
734 		if (!entry) {
735 			rc = -ENOMEM;
736 			goto dbfail;
737 		}
738 
739 		pd->pd_db_mmap = &entry->rdma_entry;
740 
741 		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
742 		if (rc) {
743 			rdma_user_mmap_entry_remove(pd->pd_db_mmap);
744 			rc = -EFAULT;
745 			goto dbfail;
746 		}
747 	}
748 
749 	if (!udata)
750 		if (bnxt_re_create_fence_mr(pd))
751 			ibdev_warn(&rdev->ibdev,
752 				   "Failed to create Fence-MR\n");
753 	active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
754 	if (active_pds > rdev->stats.res.pd_watermark)
755 		rdev->stats.res.pd_watermark = active_pds;
756 
757 	return 0;
758 dbfail:
759 	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
760 			      &pd->qplib_pd);
761 fail:
762 	return rc;
763 }
764 
765 /* Address Handles */
766 int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
767 {
768 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
769 	struct bnxt_re_dev *rdev = ah->rdev;
770 	bool block = true;
771 	int rc;
772 
773 	block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
774 	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
775 	if (BNXT_RE_CHECK_RC(rc)) {
776 		if (rc == -ETIMEDOUT)
777 			rc = 0;
778 		else
779 			goto fail;
780 	}
781 	atomic_dec(&rdev->stats.res.ah_count);
782 fail:
783 	return rc;
784 }
785 
786 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
787 {
788 	u8 nw_type;
789 
790 	switch (ntype) {
791 	case RDMA_NETWORK_IPV4:
792 		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
793 		break;
794 	case RDMA_NETWORK_IPV6:
795 		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
796 		break;
797 	default:
798 		nw_type = CMDQ_CREATE_AH_TYPE_V1;
799 		break;
800 	}
801 	return nw_type;
802 }
803 
804 int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
805 		      struct ib_udata *udata)
806 {
807 	struct ib_pd *ib_pd = ib_ah->pd;
808 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
809 	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
810 	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
811 	struct bnxt_re_dev *rdev = pd->rdev;
812 	const struct ib_gid_attr *sgid_attr;
813 	struct bnxt_re_gid_ctx *ctx;
814 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
815 	u32 active_ahs;
816 	u8 nw_type;
817 	int rc;
818 
819 	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
820 		ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
821 		return -EINVAL;
822 	}
823 
824 	ah->rdev = rdev;
825 	ah->qplib_ah.pd = &pd->qplib_pd;
826 
827 	/* Supply the configuration for the HW */
828 	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
829 	       sizeof(union ib_gid));
830 	sgid_attr = grh->sgid_attr;
831 	/* Get the HW context of the GID. The reference
832 	 * on the GID table entry is already taken by the caller.
833 	 */
834 	ctx = rdma_read_gid_hw_context(sgid_attr);
835 	ah->qplib_ah.sgid_index = ctx->idx;
836 	ah->qplib_ah.host_sgid_index = grh->sgid_index;
837 	ah->qplib_ah.traffic_class = grh->traffic_class;
838 	ah->qplib_ah.flow_label = grh->flow_label;
839 	ah->qplib_ah.hop_limit = grh->hop_limit;
840 	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
841 
842 	/* Get network header type for this GID */
843 	nw_type = rdma_gid_attr_network_type(sgid_attr);
844 	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
845 
846 	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
847 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
848 				  !(init_attr->flags &
849 				    RDMA_CREATE_AH_SLEEPABLE));
850 	if (rc) {
851 		ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
852 		return rc;
853 	}
854 
855 	/* Write AVID to shared page. */
856 	if (udata) {
857 		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
858 			udata, struct bnxt_re_ucontext, ib_uctx);
859 		unsigned long flag;
860 		u32 *wrptr;
861 
862 		spin_lock_irqsave(&uctx->sh_lock, flag);
863 		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
864 		*wrptr = ah->qplib_ah.id;
865 		wmb(); /* make sure the AVID write to the shared page is visible */
866 		spin_unlock_irqrestore(&uctx->sh_lock, flag);
867 	}
868 	active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
869 	if (active_ahs > rdev->stats.res.ah_watermark)
870 		rdev->stats.res.ah_watermark = active_ahs;
871 
872 	return 0;
873 }
874 
875 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
876 {
877 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
878 
879 	ah_attr->type = ib_ah->type;
880 	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
881 	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
882 	rdma_ah_set_grh(ah_attr, NULL, 0,
883 			ah->qplib_ah.host_sgid_index,
884 			0, ah->qplib_ah.traffic_class);
885 	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
886 	rdma_ah_set_port_num(ah_attr, 1);
887 	rdma_ah_set_static_rate(ah_attr, 0);
888 	return 0;
889 }
890 
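/* Lock both CQs of a QP, always taking the send CQ lock first; when the
 * QP uses one CQ for both directions only that lock is taken.
 */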
891 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
892 	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
893 {
894 	unsigned long flags;
895 
896 	spin_lock_irqsave(&qp->scq->cq_lock, flags);
897 	if (qp->rcq != qp->scq)
898 		spin_lock(&qp->rcq->cq_lock);
899 	else
900 		__acquire(&qp->rcq->cq_lock);
901 
902 	return flags;
903 }
904 
905 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
906 			unsigned long flags)
907 	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
908 {
909 	if (qp->rcq != qp->scq)
910 		spin_unlock(&qp->rcq->cq_lock);
911 	else
912 		__release(&qp->rcq->cq_lock);
913 	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
914 }
915 
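/* Tear down the shadow AH and shadow QP that were created to service
 * QP1 traffic, and release the SQP table.
 */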
916 static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
917 {
918 	struct bnxt_re_qp *gsi_sqp;
919 	struct bnxt_re_ah *gsi_sah;
920 	struct bnxt_re_dev *rdev;
921 	int rc;
922 
923 	rdev = qp->rdev;
924 	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
925 	gsi_sah = rdev->gsi_ctx.gsi_sah;
926 
927 	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
928 	bnxt_qplib_destroy_ah(&rdev->qplib_res,
929 			      &gsi_sah->qplib_ah,
930 			      true);
931 	atomic_dec(&rdev->stats.res.ah_count);
932 	bnxt_qplib_clean_qp(&qp->qplib_qp);
933 
934 	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
935 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
936 	if (rc) {
937 		ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
938 		goto fail;
939 	}
940 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
941 
942 	/* remove from active qp list */
943 	mutex_lock(&rdev->qp_lock);
944 	list_del(&gsi_sqp->list);
945 	mutex_unlock(&rdev->qp_lock);
946 	atomic_dec(&rdev->stats.res.qp_count);
947 
948 	kfree(rdev->gsi_ctx.sqp_tbl);
949 	kfree(gsi_sah);
950 	kfree(gsi_sqp);
951 	rdev->gsi_ctx.gsi_sqp = NULL;
952 	rdev->gsi_ctx.gsi_sah = NULL;
953 	rdev->gsi_ctx.sqp_tbl = NULL;
954 
955 	return 0;
956 fail:
957 	return rc;
958 }
959 
960 static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)
961 {
962 	int rc;
963 
964 	if (!rdev->rcfw.roce_mirror)
965 		return;
966 
967 	rc = bnxt_qplib_del_sgid(&rdev->qplib_res.sgid_tbl,
968 				 (struct bnxt_qplib_gid *)&rdev->ugid,
969 				 0xFFFF, true);
970 	if (rc)
971 		dev_err(rdev_to_dev(rdev), "Failed to delete unique GID, rc: %d\n", rc);
972 }
973 
974 /* Queue Pairs */
975 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
976 {
977 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
978 	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
979 	struct bnxt_re_dev *rdev = qp->rdev;
980 	struct bnxt_qplib_nq *scq_nq = NULL;
981 	struct bnxt_qplib_nq *rcq_nq = NULL;
982 	unsigned int flags;
983 	int rc;
984 
985 	bnxt_re_debug_rem_qpinfo(rdev, qp);
986 
987 	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
988 
989 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
990 	if (rc)
991 		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
992 
993 	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
994 		flags = bnxt_re_lock_cqs(qp);
995 		bnxt_qplib_clean_qp(&qp->qplib_qp);
996 		bnxt_re_unlock_cqs(qp, flags);
997 	}
998 
999 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
1000 
1001 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
1002 		bnxt_re_destroy_gsi_sqp(qp);
1003 
1004 	mutex_lock(&rdev->qp_lock);
1005 	list_del(&qp->list);
1006 	mutex_unlock(&rdev->qp_lock);
1007 	atomic_dec(&rdev->stats.res.qp_count);
1008 	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
1009 		atomic_dec(&rdev->stats.res.rc_qp_count);
1010 	else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
1011 		atomic_dec(&rdev->stats.res.ud_qp_count);
1012 
1013 	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
1014 		bnxt_re_del_unique_gid(rdev);
1015 
1016 	ib_umem_release(qp->rumem);
1017 	ib_umem_release(qp->sumem);
1018 
1019 	/* Flush all entries of the notification queues associated with
1020 	 * the given QP.
1021 	 */
1022 	scq_nq = qplib_qp->scq->nq;
1023 	rcq_nq = qplib_qp->rcq->nq;
1024 	bnxt_re_synchronize_nq(scq_nq);
1025 	if (scq_nq != rcq_nq)
1026 		bnxt_re_synchronize_nq(rcq_nq);
1027 
1028 	return 0;
1029 }
1030 
1031 static u8 __from_ib_qp_type(enum ib_qp_type type)
1032 {
1033 	switch (type) {
1034 	case IB_QPT_GSI:
1035 		return CMDQ_CREATE_QP1_TYPE_GSI;
1036 	case IB_QPT_RC:
1037 		return CMDQ_CREATE_QP_TYPE_RC;
1038 	case IB_QPT_UD:
1039 		return CMDQ_CREATE_QP_TYPE_UD;
1040 	case IB_QPT_RAW_PACKET:
1041 		return CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE;
1042 	default:
1043 		return IB_QPT_MAX;
1044 	}
1045 }
1046 
1047 static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
1048 				   int rsge, int max)
1049 {
1050 	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1051 		rsge = max;
1052 	return bnxt_re_get_rwqe_size(rsge);
1053 }
1054 
1055 static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
1056 {
1057 	u16 wqe_size, calc_ils;
1058 
1059 	wqe_size = bnxt_re_get_swqe_size(nsge);
1060 	if (ilsize) {
1061 		calc_ils = sizeof(struct sq_send_hdr) + ilsize;
1062 		wqe_size = max_t(u16, calc_ils, wqe_size);
1063 		wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
1064 	}
1065 	return wqe_size;
1066 }
1067 
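/* Size the send WQE: static (compat) mode uses the fixed 128-byte WQE
 * with 6 SGEs, while variable mode sizes the WQE from the SGE count and
 * the requested inline data.
 */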
1068 static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
1069 				   struct ib_qp_init_attr *init_attr)
1070 {
1071 	struct bnxt_qplib_dev_attr *dev_attr;
1072 	struct bnxt_qplib_qp *qplqp;
1073 	struct bnxt_re_dev *rdev;
1074 	struct bnxt_qplib_q *sq;
1075 	int align, ilsize;
1076 
1077 	rdev = qp->rdev;
1078 	qplqp = &qp->qplib_qp;
1079 	sq = &qplqp->sq;
1080 	dev_attr = rdev->dev_attr;
1081 
1082 	align = sizeof(struct sq_send_hdr);
1083 	ilsize = ALIGN(init_attr->cap.max_inline_data, align);
1084 
1085 	/* For gen P4 and gen P5 fixed-WQE compatibility mode, the WQE
1086 	 * size is fixed at 128 bytes, i.e. 6 SGEs.
1087 	 */
1088 	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) {
1089 		sq->wqe_size = bnxt_re_get_swqe_size(BNXT_STATIC_MAX_SGE);
1090 		sq->max_sge = BNXT_STATIC_MAX_SGE;
1091 	} else {
1092 		sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
1093 		if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
1094 			return -EINVAL;
1095 	}
1096 
1097 	if (init_attr->cap.max_inline_data) {
1098 		qplqp->max_inline_data = sq->wqe_size -
1099 			sizeof(struct sq_send_hdr);
1100 		init_attr->cap.max_inline_data = qplqp->max_inline_data;
1101 	}
1102 
1103 	return 0;
1104 }
1105 
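/* Pin the user-space SQ buffer (and, for RC QPs, the PSN search area
 * appended to it), pin the RQ buffer unless an SRQ is used, and point
 * the QP at the user context's DPI.
 */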
1106 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
1107 				struct bnxt_re_qp *qp, struct bnxt_re_ucontext *cntx,
1108 				struct bnxt_re_qp_req *ureq)
1109 {
1110 	struct bnxt_qplib_qp *qplib_qp;
1111 	int bytes = 0, psn_sz;
1112 	struct ib_umem *umem;
1113 	int psn_nume;
1114 
1115 	qplib_qp = &qp->qplib_qp;
1116 
1117 	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
1118 	/* Consider mapping PSN search memory only for RC QPs. */
1119 	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1120 		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
1121 						   sizeof(struct sq_psn_search_ext) :
1122 						   sizeof(struct sq_psn_search);
1123 		if (cntx && bnxt_re_is_var_size_supported(rdev, cntx)) {
1124 			psn_nume = ureq->sq_slots;
1125 		} else {
1126 			psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1127 			qplib_qp->sq.max_wqe : ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
1128 				 sizeof(struct bnxt_qplib_sge));
1129 		}
1130 		if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
1131 			psn_nume = roundup_pow_of_two(psn_nume);
1132 		bytes += (psn_nume * psn_sz);
1133 	}
1134 
1135 	bytes = PAGE_ALIGN(bytes);
1136 	umem = ib_umem_get(&rdev->ibdev, ureq->qpsva, bytes,
1137 			   IB_ACCESS_LOCAL_WRITE);
1138 	if (IS_ERR(umem))
1139 		return PTR_ERR(umem);
1140 
1141 	qp->sumem = umem;
1142 	qplib_qp->sq.sg_info.umem = umem;
1143 	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
1144 	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
1145 	qplib_qp->qp_handle = ureq->qp_handle;
1146 
1147 	if (!qp->qplib_qp.srq) {
1148 		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
1149 		bytes = PAGE_ALIGN(bytes);
1150 		umem = ib_umem_get(&rdev->ibdev, ureq->qprva, bytes,
1151 				   IB_ACCESS_LOCAL_WRITE);
1152 		if (IS_ERR(umem))
1153 			goto rqfail;
1154 		qp->rumem = umem;
1155 		qplib_qp->rq.sg_info.umem = umem;
1156 		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
1157 		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
1158 	}
1159 
1160 	qplib_qp->dpi = &cntx->dpi;
1161 	return 0;
1162 rqfail:
1163 	ib_umem_release(qp->sumem);
1164 	qp->sumem = NULL;
1165 	memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
1166 
1167 	return PTR_ERR(umem);
1168 }
1169 
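/* Create the AH used by the shadow QP: the DGID is set to the local
 * SGID at index 0 and the DMAC to the port's own MAC address.
 */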
1170 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
1171 				(struct bnxt_re_pd *pd,
1172 				 struct bnxt_qplib_res *qp1_res,
1173 				 struct bnxt_qplib_qp *qp1_qp)
1174 {
1175 	struct bnxt_re_dev *rdev = pd->rdev;
1176 	struct bnxt_re_ah *ah;
1177 	union ib_gid sgid;
1178 	int rc;
1179 
1180 	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
1181 	if (!ah)
1182 		return NULL;
1183 
1184 	ah->rdev = rdev;
1185 	ah->qplib_ah.pd = &pd->qplib_pd;
1186 
1187 	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
1188 	if (rc)
1189 		goto fail;
1190 
1191 	/* Supply the same data for DGID as for SGID */
1192 	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
1193 	       sizeof(union ib_gid));
1194 	ah->qplib_ah.sgid_index = 0;
1195 
1196 	ah->qplib_ah.traffic_class = 0;
1197 	ah->qplib_ah.flow_label = 0;
1198 	ah->qplib_ah.hop_limit = 1;
1199 	ah->qplib_ah.sl = 0;
1200 	/* Have DMAC same as SMAC */
1201 	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
1202 
1203 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
1204 	if (rc) {
1205 		ibdev_err(&rdev->ibdev,
1206 			  "Failed to allocate HW AH for Shadow QP");
1207 		goto fail;
1208 	}
1209 	atomic_inc(&rdev->stats.res.ah_count);
1210 
1211 	return ah;
1212 
1213 fail:
1214 	kfree(ah);
1215 	return NULL;
1216 }
1217 
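/* Create the shadow UD QP used to relay QP1 traffic on adapters that
 * need it: its SQ depth mirrors the QP1 RQ depth and it shares QP1's
 * CQs.
 */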
1218 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1219 				(struct bnxt_re_pd *pd,
1220 				 struct bnxt_qplib_res *qp1_res,
1221 				 struct bnxt_qplib_qp *qp1_qp)
1222 {
1223 	struct bnxt_re_dev *rdev = pd->rdev;
1224 	struct bnxt_re_qp *qp;
1225 	int rc;
1226 
1227 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1228 	if (!qp)
1229 		return NULL;
1230 
1231 	qp->rdev = rdev;
1232 
1233 	/* Initialize the shadow QP structure from the QP1 values */
1234 	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1235 
1236 	qp->qplib_qp.pd = &pd->qplib_pd;
1237 	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1238 	qp->qplib_qp.type = IB_QPT_UD;
1239 
1240 	qp->qplib_qp.max_inline_data = 0;
1241 	qp->qplib_qp.sig_type = true;
1242 
1243 	/* Shadow QP SQ depth should be same as QP1 RQ depth */
1244 	qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
1245 	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1246 	qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe;
1247 	qp->qplib_qp.sq.max_sge = 2;
1248 	/* Q full delta can be 1 since it is internal QP */
1249 	/* Q full delta can be 1 since this is an internal QP */
1250 	qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
1251 	qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
1252 
1253 	qp->qplib_qp.scq = qp1_qp->scq;
1254 	qp->qplib_qp.rcq = qp1_qp->rcq;
1255 
1256 	qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
1257 	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1258 	qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe;
1259 	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1260 	/* Q full delta can be 1 since this is an internal QP */
1261 	qp->qplib_qp.rq.q_full_delta = 1;
1262 	qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
1263 	qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
1264 
1265 	qp->qplib_qp.mtu = qp1_qp->mtu;
1266 
1267 	qp->qplib_qp.sq_hdr_buf_size = 0;
1268 	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1269 	qp->qplib_qp.dpi = &rdev->dpi_privileged;
1270 
1271 	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1272 	if (rc)
1273 		goto fail;
1274 
1275 	spin_lock_init(&qp->sq_lock);
1276 	INIT_LIST_HEAD(&qp->list);
1277 	mutex_lock(&rdev->qp_lock);
1278 	list_add_tail(&qp->list, &rdev->qp_list);
1279 	atomic_inc(&rdev->stats.res.qp_count);
1280 	mutex_unlock(&rdev->qp_lock);
1281 	return qp;
1282 fail:
1283 	kfree(qp);
1284 	return NULL;
1285 }
1286 
1287 static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1288 				struct ib_qp_init_attr *init_attr,
1289 				struct bnxt_re_ucontext *uctx)
1290 {
1291 	struct bnxt_qplib_dev_attr *dev_attr;
1292 	struct bnxt_qplib_qp *qplqp;
1293 	struct bnxt_re_dev *rdev;
1294 	struct bnxt_qplib_q *rq;
1295 	int entries;
1296 
1297 	rdev = qp->rdev;
1298 	qplqp = &qp->qplib_qp;
1299 	rq = &qplqp->rq;
1300 	dev_attr = rdev->dev_attr;
1301 
1302 	if (init_attr->srq) {
1303 		struct bnxt_re_srq *srq;
1304 
1305 		srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
1306 		qplqp->srq = &srq->qplib_srq;
1307 		rq->max_wqe = 0;
1308 	} else {
1309 		rq->max_sge = init_attr->cap.max_recv_sge;
1310 		if (rq->max_sge > dev_attr->max_qp_sges)
1311 			rq->max_sge = dev_attr->max_qp_sges;
1312 		init_attr->cap.max_recv_sge = rq->max_sge;
1313 		rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
1314 						       dev_attr->max_qp_sges);
1315 		/* Allocate one extra entry so that a completely full queue
1316 		 * cannot be mistaken for an empty one.
1317 		 */
1318 		entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
1319 		rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1320 		rq->max_sw_wqe = rq->max_wqe;
1321 		rq->q_full_delta = 0;
1322 		rq->sg_info.pgsize = PAGE_SIZE;
1323 		rq->sg_info.pgshft = PAGE_SHIFT;
1324 	}
1325 
1326 	return 0;
1327 }
1328 
1329 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1330 {
1331 	struct bnxt_qplib_dev_attr *dev_attr;
1332 	struct bnxt_qplib_qp *qplqp;
1333 	struct bnxt_re_dev *rdev;
1334 
1335 	rdev = qp->rdev;
1336 	qplqp = &qp->qplib_qp;
1337 	dev_attr = rdev->dev_attr;
1338 
1339 	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
1340 		qplqp->rq.max_sge = dev_attr->max_qp_sges;
1341 		if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
1342 			qplqp->rq.max_sge = dev_attr->max_qp_sges;
1343 		qplqp->rq.max_sge = 6;
1344 	}
1345 }
1346 
1347 static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
1348 				struct ib_qp_init_attr *init_attr,
1349 				struct bnxt_re_ucontext *uctx,
1350 				struct bnxt_re_qp_req *ureq)
1351 {
1352 	struct bnxt_qplib_dev_attr *dev_attr;
1353 	struct bnxt_qplib_qp *qplqp;
1354 	struct bnxt_re_dev *rdev;
1355 	struct bnxt_qplib_q *sq;
1356 	int diff = 0;
1357 	int entries;
1358 	int rc;
1359 
1360 	rdev = qp->rdev;
1361 	qplqp = &qp->qplib_qp;
1362 	sq = &qplqp->sq;
1363 	dev_attr = rdev->dev_attr;
1364 
1365 	sq->max_sge = init_attr->cap.max_send_sge;
1366 	entries = init_attr->cap.max_send_wr;
1367 	if (uctx && qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) {
1368 		sq->max_wqe = ureq->sq_slots;
1369 		sq->max_sw_wqe = ureq->sq_slots;
1370 		sq->wqe_size = sizeof(struct sq_sge);
1371 	} else {
1372 		if (sq->max_sge > dev_attr->max_qp_sges) {
1373 			sq->max_sge = dev_attr->max_qp_sges;
1374 			init_attr->cap.max_send_sge = sq->max_sge;
1375 		}
1376 
1377 		rc = bnxt_re_setup_swqe_size(qp, init_attr);
1378 		if (rc)
1379 			return rc;
1380 
1381 		/* Allocate 128 + 1 more than what's provided */
1382 		diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
1383 			0 : BNXT_QPLIB_RESERVED_QP_WRS;
1384 		entries = bnxt_re_init_depth(entries + diff + 1, uctx);
1385 		sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
1386 		if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
1387 			sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true);
1388 		else
1389 			sq->max_sw_wqe = sq->max_wqe;
1390 
1391 	}
1392 	sq->q_full_delta = diff + 1;
1393 	/*
1394 	 * Reserve one slot for the phantom WQE. The application can
1395 	 * post one extra entry in this case, but that is allowed to
1396 	 * avoid an unexpected queue-full condition.
1397 	 */
1398 	qplqp->sq.q_full_delta -= 1;
1399 	qplqp->sq.sg_info.pgsize = PAGE_SIZE;
1400 	qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
1401 
1402 	return 0;
1403 }
1404 
1405 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
1406 				       struct ib_qp_init_attr *init_attr,
1407 				       struct bnxt_re_ucontext *uctx)
1408 {
1409 	struct bnxt_qplib_dev_attr *dev_attr;
1410 	struct bnxt_qplib_qp *qplqp;
1411 	struct bnxt_re_dev *rdev;
1412 	int entries;
1413 
1414 	rdev = qp->rdev;
1415 	qplqp = &qp->qplib_qp;
1416 	dev_attr = rdev->dev_attr;
1417 
1418 	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
1419 		entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
1420 		qplqp->sq.max_wqe = min_t(u32, entries,
1421 					  dev_attr->max_qp_wqes + 1);
1422 		qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
1423 			init_attr->cap.max_send_wr;
1424 		qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
1425 		if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
1426 			qplqp->sq.max_sge = dev_attr->max_qp_sges;
1427 	}
1428 }
1429 
1430 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
1431 				struct ib_qp_init_attr *init_attr)
1432 {
1433 	struct bnxt_qplib_chip_ctx *chip_ctx;
1434 	int qptype;
1435 
1436 	chip_ctx = rdev->chip_ctx;
1437 
1438 	qptype = __from_ib_qp_type(init_attr->qp_type);
1439 	if (qptype == IB_QPT_MAX) {
1440 		ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
1441 		qptype = -EOPNOTSUPP;
1442 		goto out;
1443 	}
1444 
1445 	if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
1446 	    init_attr->qp_type == IB_QPT_GSI)
1447 		qptype = CMDQ_CREATE_QP_TYPE_GSI;
1448 out:
1449 	return qptype;
1450 }
1451 
1452 static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1453 				struct ib_qp_init_attr *init_attr,
1454 				struct bnxt_re_ucontext *uctx,
1455 				struct bnxt_re_qp_req *ureq)
1456 {
1457 	struct bnxt_qplib_dev_attr *dev_attr;
1458 	struct bnxt_qplib_qp *qplqp;
1459 	struct bnxt_re_dev *rdev;
1460 	struct bnxt_re_cq *cq;
1461 	int rc = 0, qptype;
1462 
1463 	rdev = qp->rdev;
1464 	qplqp = &qp->qplib_qp;
1465 	dev_attr = rdev->dev_attr;
1466 
1467 	/* Setup misc params */
1468 	ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
1469 	qplqp->pd = &pd->qplib_pd;
1470 	qplqp->qp_handle = (u64)qplqp;
1471 	qplqp->max_inline_data = init_attr->cap.max_inline_data;
1472 	qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1473 	qptype = bnxt_re_init_qp_type(rdev, init_attr);
1474 	if (qptype < 0) {
1475 		rc = qptype;
1476 		goto out;
1477 	}
1478 	qplqp->type = (u8)qptype;
1479 	qplqp->wqe_mode = bnxt_re_is_var_size_supported(rdev, uctx);
1480 	if (init_attr->qp_type == IB_QPT_RC) {
1481 		qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
1482 		qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1483 	}
1484 	qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1485 	qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
1486 	if (init_attr->create_flags) {
1487 		ibdev_dbg(&rdev->ibdev,
1488 			  "QP create flags 0x%x not supported",
1489 			  init_attr->create_flags);
1490 		return -EOPNOTSUPP;
1491 	}
1492 
1493 	/* Setup CQs */
1494 	if (init_attr->send_cq) {
1495 		cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
1496 		qplqp->scq = &cq->qplib_cq;
1497 		qp->scq = cq;
1498 	}
1499 
1500 	if (init_attr->recv_cq) {
1501 		cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
1502 		qplqp->rcq = &cq->qplib_cq;
1503 		qp->rcq = cq;
1504 	}
1505 
1506 	/* Setup RQ/SRQ */
1507 	rc = bnxt_re_init_rq_attr(qp, init_attr, uctx);
1508 	if (rc)
1509 		goto out;
1510 	if (init_attr->qp_type == IB_QPT_GSI)
1511 		bnxt_re_adjust_gsi_rq_attr(qp);
1512 
1513 	/* Setup SQ */
1514 	rc = bnxt_re_init_sq_attr(qp, init_attr, uctx, ureq);
1515 	if (rc)
1516 		goto out;
1517 	if (init_attr->qp_type == IB_QPT_GSI)
1518 		bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
1519 
1520 	if (uctx) /* This will update DPI and qp_handle */
1521 		rc = bnxt_re_init_user_qp(rdev, pd, qp, uctx, ureq);
1522 out:
1523 	return rc;
1524 }
1525 
1526 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
1527 				     struct bnxt_re_pd *pd)
1528 {
1529 	struct bnxt_re_sqp_entries *sqp_tbl;
1530 	struct bnxt_re_dev *rdev;
1531 	struct bnxt_re_qp *sqp;
1532 	struct bnxt_re_ah *sah;
1533 	int rc = 0;
1534 
1535 	rdev = qp->rdev;
1536 	/* Create a shadow QP to handle the QP1 traffic */
1537 	sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
1538 			  GFP_KERNEL);
1539 	if (!sqp_tbl)
1540 		return -ENOMEM;
1541 	rdev->gsi_ctx.sqp_tbl = sqp_tbl;
1542 
1543 	sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
1544 	if (!sqp) {
1545 		rc = -ENODEV;
1546 		ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
1547 		goto out;
1548 	}
1549 	rdev->gsi_ctx.gsi_sqp = sqp;
1550 
1551 	sqp->rcq = qp->rcq;
1552 	sqp->scq = qp->scq;
1553 	sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1554 					  &qp->qplib_qp);
1555 	if (!sah) {
1556 		bnxt_qplib_destroy_qp(&rdev->qplib_res,
1557 				      &sqp->qplib_qp);
1558 		rc = -ENODEV;
1559 		ibdev_err(&rdev->ibdev,
1560 			  "Failed to create AH entry for ShadowQP");
1561 		goto out;
1562 	}
1563 	rdev->gsi_ctx.gsi_sah = sah;
1564 
1565 	return 0;
1566 out:
1567 	kfree(sqp_tbl);
1568 	return rc;
1569 }
1570 
1571 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1572 				 struct ib_qp_init_attr *init_attr)
1573 {
1574 	struct bnxt_re_dev *rdev;
1575 	struct bnxt_qplib_qp *qplqp;
1576 	int rc;
1577 
1578 	rdev = qp->rdev;
1579 	qplqp = &qp->qplib_qp;
1580 
1581 	qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1582 	qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1583 
1584 	rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
1585 	if (rc) {
1586 		ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
1587 		goto out;
1588 	}
1589 
1590 	rc = bnxt_re_create_shadow_gsi(qp, pd);
1591 out:
1592 	return rc;
1593 }
1594 
1595 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
1596 				   struct ib_qp_init_attr *init_attr,
1597 				   struct bnxt_qplib_dev_attr *dev_attr)
1598 {
1599 	bool rc = true;
1600 
1601 	if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
1602 	    init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
1603 	    init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
1604 	    init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
1605 	    init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
1606 		ibdev_err(&rdev->ibdev,
1607 			  "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
1608 			  init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
1609 			  init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
1610 			  init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
1611 			  init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
1612 			  init_attr->cap.max_inline_data,
1613 			  dev_attr->max_inline_data);
1614 		rc = false;
1615 	}
1616 	return rc;
1617 }
1618 
1619 static int bnxt_re_add_unique_gid(struct bnxt_re_dev *rdev)
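/* Register the per-function unique SGID used by RawEth (mirror) QPs;
 * only needed when FW advertises roce_mirror support.
 */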
1620 {
1621 	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
1622 	struct bnxt_qplib_res *res = &rdev->qplib_res;
1623 	int rc;
1624 
1625 	if (!rdev->rcfw.roce_mirror)
1626 		return 0;
1627 
1628 	rdev->ugid.global.subnet_prefix = cpu_to_be64(0xfe8000000000abcdLL);
1629 	addrconf_ifid_eui48(&rdev->ugid.raw[8], rdev->netdev);
1630 
1631 	rc = bnxt_qplib_add_sgid(&res->sgid_tbl,
1632 				 (struct bnxt_qplib_gid *)&rdev->ugid,
1633 				 rdev->qplib_res.netdev->dev_addr,
1634 				 0xFFFF, true, &rdev->ugid_index, true,
1635 				 hctx->stats3.fw_id);
1636 	if (rc)
1637 		dev_err(rdev_to_dev(rdev), "Failed to add unique GID. rc = %d\n", rc);
1638 
1639 	return rc;
1640 }
1641 
1642 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
1643 		      struct ib_udata *udata)
1644 {
1645 	struct bnxt_qplib_dev_attr *dev_attr;
1646 	struct bnxt_re_ucontext *uctx;
1647 	struct bnxt_re_qp_req ureq;
1648 	struct bnxt_re_dev *rdev;
1649 	struct bnxt_re_pd *pd;
1650 	struct bnxt_re_qp *qp;
1651 	struct ib_pd *ib_pd;
1652 	u32 active_qps;
1653 	int rc;
1654 
1655 	ib_pd = ib_qp->pd;
1656 	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1657 	rdev = pd->rdev;
1658 	dev_attr = rdev->dev_attr;
1659 	qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1660 
1661 	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
1662 	if (udata)
1663 		if (ib_copy_from_udata(&ureq, udata,  min(udata->inlen, sizeof(ureq))))
1664 			return -EFAULT;
1665 
1666 	rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
1667 	if (!rc) {
1668 		rc = -EINVAL;
1669 		goto fail;
1670 	}
1671 
1672 	qp->rdev = rdev;
1673 	rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, uctx, &ureq);
1674 	if (rc)
1675 		goto fail;
1676 
1677 	if (qp_init_attr->qp_type == IB_QPT_GSI &&
1678 	    !(bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))) {
1679 		rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
1680 		if (rc == -ENODEV)
1681 			goto qp_destroy;
1682 		if (rc)
1683 			goto fail;
1684 	} else {
1685 		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1686 		if (rc) {
1687 			ibdev_err(&rdev->ibdev, "Failed to create HW QP");
1688 			goto free_umem;
1689 		}
1690 		if (udata) {
1691 			struct bnxt_re_qp_resp resp;
1692 
1693 			resp.qpid = qp->qplib_qp.id;
1694 			resp.rsvd = 0;
1695 			rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1696 			if (rc) {
1697 				ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
1698 				goto qp_destroy;
1699 			}
1700 		}
1701 	}
1702 
1703 	/* Support for RawEth QPs is added to capture TCP packet dumps.
1704 	 * A unique SGID is used to avoid incorrect statistics on the
1705 	 * per-function stats_ctx.
1706 	 */
1707 	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE) {
1708 		rc = bnxt_re_add_unique_gid(rdev);
1709 		if (rc)
1710 			goto qp_destroy;
1711 		qp->qplib_qp.ugid_index = rdev->ugid_index;
1712 	}
1713 
1714 	qp->ib_qp.qp_num = qp->qplib_qp.id;
1715 	if (qp_init_attr->qp_type == IB_QPT_GSI)
1716 		rdev->gsi_ctx.gsi_qp = qp;
1717 	spin_lock_init(&qp->sq_lock);
1718 	spin_lock_init(&qp->rq_lock);
1719 	INIT_LIST_HEAD(&qp->list);
1720 	mutex_lock(&rdev->qp_lock);
1721 	list_add_tail(&qp->list, &rdev->qp_list);
1722 	mutex_unlock(&rdev->qp_lock);
1723 	active_qps = atomic_inc_return(&rdev->stats.res.qp_count);
1724 	if (active_qps > rdev->stats.res.qp_watermark)
1725 		rdev->stats.res.qp_watermark = active_qps;
1726 	if (qp_init_attr->qp_type == IB_QPT_RC) {
1727 		active_qps = atomic_inc_return(&rdev->stats.res.rc_qp_count);
1728 		if (active_qps > rdev->stats.res.rc_qp_watermark)
1729 			rdev->stats.res.rc_qp_watermark = active_qps;
1730 	} else if (qp_init_attr->qp_type == IB_QPT_UD) {
1731 		active_qps = atomic_inc_return(&rdev->stats.res.ud_qp_count);
1732 		if (active_qps > rdev->stats.res.ud_qp_watermark)
1733 			rdev->stats.res.ud_qp_watermark = active_qps;
1734 	}
1735 	bnxt_re_debug_add_qpinfo(rdev, qp);
1736 
1737 	return 0;
1738 qp_destroy:
1739 	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1740 free_umem:
1741 	ib_umem_release(qp->rumem);
1742 	ib_umem_release(qp->sumem);
1743 fail:
1744 	return rc;
1745 }
1746 
1747 static u8 __from_ib_qp_state(enum ib_qp_state state)
1748 {
1749 	switch (state) {
1750 	case IB_QPS_RESET:
1751 		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1752 	case IB_QPS_INIT:
1753 		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1754 	case IB_QPS_RTR:
1755 		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1756 	case IB_QPS_RTS:
1757 		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1758 	case IB_QPS_SQD:
1759 		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1760 	case IB_QPS_SQE:
1761 		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1762 	case IB_QPS_ERR:
1763 	default:
1764 		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1765 	}
1766 }
1767 
1768 static enum ib_qp_state __to_ib_qp_state(u8 state)
1769 {
1770 	switch (state) {
1771 	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1772 		return IB_QPS_RESET;
1773 	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1774 		return IB_QPS_INIT;
1775 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1776 		return IB_QPS_RTR;
1777 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1778 		return IB_QPS_RTS;
1779 	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1780 		return IB_QPS_SQD;
1781 	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1782 		return IB_QPS_SQE;
1783 	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1784 	default:
1785 		return IB_QPS_ERR;
1786 	}
1787 }
1788 
1789 static u32 __from_ib_mtu(enum ib_mtu mtu)
1790 {
1791 	switch (mtu) {
1792 	case IB_MTU_256:
1793 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1794 	case IB_MTU_512:
1795 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1796 	case IB_MTU_1024:
1797 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1798 	case IB_MTU_2048:
1799 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1800 	case IB_MTU_4096:
1801 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1802 	default:
1803 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1804 	}
1805 }
1806 
1807 static enum ib_mtu __to_ib_mtu(u32 mtu)
1808 {
1809 	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1810 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1811 		return IB_MTU_256;
1812 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1813 		return IB_MTU_512;
1814 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1815 		return IB_MTU_1024;
1816 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1817 		return IB_MTU_2048;
1818 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1819 		return IB_MTU_4096;
1820 	default:
1821 		return IB_MTU_2048;
1822 	}
1823 }
1824 
1825 /* Shared Receive Queues */
1826 int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1827 {
1828 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1829 					       ib_srq);
1830 	struct bnxt_re_dev *rdev = srq->rdev;
1831 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1832 
1833 	if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
1834 		free_page((unsigned long)srq->uctx_srq_page);
1835 		hash_del(&srq->hash_entry);
1836 	}
1837 	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1838 	ib_umem_release(srq->umem);
1839 	atomic_dec(&rdev->stats.res.srq_count);
1840 	return 0;
1841 }
1842 
1843 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1844 				 struct bnxt_re_pd *pd,
1845 				 struct bnxt_re_srq *srq,
1846 				 struct ib_udata *udata)
1847 {
1848 	struct bnxt_re_srq_req ureq;
1849 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1850 	struct ib_umem *umem;
1851 	int bytes = 0;
1852 	struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1853 		udata, struct bnxt_re_ucontext, ib_uctx);
1854 
1855 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1856 		return -EFAULT;
1857 
1858 	bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1859 	bytes = PAGE_ALIGN(bytes);
1860 	umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
1861 			   IB_ACCESS_LOCAL_WRITE);
1862 	if (IS_ERR(umem))
1863 		return PTR_ERR(umem);
1864 
1865 	srq->umem = umem;
1866 	qplib_srq->sg_info.umem = umem;
1867 	qplib_srq->sg_info.pgsize = PAGE_SIZE;
1868 	qplib_srq->sg_info.pgshft = PAGE_SHIFT;
1869 	qplib_srq->srq_handle = ureq.srq_handle;
1870 	qplib_srq->dpi = &cntx->dpi;
1871 
1872 	return 0;
1873 }
1874 
1875 int bnxt_re_create_srq(struct ib_srq *ib_srq,
1876 		       struct ib_srq_init_attr *srq_init_attr,
1877 		       struct ib_udata *udata)
1878 {
1879 	struct bnxt_qplib_dev_attr *dev_attr;
1880 	struct bnxt_re_ucontext *uctx;
1881 	struct bnxt_re_dev *rdev;
1882 	struct bnxt_re_srq *srq;
1883 	struct bnxt_re_pd *pd;
1884 	struct ib_pd *ib_pd;
1885 	u32 active_srqs;
1886 	int rc, entries;
1887 
1888 	ib_pd = ib_srq->pd;
1889 	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1890 	rdev = pd->rdev;
1891 	dev_attr = rdev->dev_attr;
1892 	srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1893 
1894 	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1895 		ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
1896 		rc = -EINVAL;
1897 		goto exit;
1898 	}
1899 
1900 	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1901 		rc = -EOPNOTSUPP;
1902 		goto exit;
1903 	}
1904 
1905 	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
1906 	srq->rdev = rdev;
1907 	srq->qplib_srq.pd = &pd->qplib_pd;
1908 	srq->qplib_srq.dpi = &rdev->dpi_privileged;
1909 	/* Allocate 1 more than what's provided so posting max doesn't
1910 	 * mean empty
1911 	 */
1912 	entries = bnxt_re_init_depth(srq_init_attr->attr.max_wr + 1, uctx);
1913 	if (entries > dev_attr->max_srq_wqes + 1)
1914 		entries = dev_attr->max_srq_wqes + 1;
1915 	srq->qplib_srq.max_wqe = entries;
1916 
1917 	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1918 	/* 128 byte WQE size for SRQ, so use max SGEs */
1919 	srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1920 	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1921 	srq->srq_limit = srq_init_attr->attr.srq_limit;
1922 	srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
1923 	srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
1924 	srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
1925 
1926 	if (udata) {
1927 		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1928 		if (rc)
1929 			goto fail;
1930 	}
1931 
1932 	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1933 	if (rc) {
1934 		ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
1935 		goto fail;
1936 	}
1937 
1938 	if (udata) {
1939 		struct bnxt_re_srq_resp resp = {};
1940 
1941 		resp.srqid = srq->qplib_srq.id;
1942 		if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
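			/* SRQ toggle-bit support: track the SRQ in the hash table
			 * and allocate a zeroed page that user space can map to
			 * read the toggle state.
			 */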
1943 			hash_add(rdev->srq_hash, &srq->hash_entry, srq->qplib_srq.id);
1944 			srq->uctx_srq_page = (void *)get_zeroed_page(GFP_KERNEL);
1945 			if (!srq->uctx_srq_page) {
1946 				rc = -ENOMEM;
1947 				goto fail;
1948 			}
1949 			resp.comp_mask |= BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT;
1950 		}
1951 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1952 		if (rc) {
1953 			ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
1954 			bnxt_qplib_destroy_srq(&rdev->qplib_res,
1955 					       &srq->qplib_srq);
1956 			goto fail;
1957 		}
1958 	}
1959 	active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
1960 	if (active_srqs > rdev->stats.res.srq_watermark)
1961 		rdev->stats.res.srq_watermark = active_srqs;
1962 	spin_lock_init(&srq->lock);
1963 
1964 	return 0;
1965 
1966 fail:
1967 	ib_umem_release(srq->umem);
1968 exit:
1969 	return rc;
1970 }
1971 
1972 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1973 		       enum ib_srq_attr_mask srq_attr_mask,
1974 		       struct ib_udata *udata)
1975 {
1976 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1977 					       ib_srq);
1978 	struct bnxt_re_dev *rdev = srq->rdev;
1979 	int rc;
1980 
1981 	switch (srq_attr_mask) {
1982 	case IB_SRQ_MAX_WR:
1983 		/* SRQ resize is not supported */
1984 		return -EINVAL;
1985 	case IB_SRQ_LIMIT:
1986 		/* Change the SRQ threshold */
1987 		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1988 			return -EINVAL;
1989 
1990 		srq->qplib_srq.threshold = srq_attr->srq_limit;
1991 		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1992 		if (rc) {
1993 			ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
1994 			return rc;
1995 		}
1996 		/* On success, update the shadow */
1997 		srq->srq_limit = srq_attr->srq_limit;
1998 		/* No need to build and send a response back to udata */
1999 		return 0;
2000 	default:
2001 		ibdev_err(&rdev->ibdev,
2002 			  "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
2003 		return -EINVAL;
2004 	}
2005 }
2006 
2007 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
2008 {
2009 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
2010 					       ib_srq);
2011 	struct bnxt_re_srq tsrq;
2012 	struct bnxt_re_dev *rdev = srq->rdev;
2013 	int rc;
2014 
2015 	/* Get live SRQ attr */
2016 	tsrq.qplib_srq.id = srq->qplib_srq.id;
2017 	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
2018 	if (rc) {
2019 		ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
2020 		return rc;
2021 	}
2022 	srq_attr->max_wr = srq->qplib_srq.max_wqe;
2023 	srq_attr->max_sge = srq->qplib_srq.max_sge;
2024 	srq_attr->srq_limit = tsrq.qplib_srq.threshold;
2025 
2026 	return 0;
2027 }
2028 
2029 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
2030 			  const struct ib_recv_wr **bad_wr)
2031 {
2032 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
2033 					       ib_srq);
2034 	struct bnxt_qplib_swqe wqe;
2035 	unsigned long flags;
2036 	int rc = 0;
2037 
2038 	spin_lock_irqsave(&srq->lock, flags);
2039 	while (wr) {
2040 		/* Transcribe each ib_recv_wr to qplib_swqe */
2041 		wqe.num_sge = wr->num_sge;
2042 		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2043 		wqe.wr_id = wr->wr_id;
2044 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2045 
2046 		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
2047 		if (rc) {
2048 			*bad_wr = wr;
2049 			break;
2050 		}
2051 		wr = wr->next;
2052 	}
2053 	spin_unlock_irqrestore(&srq->lock, flags);
2054 
2055 	return rc;
2056 }
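
/* Propagate selected QP1 attribute changes (state, pkey index, qkey and
 * SQ PSN) to the shadow QP that backs GSI traffic on older chips.
 */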
2057 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
2058 				    struct bnxt_re_qp *qp1_qp,
2059 				    int qp_attr_mask)
2060 {
2061 	struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
2062 	int rc;
2063 
2064 	if (qp_attr_mask & IB_QP_STATE) {
2065 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
2066 		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
2067 	}
2068 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
2069 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2070 		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
2071 	}
2072 
2073 	if (qp_attr_mask & IB_QP_QKEY) {
2074 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2075 		/* Use a random QKEY */
2076 		qp->qplib_qp.qkey = 0x81818181;
2077 	}
2078 	if (qp_attr_mask & IB_QP_SQ_PSN) {
2079 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2080 		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
2081 	}
2082 
2083 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2084 	if (rc)
2085 		ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
2086 	return rc;
2087 }
2088 
2089 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2090 		      int qp_attr_mask, struct ib_udata *udata)
2091 {
2092 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2093 	struct bnxt_re_dev *rdev = qp->rdev;
2094 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
2095 	enum ib_qp_state curr_qp_state, new_qp_state;
2096 	int rc, entries;
2097 	unsigned int flags;
2098 	u8 nw_type;
2099 
2100 	if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
2101 		return -EOPNOTSUPP;
2102 
2103 	qp->qplib_qp.modify_flags = 0;
2104 	if (qp_attr_mask & IB_QP_STATE) {
2105 		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
2106 		new_qp_state = qp_attr->qp_state;
2107 		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
2108 					ib_qp->qp_type, qp_attr_mask)) {
2109 			ibdev_err(&rdev->ibdev,
2110 				  "Invalid attribute mask: %#x specified ",
2111 				  qp_attr_mask);
2112 			ibdev_err(&rdev->ibdev,
2113 				  "for qpn: %#x type: %#x",
2114 				  ib_qp->qp_num, ib_qp->qp_type);
2115 			ibdev_err(&rdev->ibdev,
2116 				  "curr_qp_state=0x%x, new_qp_state=0x%x\n",
2117 				  curr_qp_state, new_qp_state);
2118 			return -EINVAL;
2119 		}
2120 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
2121 		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
2122 
2123 		if (!qp->sumem &&
2124 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2125 			ibdev_dbg(&rdev->ibdev,
2126 				  "Move QP = %p to flush list\n", qp);
2127 			flags = bnxt_re_lock_cqs(qp);
2128 			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
2129 			bnxt_re_unlock_cqs(qp, flags);
2130 		}
2131 		if (!qp->sumem &&
2132 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2133 			ibdev_dbg(&rdev->ibdev,
2134 				  "Move QP = %p out of flush list\n", qp);
2135 			flags = bnxt_re_lock_cqs(qp);
2136 			bnxt_qplib_clean_qp(&qp->qplib_qp);
2137 			bnxt_re_unlock_cqs(qp, flags);
2138 		}
2139 	}
2140 	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
2141 		qp->qplib_qp.modify_flags |=
2142 				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
2143 		qp->qplib_qp.en_sqd_async_notify = true;
2144 	}
2145 	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
2146 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
2147 		qp->qplib_qp.access =
2148 			__qp_access_flags_from_ib(qp->qplib_qp.cctx,
2149 						  qp_attr->qp_access_flags);
2150 		/* LOCAL_WRITE access must be set to allow RC receive */
2151 		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE;
2152 	}
2153 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
2154 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2155 		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
2156 	}
2157 	if (qp_attr_mask & IB_QP_QKEY) {
2158 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2159 		qp->qplib_qp.qkey = qp_attr->qkey;
2160 	}
2161 	if (qp_attr_mask & IB_QP_AV) {
2162 		const struct ib_global_route *grh =
2163 			rdma_ah_read_grh(&qp_attr->ah_attr);
2164 		const struct ib_gid_attr *sgid_attr;
2165 		struct bnxt_re_gid_ctx *ctx;
2166 
2167 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
2168 				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
2169 				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
2170 				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
2171 				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
2172 				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
2173 				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
2174 		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
2175 		       sizeof(qp->qplib_qp.ah.dgid.data));
2176 		qp->qplib_qp.ah.flow_label = grh->flow_label;
2177 		sgid_attr = grh->sgid_attr;
2178 		/* Get the HW context of the GID. The reference
2179 		 * of GID table entry is already taken by the caller.
2180 		 */
2181 		ctx = rdma_read_gid_hw_context(sgid_attr);
2182 		qp->qplib_qp.ah.sgid_index = ctx->idx;
2183 		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
2184 		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
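		/* Keep only the upper 6 bits (the DSCP field) of the traffic class */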
2185 		qp->qplib_qp.ah.traffic_class = grh->traffic_class >> 2;
2186 		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
2187 		ether_addr_copy(qp->qplib_qp.ah.dmac,
2188 				qp_attr->ah_attr.roce.dmac);
2189 
2190 		rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
2191 					     &qp->qplib_qp.smac[0]);
2192 		if (rc)
2193 			return rc;
2194 
2195 		nw_type = rdma_gid_attr_network_type(sgid_attr);
2196 		switch (nw_type) {
2197 		case RDMA_NETWORK_IPV4:
2198 			qp->qplib_qp.nw_type =
2199 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
2200 			break;
2201 		case RDMA_NETWORK_IPV6:
2202 			qp->qplib_qp.nw_type =
2203 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
2204 			break;
2205 		default:
2206 			qp->qplib_qp.nw_type =
2207 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
2208 			break;
2209 		}
2210 	}
2211 
2212 	if (qp_attr->qp_state == IB_QPS_RTR) {
2213 		enum ib_mtu qpmtu;
2214 
2215 		qpmtu = iboe_get_mtu(rdev->netdev->mtu);
2216 		if (qp_attr_mask & IB_QP_PATH_MTU) {
2217 			if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
2218 			    ib_mtu_enum_to_int(qpmtu))
2219 				return -EINVAL;
2220 			qpmtu = qp_attr->path_mtu;
2221 		}
2222 
2223 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2224 		qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
2225 		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
2226 	}
2227 
2228 	if (qp_attr_mask & IB_QP_TIMEOUT) {
2229 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
2230 		qp->qplib_qp.timeout = qp_attr->timeout;
2231 	}
2232 	if (qp_attr_mask & IB_QP_RETRY_CNT) {
2233 		qp->qplib_qp.modify_flags |=
2234 				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
2235 		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
2236 	}
2237 	if (qp_attr_mask & IB_QP_RNR_RETRY) {
2238 		qp->qplib_qp.modify_flags |=
2239 				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
2240 		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
2241 	}
2242 	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
2243 		qp->qplib_qp.modify_flags |=
2244 				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
2245 		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
2246 	}
2247 	if (qp_attr_mask & IB_QP_RQ_PSN) {
2248 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
2249 		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
2250 	}
2251 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2252 		qp->qplib_qp.modify_flags |=
2253 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
2254 		/* Cap the max_rd_atomic to device max */
2255 		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
2256 						   dev_attr->max_qp_rd_atom);
2257 	}
2258 	if (qp_attr_mask & IB_QP_SQ_PSN) {
2259 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2260 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
2261 	}
2262 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2263 		if (qp_attr->max_dest_rd_atomic >
2264 		    dev_attr->max_qp_init_rd_atom) {
2265 			ibdev_err(&rdev->ibdev,
2266 				  "max_dest_rd_atomic requested %d is > dev_max %d",
2267 				  qp_attr->max_dest_rd_atomic,
2268 				  dev_attr->max_qp_init_rd_atom);
2269 			return -EINVAL;
2270 		}
2271 
2272 		qp->qplib_qp.modify_flags |=
2273 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
2274 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
2275 	}
2276 	if (qp_attr_mask & IB_QP_CAP) {
2277 		struct bnxt_re_ucontext *uctx =
2278 			rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
2279 
2280 		qp->qplib_qp.modify_flags |=
2281 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
2282 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
2283 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
2284 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2285 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2286 		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2287 		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2288 		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2289 		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2290 		    (qp_attr->cap.max_inline_data >=
2291 						dev_attr->max_inline_data)) {
2292 			ibdev_err(&rdev->ibdev,
2293 				  "Modify QP failed - max exceeded");
2294 			return -EINVAL;
2295 		}
2296 		entries = bnxt_re_init_depth(qp_attr->cap.max_send_wr, uctx);
2297 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2298 						dev_attr->max_qp_wqes + 1);
2299 		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2300 						qp_attr->cap.max_send_wr;
2301 		/*
2302 		 * Reserve one slot for the phantom WQE. Some applications may
2303 		 * post one extra entry in this case, so allow it to avoid an
2304 		 * unexpected queue-full condition.
2305 		 */
2306 		qp->qplib_qp.sq.q_full_delta -= 1;
2307 		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2308 		if (qp->qplib_qp.rq.max_wqe) {
2309 			entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
2310 			qp->qplib_qp.rq.max_wqe =
2311 				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2312 			qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe;
2313 			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2314 						       qp_attr->cap.max_recv_wr;
2315 			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2316 		} else {
2317 			/* SRQ was used prior, just ignore the RQ caps */
2318 		}
2319 	}
2320 	if (qp_attr_mask & IB_QP_DEST_QPN) {
2321 		qp->qplib_qp.modify_flags |=
2322 				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2323 		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2324 	}
2325 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2326 	if (rc) {
2327 		ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2328 		return rc;
2329 	}
2330 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2331 		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2332 	return rc;
2333 }
2334 
2335 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2336 		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2337 {
2338 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2339 	struct bnxt_re_dev *rdev = qp->rdev;
2340 	struct bnxt_qplib_qp *qplib_qp;
2341 	int rc;
2342 
2343 	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2344 	if (!qplib_qp)
2345 		return -ENOMEM;
2346 
2347 	qplib_qp->id = qp->qplib_qp.id;
2348 	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2349 
2350 	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2351 	if (rc) {
2352 		ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2353 		goto out;
2354 	}
2355 	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2356 	qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2357 	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2358 	qp_attr->qp_access_flags = __qp_access_flags_to_ib(qp->qplib_qp.cctx,
2359 							   qplib_qp->access);
2360 	qp_attr->pkey_index = qplib_qp->pkey_index;
2361 	qp_attr->qkey = qplib_qp->qkey;
2362 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2363 	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->udp_sport,
2364 			qplib_qp->ah.host_sgid_index,
2365 			qplib_qp->ah.hop_limit,
2366 			qplib_qp->ah.traffic_class);
2367 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2368 	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2369 	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2370 	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2371 	qp_attr->timeout = qplib_qp->timeout;
2372 	qp_attr->retry_cnt = qplib_qp->retry_cnt;
2373 	qp_attr->rnr_retry = qplib_qp->rnr_retry;
2374 	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2375 	qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
2376 	qp_attr->rq_psn = qplib_qp->rq.psn;
2377 	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2378 	qp_attr->sq_psn = qplib_qp->sq.psn;
2379 	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2380 	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2381 							 IB_SIGNAL_REQ_WR;
2382 	qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2383 
2384 	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2385 	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2386 	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2387 	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2388 	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2389 	qp_init_attr->cap = qp_attr->cap;
2390 
2391 out:
2392 	kfree(qplib_qp);
2393 	return rc;
2394 }
2395 
2396 /* Routine for sending QP1 packets for RoCE V1 and V2
2397  */
2398 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2399 				     const struct ib_send_wr *wr,
2400 				     struct bnxt_qplib_swqe *wqe,
2401 				     int payload_size)
2402 {
2403 	struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2404 					     ib_ah);
2405 	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2406 	const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2407 	struct bnxt_qplib_sge sge;
2408 	u8 nw_type;
2409 	u16 ether_type;
2410 	union ib_gid dgid;
2411 	bool is_eth = false;
2412 	bool is_vlan = false;
2413 	bool is_grh = false;
2414 	bool is_udp = false;
2415 	u8 ip_version = 0;
2416 	u16 vlan_id = 0xFFFF;
2417 	void *buf;
2418 	int i, rc;
2419 
2420 	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2421 
2422 	rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2423 	if (rc)
2424 		return rc;
2425 
2426 	/* Get network header type for this GID */
2427 	nw_type = rdma_gid_attr_network_type(sgid_attr);
2428 	switch (nw_type) {
2429 	case RDMA_NETWORK_IPV4:
2430 		nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2431 		break;
2432 	case RDMA_NETWORK_IPV6:
2433 		nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2434 		break;
2435 	default:
2436 		nw_type = BNXT_RE_ROCE_V1_PACKET;
2437 		break;
2438 	}
2439 	memcpy(&dgid.raw, &qplib_ah->dgid, 16);
2440 	is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
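	/* RoCE v2 (UDP encap) carries an IP/UDP header; RoCE v1 carries a GRH */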
2441 	if (is_udp) {
2442 		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2443 			ip_version = 4;
2444 			ether_type = ETH_P_IP;
2445 		} else {
2446 			ip_version = 6;
2447 			ether_type = ETH_P_IPV6;
2448 		}
2449 		is_grh = false;
2450 	} else {
2451 		ether_type = ETH_P_IBOE;
2452 		is_grh = true;
2453 	}
2454 
2455 	is_eth = true;
2456 	is_vlan = vlan_id && (vlan_id < 0x1000);
2457 
2458 	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2459 			  ip_version, is_udp, 0, &qp->qp1_hdr);
2460 
2461 	/* ETH */
2462 	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2463 	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2464 
2465 	/* For VLAN, check the SGID for VLAN existence */
2466 
2467 	if (!is_vlan) {
2468 		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2469 	} else {
2470 		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2471 		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2472 	}
2473 
2474 	if (is_grh || (ip_version == 6)) {
2475 		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2476 		       sizeof(sgid_attr->gid));
2477 		memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2478 		       sizeof(sgid_attr->gid));
2479 		qp->qp1_hdr.grh.hop_limit     = qplib_ah->hop_limit;
2480 	}
2481 
2482 	if (ip_version == 4) {
2483 		qp->qp1_hdr.ip4.tos = 0;
2484 		qp->qp1_hdr.ip4.id = 0;
2485 		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2486 		qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2487 
2488 		memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2489 		memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2490 		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2491 	}
2492 
2493 	if (is_udp) {
2494 		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2495 		qp->qp1_hdr.udp.sport = htons(0x8CD1);
2496 		qp->qp1_hdr.udp.csum = 0;
2497 	}
2498 
2499 	/* BTH */
2500 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2501 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2502 		qp->qp1_hdr.immediate_present = 1;
2503 	} else {
2504 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2505 	}
2506 	if (wr->send_flags & IB_SEND_SOLICITED)
2507 		qp->qp1_hdr.bth.solicited_event = 1;
2508 	/* pad_count: bytes needed to pad the payload to a 4-byte multiple */
2509 	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2510 
2511 	/* P_key for QP1 is for all members */
2512 	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2513 	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2514 	qp->qp1_hdr.bth.ack_req = 0;
2515 	qp->send_psn++;
2516 	qp->send_psn &= BTH_PSN_MASK;
2517 	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2518 	/* DETH */
2519 	/* Use the privileged Q_Key for QP1 */
2520 	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2521 	qp->qp1_hdr.deth.source_qpn = IB_QP1;
2522 
2523 	/* Pack the QP1 to the transmit buffer */
2524 	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2525 	if (buf) {
2526 		ib_ud_header_pack(&qp->qp1_hdr, buf);
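		/* Shift the caller's SGEs up by one to make room for the
		 * packed QP1 header at sg_list[0].
		 */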
2527 		for (i = wqe->num_sge; i; i--) {
2528 			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2529 			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2530 			wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2531 		}
2532 
2533 		/*
2534 		 * Max header buf size for IPv6 RoCE V2 is 86,
2535 		 * which is the same as the QP1 SQ header buffer.
2536 		 * Header buf size for IPv4 RoCE V2 can be 66:
2537 		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
2538 		 * Subtract 20 bytes from the QP1 SQ header buf size.
2539 		 */
2540 		if (is_udp && ip_version == 4)
2541 			sge.size -= 20;
2542 		/*
2543 		 * Max Header buf size for RoCE V1 is 78.
2544 		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2545 		 * Subtract 8 bytes from QP1 SQ header buf size
2546 		 */
2547 		if (!is_udp)
2548 			sge.size -= 8;
2549 
2550 		/* Subtract 4 bytes for non vlan packets */
2551 		if (!is_vlan)
2552 			sge.size -= 4;
2553 
2554 		wqe->sg_list[0].addr = sge.addr;
2555 		wqe->sg_list[0].lkey = sge.lkey;
2556 		wqe->sg_list[0].size = sge.size;
2557 		wqe->num_sge++;
2558 
2559 	} else {
2560 		ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2561 		rc = -ENOMEM;
2562 	}
2563 	return rc;
2564 }
2565 
2566 /* The MAD layer only provides a recv SGE large enough for the
2567  * ib_grh + MAD datagram: no Ethernet headers, Ethertype, BTH, DETH,
2568  * or RoCE iCRC. The Cu+ solution must provide a buffer for the entire
2569  * receive packet (334 bytes) with no VLAN and then copy the GRH
2570  * and the MAD datagram out to the provided SGE.
2571  */
2572 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2573 					    const struct ib_recv_wr *wr,
2574 					    struct bnxt_qplib_swqe *wqe,
2575 					    int payload_size)
2576 {
2577 	struct bnxt_re_sqp_entries *sqp_entry;
2578 	struct bnxt_qplib_sge ref, sge;
2579 	struct bnxt_re_dev *rdev;
2580 	u32 rq_prod_index;
2581 
2582 	rdev = qp->rdev;
2583 
2584 	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2585 
2586 	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2587 		return -ENOMEM;
2588 
2589 	/* Create 1 SGE to receive the entire
2590 	 * ethernet packet
2591 	 */
2592 	/* Save the reference from ULP */
2593 	ref.addr = wqe->sg_list[0].addr;
2594 	ref.lkey = wqe->sg_list[0].lkey;
2595 	ref.size = wqe->sg_list[0].size;
2596 
2597 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2598 
2599 	/* SGE 1 */
2600 	wqe->sg_list[0].addr = sge.addr;
2601 	wqe->sg_list[0].lkey = sge.lkey;
2602 	wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2603 	sge.size -= wqe->sg_list[0].size;
2604 
2605 	sqp_entry->sge.addr = ref.addr;
2606 	sqp_entry->sge.lkey = ref.lkey;
2607 	sqp_entry->sge.size = ref.size;
2608 	/* Store the wr_id for reporting completion */
2609 	sqp_entry->wrid = wqe->wr_id;
2610 	/* Change wqe->wr_id to the table index */
2611 	wqe->wr_id = rq_prod_index;
2612 	return 0;
2613 }
2614 
2615 static int is_ud_qp(struct bnxt_re_qp *qp)
2616 {
2617 	return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2618 		qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2619 }
2620 
2621 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2622 				  const struct ib_send_wr *wr,
2623 				  struct bnxt_qplib_swqe *wqe)
2624 {
2625 	struct bnxt_re_ah *ah = NULL;
2626 
2627 	if (is_ud_qp(qp)) {
2628 		ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2629 		wqe->send.q_key = ud_wr(wr)->remote_qkey;
2630 		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2631 		wqe->send.avid = ah->qplib_ah.id;
2632 	}
2633 	switch (wr->opcode) {
2634 	case IB_WR_SEND:
2635 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2636 		break;
2637 	case IB_WR_SEND_WITH_IMM:
2638 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2639 		wqe->send.imm_data = be32_to_cpu(wr->ex.imm_data);
2640 		break;
2641 	case IB_WR_SEND_WITH_INV:
2642 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2643 		wqe->send.inv_key = wr->ex.invalidate_rkey;
2644 		break;
2645 	default:
2646 		return -EINVAL;
2647 	}
2648 	if (wr->send_flags & IB_SEND_SIGNALED)
2649 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2650 	if (wr->send_flags & IB_SEND_FENCE)
2651 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2652 	if (wr->send_flags & IB_SEND_SOLICITED)
2653 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2654 	if (wr->send_flags & IB_SEND_INLINE)
2655 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2656 
2657 	return 0;
2658 }
2659 
2660 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2661 				  struct bnxt_qplib_swqe *wqe)
2662 {
2663 	switch (wr->opcode) {
2664 	case IB_WR_RDMA_WRITE:
2665 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2666 		break;
2667 	case IB_WR_RDMA_WRITE_WITH_IMM:
2668 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2669 		wqe->rdma.imm_data = be32_to_cpu(wr->ex.imm_data);
2670 		break;
2671 	case IB_WR_RDMA_READ:
2672 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2673 		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2674 		break;
2675 	default:
2676 		return -EINVAL;
2677 	}
2678 	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2679 	wqe->rdma.r_key = rdma_wr(wr)->rkey;
2680 	if (wr->send_flags & IB_SEND_SIGNALED)
2681 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2682 	if (wr->send_flags & IB_SEND_FENCE)
2683 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2684 	if (wr->send_flags & IB_SEND_SOLICITED)
2685 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2686 	if (wr->send_flags & IB_SEND_INLINE)
2687 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2688 
2689 	return 0;
2690 }
2691 
2692 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2693 				    struct bnxt_qplib_swqe *wqe)
2694 {
2695 	switch (wr->opcode) {
2696 	case IB_WR_ATOMIC_CMP_AND_SWP:
2697 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2698 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2699 		wqe->atomic.swap_data = atomic_wr(wr)->swap;
2700 		break;
2701 	case IB_WR_ATOMIC_FETCH_AND_ADD:
2702 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2703 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2704 		break;
2705 	default:
2706 		return -EINVAL;
2707 	}
2708 	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2709 	wqe->atomic.r_key = atomic_wr(wr)->rkey;
2710 	if (wr->send_flags & IB_SEND_SIGNALED)
2711 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2712 	if (wr->send_flags & IB_SEND_FENCE)
2713 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2714 	if (wr->send_flags & IB_SEND_SOLICITED)
2715 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2716 	return 0;
2717 }
2718 
2719 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2720 				 struct bnxt_qplib_swqe *wqe)
2721 {
2722 	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2723 	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2724 
2725 	if (wr->send_flags & IB_SEND_SIGNALED)
2726 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2727 	if (wr->send_flags & IB_SEND_SOLICITED)
2728 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2729 
2730 	return 0;
2731 }
2732 
2733 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2734 				 struct bnxt_qplib_swqe *wqe)
2735 {
2736 	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2737 	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2738 	int access = wr->access;
2739 
2740 	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2741 	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2742 	wqe->frmr.page_list = mr->pages;
2743 	wqe->frmr.page_list_len = mr->npages;
2744 	wqe->frmr.levels = qplib_frpl->hwq.level;
2745 	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2746 
2747 	if (wr->wr.send_flags & IB_SEND_SIGNALED)
2748 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2749 
2750 	if (access & IB_ACCESS_LOCAL_WRITE)
2751 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2752 	if (access & IB_ACCESS_REMOTE_READ)
2753 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2754 	if (access & IB_ACCESS_REMOTE_WRITE)
2755 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2756 	if (access & IB_ACCESS_REMOTE_ATOMIC)
2757 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2758 	if (access & IB_ACCESS_MW_BIND)
2759 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2760 
2761 	wqe->frmr.l_key = wr->key;
2762 	wqe->frmr.length = wr->mr->length;
2763 	wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
2764 	wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
2765 	wqe->frmr.va = wr->mr->iova;
2766 	return 0;
2767 }
2768 
2769 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2770 				    const struct ib_send_wr *wr,
2771 				    struct bnxt_qplib_swqe *wqe)
2772 {
2773 	/* Copy the inline data to the data field */
2774 	u8 *in_data;
2775 	u32 i, sge_len;
2776 	void *sge_addr;
2777 
2778 	in_data = wqe->inline_data;
2779 	for (i = 0; i < wr->num_sge; i++) {
2780 		sge_addr = (void *)(unsigned long)
2781 				wr->sg_list[i].addr;
2782 		sge_len = wr->sg_list[i].length;
2783 
2784 		if ((sge_len + wqe->inline_len) >
2785 		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2786 			ibdev_err(&rdev->ibdev,
2787 				  "Inline data size requested > supported value");
2788 			return -EINVAL;
2789 		}
2790 		sge_len = wr->sg_list[i].length;
2791 
2792 		memcpy(in_data, sge_addr, sge_len);
2793 		in_data += wr->sg_list[i].length;
2794 		wqe->inline_len += wr->sg_list[i].length;
2795 	}
2796 	return wqe->inline_len;
2797 }
2798 
2799 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2800 				   const struct ib_send_wr *wr,
2801 				   struct bnxt_qplib_swqe *wqe)
2802 {
2803 	int payload_sz = 0;
2804 
2805 	if (wr->send_flags & IB_SEND_INLINE)
2806 		payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2807 	else
2808 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2809 					       wqe->num_sge);
2810 
2811 	return payload_sz;
2812 }
2813 
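/* Work around a HW stall on older chips: once a UD, GSI or RawEth QP has
 * posted BNXT_RE_UD_QP_HW_STALL WQEs, kick the QP with a modify-to-RTS and
 * reset the WQE counter.
 */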
2814 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2815 {
2816 	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2817 	     qp->ib_qp.qp_type == IB_QPT_GSI ||
2818 	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2819 	     qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2820 		int qp_attr_mask;
2821 		struct ib_qp_attr qp_attr;
2822 
2823 		qp_attr_mask = IB_QP_STATE;
2824 		qp_attr.qp_state = IB_QPS_RTS;
2825 		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2826 		qp->qplib_qp.wqe_cnt = 0;
2827 	}
2828 }
2829 
2830 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2831 				       struct bnxt_re_qp *qp,
2832 				       const struct ib_send_wr *wr)
2833 {
2834 	int rc = 0, payload_sz = 0;
2835 	unsigned long flags;
2836 
2837 	spin_lock_irqsave(&qp->sq_lock, flags);
2838 	while (wr) {
2839 		struct bnxt_qplib_swqe wqe = {};
2840 
2841 		/* Common */
2842 		wqe.num_sge = wr->num_sge;
2843 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2844 			ibdev_err(&rdev->ibdev,
2845 				  "Limit exceeded for Send SGEs");
2846 			rc = -EINVAL;
2847 			goto bad;
2848 		}
2849 
2850 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2851 		if (payload_sz < 0) {
2852 			rc = -EINVAL;
2853 			goto bad;
2854 		}
2855 		wqe.wr_id = wr->wr_id;
2856 
2857 		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2858 
2859 		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2860 		if (!rc)
2861 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2862 bad:
2863 		if (rc) {
2864 			ibdev_err(&rdev->ibdev,
2865 				  "Post send failed opcode = %#x rc = %d",
2866 				  wr->opcode, rc);
2867 			break;
2868 		}
2869 		wr = wr->next;
2870 	}
2871 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2872 	if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2873 		bnxt_ud_qp_hw_stall_workaround(qp);
2874 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2875 	return rc;
2876 }
2877 
2878 static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
2879 {
2880 	/* Need an unconditional fence for non-wire memory opcodes
2881 	 * to work as expected.
2882 	 */
2883 	if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
2884 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
2885 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
2886 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
2887 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2888 }
2889 
2890 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2891 		      const struct ib_send_wr **bad_wr)
2892 {
2893 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2894 	struct bnxt_qplib_swqe wqe;
2895 	int rc = 0, payload_sz = 0;
2896 	unsigned long flags;
2897 
2898 	spin_lock_irqsave(&qp->sq_lock, flags);
2899 	while (wr) {
2900 		/* House keeping */
2901 		memset(&wqe, 0, sizeof(wqe));
2902 
2903 		/* Common */
2904 		wqe.num_sge = wr->num_sge;
2905 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2906 			ibdev_err(&qp->rdev->ibdev,
2907 				  "Limit exceeded for Send SGEs");
2908 			rc = -EINVAL;
2909 			goto bad;
2910 		}
2911 
2912 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2913 		if (payload_sz < 0) {
2914 			rc = -EINVAL;
2915 			goto bad;
2916 		}
2917 		wqe.wr_id = wr->wr_id;
2918 
2919 		switch (wr->opcode) {
2920 		case IB_WR_SEND:
2921 		case IB_WR_SEND_WITH_IMM:
2922 			if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2923 				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2924 							       payload_sz);
2925 				if (rc)
2926 					goto bad;
2927 				wqe.rawqp1.lflags |=
2928 					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2929 			}
2930 			switch (wr->send_flags) {
2931 			case IB_SEND_IP_CSUM:
2932 				wqe.rawqp1.lflags |=
2933 					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2934 				break;
2935 			default:
2936 				break;
2937 			}
2938 			fallthrough;
2939 		case IB_WR_SEND_WITH_INV:
2940 			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2941 			break;
2942 		case IB_WR_RDMA_WRITE:
2943 		case IB_WR_RDMA_WRITE_WITH_IMM:
2944 		case IB_WR_RDMA_READ:
2945 			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2946 			break;
2947 		case IB_WR_ATOMIC_CMP_AND_SWP:
2948 		case IB_WR_ATOMIC_FETCH_AND_ADD:
2949 			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2950 			break;
2951 		case IB_WR_RDMA_READ_WITH_INV:
2952 			ibdev_err(&qp->rdev->ibdev,
2953 				  "RDMA Read with Invalidate is not supported");
2954 			rc = -EINVAL;
2955 			goto bad;
2956 		case IB_WR_LOCAL_INV:
2957 			rc = bnxt_re_build_inv_wqe(wr, &wqe);
2958 			break;
2959 		case IB_WR_REG_MR:
2960 			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2961 			break;
2962 		default:
2963 			/* Unsupported WRs */
2964 			ibdev_err(&qp->rdev->ibdev,
2965 				  "WR (%#x) is not supported", wr->opcode);
2966 			rc = -EINVAL;
2967 			goto bad;
2968 		}
2969 		if (!rc) {
2970 			if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2971 				bnxt_re_legacy_set_uc_fence(&wqe);
2972 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2973 		}
2974 bad:
2975 		if (rc) {
2976 			ibdev_err(&qp->rdev->ibdev,
2977 				  "post_send failed op:%#x qps = %#x rc = %d\n",
2978 				  wr->opcode, qp->qplib_qp.state, rc);
2979 			*bad_wr = wr;
2980 			break;
2981 		}
2982 		wr = wr->next;
2983 	}
2984 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2985 	if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2986 		bnxt_ud_qp_hw_stall_workaround(qp);
2987 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2988 
2989 	return rc;
2990 }
2991 
2992 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2993 				       struct bnxt_re_qp *qp,
2994 				       const struct ib_recv_wr *wr)
2995 {
2996 	struct bnxt_qplib_swqe wqe;
2997 	int rc = 0;
2998 
2999 	while (wr) {
3000 		/* House keeping */
3001 		memset(&wqe, 0, sizeof(wqe));
3002 
3003 		/* Common */
3004 		wqe.num_sge = wr->num_sge;
3005 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
3006 			ibdev_err(&rdev->ibdev,
3007 				  "Limit exceeded for Receive SGEs");
3008 			rc = -EINVAL;
3009 			break;
3010 		}
3011 		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
3012 		wqe.wr_id = wr->wr_id;
3013 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
3014 
3015 		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
3016 		if (rc)
3017 			break;
3018 
3019 		wr = wr->next;
3020 	}
3021 	if (!rc)
3022 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
3023 	return rc;
3024 }
3025 
3026 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
3027 		      const struct ib_recv_wr **bad_wr)
3028 {
3029 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
3030 	struct bnxt_qplib_swqe wqe;
3031 	int rc = 0, payload_sz = 0;
3032 	unsigned long flags;
3033 	u32 count = 0;
3034 
3035 	spin_lock_irqsave(&qp->rq_lock, flags);
3036 	while (wr) {
3037 		/* House keeping */
3038 		memset(&wqe, 0, sizeof(wqe));
3039 
3040 		/* Common */
3041 		wqe.num_sge = wr->num_sge;
3042 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
3043 			ibdev_err(&qp->rdev->ibdev,
3044 				  "Limit exceeded for Receive SGEs");
3045 			rc = -EINVAL;
3046 			*bad_wr = wr;
3047 			break;
3048 		}
3049 
3050 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
3051 					       wr->num_sge);
3052 		wqe.wr_id = wr->wr_id;
3053 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
3054 
3055 		if (ib_qp->qp_type == IB_QPT_GSI &&
3056 		    qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
3057 			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
3058 							      payload_sz);
3059 		if (!rc)
3060 			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
3061 		if (rc) {
3062 			*bad_wr = wr;
3063 			break;
3064 		}
3065 
3066 		/* Ring the DB once the number of posted RQEs reaches the threshold */
3067 		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
3068 			bnxt_qplib_post_recv_db(&qp->qplib_qp);
3069 			count = 0;
3070 		}
3071 
3072 		wr = wr->next;
3073 	}
3074 
3075 	if (count)
3076 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
3077 
3078 	spin_unlock_irqrestore(&qp->rq_lock, flags);
3079 
3080 	return rc;
3081 }
3082 
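/* Select the least-loaded notification queue (NQ) for a new CQ and bump
 * its load counter; bnxt_re_put_nq() drops the count on CQ teardown.
 */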
3083 static struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev)
3084 {
3085 	int min, indx;
3086 
3087 	mutex_lock(&rdev->nqr->load_lock);
3088 	for (indx = 0, min = 0; indx < (rdev->nqr->num_msix - 1); indx++) {
3089 		if (rdev->nqr->nq[min].load > rdev->nqr->nq[indx].load)
3090 			min = indx;
3091 	}
3092 	rdev->nqr->nq[min].load++;
3093 	mutex_unlock(&rdev->nqr->load_lock);
3094 
3095 	return &rdev->nqr->nq[min];
3096 }
3097 
3098 static void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq)
3099 {
3100 	mutex_lock(&rdev->nqr->load_lock);
3101 	nq->load--;
3102 	mutex_unlock(&rdev->nqr->load_lock);
3103 }
3104 
3105 /* Completion Queues */
3106 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
3107 {
3108 	struct bnxt_qplib_chip_ctx *cctx;
3109 	struct bnxt_qplib_nq *nq;
3110 	struct bnxt_re_dev *rdev;
3111 	struct bnxt_re_cq *cq;
3112 
3113 	cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3114 	rdev = cq->rdev;
3115 	nq = cq->qplib_cq.nq;
3116 	cctx = rdev->chip_ctx;
3117 
3118 	if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
3119 		free_page((unsigned long)cq->uctx_cq_page);
3120 		hash_del(&cq->hash_entry);
3121 	}
3122 	bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3123 
3124 	bnxt_re_put_nq(rdev, nq);
3125 	ib_umem_release(cq->umem);
3126 
3127 	atomic_dec(&rdev->stats.res.cq_count);
3128 	kfree(cq->cql);
3129 	return 0;
3130 }
3131 
3132 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
3133 		      struct uverbs_attr_bundle *attrs)
3134 {
3135 	struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
3136 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
3137 	struct ib_udata *udata = &attrs->driver_udata;
3138 	struct bnxt_re_ucontext *uctx =
3139 		rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
3140 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
3141 	struct bnxt_qplib_chip_ctx *cctx;
3142 	int cqe = attr->cqe;
3143 	int rc, entries;
3144 	u32 active_cqs;
3145 
3146 	if (attr->flags)
3147 		return -EOPNOTSUPP;
3148 
3149 	/* Validate CQ fields */
3150 	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3151 		ibdev_err(&rdev->ibdev, "Failed to create CQ - max exceeded");
3152 		return -EINVAL;
3153 	}
3154 
3155 	cq->rdev = rdev;
3156 	cctx = rdev->chip_ctx;
3157 	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
3158 
3159 	entries = bnxt_re_init_depth(cqe + 1, uctx);
3160 	if (entries > dev_attr->max_cq_wqes + 1)
3161 		entries = dev_attr->max_cq_wqes + 1;
3162 
3163 	cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
3164 	cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
3165 	if (udata) {
3166 		struct bnxt_re_cq_req req;
3167 		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3168 			rc = -EFAULT;
3169 			goto fail;
3170 		}
3171 
3172 		cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
3173 				       entries * sizeof(struct cq_base),
3174 				       IB_ACCESS_LOCAL_WRITE);
3175 		if (IS_ERR(cq->umem)) {
3176 			rc = PTR_ERR(cq->umem);
3177 			goto fail;
3178 		}
3179 		cq->qplib_cq.sg_info.umem = cq->umem;
3180 		cq->qplib_cq.dpi = &uctx->dpi;
3181 	} else {
3182 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
3183 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
3184 				  GFP_KERNEL);
3185 		if (!cq->cql) {
3186 			rc = -ENOMEM;
3187 			goto fail;
3188 		}
3189 
3190 		cq->qplib_cq.dpi = &rdev->dpi_privileged;
3191 	}
3192 	cq->qplib_cq.max_wqe = entries;
3193 	cq->qplib_cq.coalescing = &rdev->cq_coalescing;
3194 	cq->qplib_cq.nq = bnxt_re_get_nq(rdev);
3195 	cq->qplib_cq.cnq_hw_ring_id = cq->qplib_cq.nq->ring_id;
3196 
3197 	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
3198 	if (rc) {
3199 		ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
3200 		goto fail;
3201 	}
3202 
3203 	cq->ib_cq.cqe = entries;
3204 	cq->cq_period = cq->qplib_cq.period;
3205 
3206 	active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
3207 	if (active_cqs > rdev->stats.res.cq_watermark)
3208 		rdev->stats.res.cq_watermark = active_cqs;
3209 	spin_lock_init(&cq->cq_lock);
3210 
3211 	if (udata) {
3212 		struct bnxt_re_cq_resp resp = {};
3213 
3214 		if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
3215 			hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
3216 			/* Allocate a zeroed toggle page to share with user space */
3217 			cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
3218 			if (!cq->uctx_cq_page) {
3219 				rc = -ENOMEM;
3220 				goto c2fail;
3221 			}
3222 			resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
3223 		}
3224 		resp.cqid = cq->qplib_cq.id;
3225 		resp.tail = cq->qplib_cq.hwq.cons;
3226 		resp.phase = cq->qplib_cq.period;
3227 		resp.rsvd = 0;
3228 		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
3229 		if (rc) {
3230 			ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
3231 			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3232 			goto free_mem;
3233 		}
3234 	}
3235 
3236 	return 0;
3237 
3238 free_mem:
3239 	free_page((unsigned long)cq->uctx_cq_page);
3240 c2fail:
3241 	ib_umem_release(cq->umem);
3242 fail:
3243 	kfree(cq->cql);
3244 	return rc;
3245 }
3246 
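/* Finalize a CQ resize: adopt the new depth and, for user CQs, release the
 * old umem and switch to the resize umem.
 */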
3247 static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
3248 {
3249 	struct bnxt_re_dev *rdev = cq->rdev;
3250 
3251 	bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
3252 
3253 	cq->qplib_cq.max_wqe = cq->resize_cqe;
3254 	if (cq->resize_umem) {
3255 		ib_umem_release(cq->umem);
3256 		cq->umem = cq->resize_umem;
3257 		cq->resize_umem = NULL;
3258 		cq->resize_cqe = 0;
3259 	}
3260 }
3261 
3262 int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
3263 {
3264 	struct bnxt_qplib_sg_info sg_info = {};
3265 	struct bnxt_qplib_dpi *orig_dpi = NULL;
3266 	struct bnxt_qplib_dev_attr *dev_attr;
3267 	struct bnxt_re_ucontext *uctx = NULL;
3268 	struct bnxt_re_resize_cq_req req;
3269 	struct bnxt_re_dev *rdev;
3270 	struct bnxt_re_cq *cq;
3271 	int rc, entries;
3272 
3273 	cq =  container_of(ibcq, struct bnxt_re_cq, ib_cq);
3274 	rdev = cq->rdev;
3275 	dev_attr = rdev->dev_attr;
3276 	if (!ibcq->uobject) {
3277 		ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
3278 		return -EOPNOTSUPP;
3279 	}
3280 
3281 	if (cq->resize_umem) {
3282 		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
3283 			  cq->qplib_cq.id);
3284 		return -EBUSY;
3285 	}
3286 
3287 	/* Check that the requested CQ depth is within the supported range */
3288 	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3289 		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
3290 			  cq->qplib_cq.id, cqe);
3291 		return -EINVAL;
3292 	}
3293 
3294 	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
3295 	entries = bnxt_re_init_depth(cqe + 1, uctx);
3296 	if (entries > dev_attr->max_cq_wqes + 1)
3297 		entries = dev_attr->max_cq_wqes + 1;
3298 
3299 	/* uverbs consumer */
3300 	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3301 		rc = -EFAULT;
3302 		goto fail;
3303 	}
3304 
3305 	cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
3306 				      entries * sizeof(struct cq_base),
3307 				      IB_ACCESS_LOCAL_WRITE);
3308 	if (IS_ERR(cq->resize_umem)) {
3309 		rc = PTR_ERR(cq->resize_umem);
3310 		ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %pe\n",
3311 			  __func__, cq->resize_umem);
3312 		cq->resize_umem = NULL;
3313 		goto fail;
3314 	}
3315 	cq->resize_cqe = entries;
3316 	memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
3317 	orig_dpi = cq->qplib_cq.dpi;
3318 
3319 	cq->qplib_cq.sg_info.umem = cq->resize_umem;
3320 	cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
3321 	cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
3322 	cq->qplib_cq.dpi = &uctx->dpi;
3323 
3324 	rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
3325 	if (rc) {
3326 		ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
3327 			  cq->qplib_cq.id);
3328 		goto fail;
3329 	}
3330 
3331 	cq->ib_cq.cqe = cq->resize_cqe;
3332 	atomic_inc(&rdev->stats.res.resize_count);
3333 
3334 	return 0;
3335 
3336 fail:
3337 	if (cq->resize_umem) {
3338 		ib_umem_release(cq->resize_umem);
3339 		cq->resize_umem = NULL;
3340 		cq->resize_cqe = 0;
3341 		memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
3342 		cq->qplib_cq.dpi = orig_dpi;
3343 	}
3344 	return rc;
3345 }
3346 
3347 static u8 __req_to_ib_wc_status(u8 qstatus)
3348 {
3349 	switch (qstatus) {
3350 	case CQ_REQ_STATUS_OK:
3351 		return IB_WC_SUCCESS;
3352 	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
3353 		return IB_WC_BAD_RESP_ERR;
3354 	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
3355 		return IB_WC_LOC_LEN_ERR;
3356 	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
3357 		return IB_WC_LOC_QP_OP_ERR;
3358 	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
3359 		return IB_WC_LOC_PROT_ERR;
3360 	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
3361 		return IB_WC_GENERAL_ERR;
3362 	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
3363 		return IB_WC_REM_INV_REQ_ERR;
3364 	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
3365 		return IB_WC_REM_ACCESS_ERR;
3366 	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
3367 		return IB_WC_REM_OP_ERR;
3368 	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
3369 		return IB_WC_RNR_RETRY_EXC_ERR;
3370 	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
3371 		return IB_WC_RETRY_EXC_ERR;
3372 	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
3373 		return IB_WC_WR_FLUSH_ERR;
3374 	default:
3375 		return IB_WC_GENERAL_ERR;
3376 	}
3377 	return 0;
3378 }
3379 
3380 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
3381 {
3382 	switch (qstatus) {
3383 	case CQ_RES_RAWETH_QP1_STATUS_OK:
3384 		return IB_WC_SUCCESS;
3385 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
3386 		return IB_WC_LOC_ACCESS_ERR;
3387 	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
3388 		return IB_WC_LOC_LEN_ERR;
3389 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
3390 		return IB_WC_LOC_PROT_ERR;
3391 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
3392 		return IB_WC_LOC_QP_OP_ERR;
3393 	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
3394 		return IB_WC_GENERAL_ERR;
3395 	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
3396 		return IB_WC_WR_FLUSH_ERR;
3397 	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
3398 		return IB_WC_WR_FLUSH_ERR;
3399 	default:
3400 		return IB_WC_GENERAL_ERR;
3401 	}
3402 }
3403 
3404 static u8 __rc_to_ib_wc_status(u8 qstatus)
3405 {
3406 	switch (qstatus) {
3407 	case CQ_RES_RC_STATUS_OK:
3408 		return IB_WC_SUCCESS;
3409 	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
3410 		return IB_WC_LOC_ACCESS_ERR;
3411 	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
3412 		return IB_WC_LOC_LEN_ERR;
3413 	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
3414 		return IB_WC_LOC_PROT_ERR;
3415 	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
3416 		return IB_WC_LOC_QP_OP_ERR;
3417 	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
3418 		return IB_WC_GENERAL_ERR;
3419 	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
3420 		return IB_WC_REM_INV_REQ_ERR;
3421 	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
3422 		return IB_WC_WR_FLUSH_ERR;
3423 	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
3424 		return IB_WC_WR_FLUSH_ERR;
3425 	default:
3426 		return IB_WC_GENERAL_ERR;
3427 	}
3428 }
3429 
3430 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
3431 {
3432 	switch (cqe->type) {
3433 	case BNXT_QPLIB_SWQE_TYPE_SEND:
3434 		wc->opcode = IB_WC_SEND;
3435 		break;
3436 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3437 		wc->opcode = IB_WC_SEND;
3438 		wc->wc_flags |= IB_WC_WITH_IMM;
3439 		break;
3440 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3441 		wc->opcode = IB_WC_SEND;
3442 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3443 		break;
3444 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3445 		wc->opcode = IB_WC_RDMA_WRITE;
3446 		break;
3447 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3448 		wc->opcode = IB_WC_RDMA_WRITE;
3449 		wc->wc_flags |= IB_WC_WITH_IMM;
3450 		break;
3451 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3452 		wc->opcode = IB_WC_RDMA_READ;
3453 		break;
3454 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3455 		wc->opcode = IB_WC_COMP_SWAP;
3456 		break;
3457 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3458 		wc->opcode = IB_WC_FETCH_ADD;
3459 		break;
3460 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3461 		wc->opcode = IB_WC_LOCAL_INV;
3462 		break;
3463 	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3464 		wc->opcode = IB_WC_REG_MR;
3465 		break;
3466 	default:
3467 		wc->opcode = IB_WC_SEND;
3468 		break;
3469 	}
3470 
3471 	wc->status = __req_to_ib_wc_status(cqe->status);
3472 }
3473 
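/*
 * Classify a raw QP1 receive from its CQE flags: returns
 * BNXT_RE_ROCE_V1_PACKET, BNXT_RE_ROCEV2_IPV4_PACKET or
 * BNXT_RE_ROCEV2_IPV6_PACKET, or -1 if the packet is not RoCE.
 */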
3474 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3475 				     u16 raweth_qp1_flags2)
3476 {
3477 	bool is_ipv6 = false, is_ipv4 = false;
3478 
3479 	/* raweth_qp1_flags bits 9:6 indicate the itype */
3480 	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3481 	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3482 		return -1;
3483 
3484 	if (raweth_qp1_flags2 &
3485 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
3486 	    raweth_qp1_flags2 &
3487 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
3488 		/* raweth_qp1_flags2 bit 8 indicates the ip_type: 0 - IPv4, 1 - IPv6 */
3489 		(raweth_qp1_flags2 &
3490 		 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
3491 			(is_ipv6 = true) : (is_ipv4 = true);
3492 		return ((is_ipv6) ?
3493 			 BNXT_RE_ROCEV2_IPV6_PACKET :
3494 			 BNXT_RE_ROCEV2_IPV4_PACKET);
3495 	} else {
3496 		return BNXT_RE_ROCE_V1_PACKET;
3497 	}
3498 }
3499 
3500 static int bnxt_re_to_ib_nw_type(int nw_type)
3501 {
3502 	u8 nw_hdr_type = 0xFF;
3503 
3504 	switch (nw_type) {
3505 	case BNXT_RE_ROCE_V1_PACKET:
3506 		nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3507 		break;
3508 	case BNXT_RE_ROCEV2_IPV4_PACKET:
3509 		nw_hdr_type = RDMA_NETWORK_IPV4;
3510 		break;
3511 	case BNXT_RE_ROCEV2_IPV6_PACKET:
3512 		nw_hdr_type = RDMA_NETWORK_IPV6;
3513 		break;
3514 	}
3515 	return nw_hdr_type;
3516 }
3517 
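/*
 * Inspect the QP1 receive header buffer to decide whether this is a
 * loopback RoCE packet: either an ETH_P_IBOE frame (RoCE v1) or a
 * UDP frame destined to the RoCE v2 port.
 */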
3518 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3519 				       void *rq_hdr_buf)
3520 {
3521 	u8 *tmp_buf = NULL;
3522 	struct ethhdr *eth_hdr;
3523 	u16 eth_type;
3524 	bool rc = false;
3525 
3526 	tmp_buf = (u8 *)rq_hdr_buf;
3527 	/*
3528 	 * If the destination MAC is not the same as the interface MAC,
3529 	 * this could be a loopback or a multicast address; check whether
3530 	 * it is a loopback packet.
3531 	 */
3532 	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3533 		tmp_buf += 4;
3534 		/* Check the ether type */
3535 		eth_hdr = (struct ethhdr *)tmp_buf;
3536 		eth_type = ntohs(eth_hdr->h_proto);
3537 		switch (eth_type) {
3538 		case ETH_P_IBOE:
3539 			rc = true;
3540 			break;
3541 		case ETH_P_IP:
3542 		case ETH_P_IPV6: {
3543 			u32 len;
3544 			struct udphdr *udp_hdr;
3545 
3546 			len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3547 						      sizeof(struct ipv6hdr));
3548 			tmp_buf += sizeof(struct ethhdr) + len;
3549 			udp_hdr = (struct udphdr *)tmp_buf;
3550 			if (ntohs(udp_hdr->dest) ==
3551 				    ROCE_V2_UDP_DPORT)
3552 				rc = true;
3553 			break;
3554 			}
3555 		default:
3556 			break;
3557 		}
3558 	}
3559 
3560 	return rc;
3561 }
3562 
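/*
 * Relay a packet received on the raw QP1 to the shadow GSI QP: save the
 * original CQE in the sqp_tbl entry, post a receive buffer on the shadow
 * QP and send the received headers and payload (minus the Ethernet
 * header) to it as a UD work request.
 */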
3563 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3564 					 struct bnxt_qplib_cqe *cqe)
3565 {
3566 	struct bnxt_re_dev *rdev = gsi_qp->rdev;
3567 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3568 	struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3569 	dma_addr_t shrq_hdr_buf_map;
3570 	struct ib_sge s_sge[2] = {};
3571 	struct ib_sge r_sge[2] = {};
3572 	struct bnxt_re_ah *gsi_sah;
3573 	struct ib_recv_wr rwr = {};
3574 	dma_addr_t rq_hdr_buf_map;
3575 	struct ib_ud_wr udwr = {};
3576 	struct ib_send_wr *swr;
3577 	u32 skip_bytes = 0;
3578 	int pkt_type = 0;
3579 	void *rq_hdr_buf;
3580 	u32 offset = 0;
3581 	u32 tbl_idx;
3582 	int rc;
3583 
3584 	swr = &udwr.wr;
3585 	tbl_idx = cqe->wr_id;
3586 
3587 	rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3588 			(tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3589 	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3590 							  tbl_idx);
3591 
3592 	/* Shadow QP header buffer */
3593 	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3594 							    tbl_idx);
3595 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3596 
3597 	/* Store this cqe */
3598 	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3599 	sqp_entry->qp1_qp = gsi_qp;
3600 
3601 	/* Find packet type from the cqe */
3602 
3603 	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3604 					     cqe->raweth_qp1_flags2);
3605 	if (pkt_type < 0) {
3606 		ibdev_err(&rdev->ibdev, "Invalid packet\n");
3607 		return -EINVAL;
3608 	}
3609 
3610 	/* Adjust the offset for the user buffer and post in the rq */
3611 
3612 	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3613 		offset = 20;
3614 
3615 	/*
3616 	 * A QP1 loopback packet has 4 bytes of internal header before the
3617 	 * ether header. Skip these four bytes.
3618 	 */
3619 	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3620 		skip_bytes = 4;
3621 
3622 	/* First send SGE. Skip the ether header */
3623 	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3624 			+ skip_bytes;
3625 	s_sge[0].lkey = 0xFFFFFFFF;
3626 	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3627 				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3628 
3629 	/* Second Send SGE */
3630 	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3631 			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3632 	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3633 		s_sge[1].addr += 8;
3634 	s_sge[1].lkey = 0xFFFFFFFF;
3635 	s_sge[1].length = 256;
3636 
3637 	/* First recv SGE */
3638 
3639 	r_sge[0].addr = shrq_hdr_buf_map;
3640 	r_sge[0].lkey = 0xFFFFFFFF;
3641 	r_sge[0].length = 40;
3642 
3643 	r_sge[1].addr = sqp_entry->sge.addr + offset;
3644 	r_sge[1].lkey = sqp_entry->sge.lkey;
3645 	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3646 
3647 	/* Create receive work request */
3648 	rwr.num_sge = 2;
3649 	rwr.sg_list = r_sge;
3650 	rwr.wr_id = tbl_idx;
3651 	rwr.next = NULL;
3652 
3653 	rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3654 	if (rc) {
3655 		ibdev_err(&rdev->ibdev,
3656 			  "Failed to post Rx buffers to shadow QP");
3657 		return -ENOMEM;
3658 	}
3659 
3660 	swr->num_sge = 2;
3661 	swr->sg_list = s_sge;
3662 	swr->wr_id = tbl_idx;
3663 	swr->opcode = IB_WR_SEND;
3664 	swr->next = NULL;
3665 	gsi_sah = rdev->gsi_ctx.gsi_sah;
3666 	udwr.ah = &gsi_sah->ib_ah;
3667 	udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3668 	udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3669 
3670 	/* Post the received data on the shadow QP's send queue */
3671 	return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3672 }
3673 
3674 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3675 					  struct bnxt_qplib_cqe *cqe)
3676 {
3677 	wc->opcode = IB_WC_RECV;
3678 	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3679 	wc->wc_flags |= IB_WC_GRH;
3680 }
3681 
3682 static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3683 					u16 vlan_id)
3684 {
3685 	/*
3686 	 * Check if the VLAN is configured in the host. If it is not configured,
3687 	 * it can be a transparent VLAN, so don't report the VLAN ID.
3688 	 */
3689 	if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3690 				      htons(ETH_P_8021Q), vlan_id))
3691 		return false;
3692 	return true;
3693 }
3694 
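/*
 * Extract the VLAN ID and priority from the raw QP1 CQE metadata.
 * Returns true only when the metadata carries an 802.1Q tag.
 */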
3695 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3696 				u16 *vid, u8 *sl)
3697 {
3698 	bool ret = false;
3699 	u32 metadata;
3700 	u16 tpid;
3701 
3702 	metadata = orig_cqe->raweth_qp1_metadata;
3703 	if (orig_cqe->raweth_qp1_flags2 &
3704 		CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3705 		tpid = ((metadata &
3706 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3707 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3708 		if (tpid == ETH_P_8021Q) {
3709 			*vid = metadata &
3710 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3711 			*sl = (metadata &
3712 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3713 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3714 			ret = true;
3715 		}
3716 	}
3717 
3718 	return ret;
3719 }
3720 
3721 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3722 				      struct bnxt_qplib_cqe *cqe)
3723 {
3724 	wc->opcode = IB_WC_RECV;
3725 	wc->status = __rc_to_ib_wc_status(cqe->status);
3726 
3727 	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3728 		wc->wc_flags |= IB_WC_WITH_IMM;
3729 	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3730 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3731 	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3732 	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3733 		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3734 }
3735 
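/*
 * Build the work completion for a packet that was looped through the
 * shadow GSI QP, using the CQE stored when the packet was first
 * received on the raw QP1.
 */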
3736 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3737 					     struct ib_wc *wc,
3738 					     struct bnxt_qplib_cqe *cqe)
3739 {
3740 	struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3741 	struct bnxt_re_qp *gsi_qp = NULL;
3742 	struct bnxt_qplib_cqe *orig_cqe = NULL;
3743 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3744 	int nw_type;
3745 	u32 tbl_idx;
3746 	u16 vlan_id;
3747 	u8 sl;
3748 
3749 	tbl_idx = cqe->wr_id;
3750 
3751 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3752 	gsi_qp = sqp_entry->qp1_qp;
3753 	orig_cqe = &sqp_entry->cqe;
3754 
3755 	wc->wr_id = sqp_entry->wrid;
3756 	wc->byte_len = orig_cqe->length;
3757 	wc->qp = &gsi_qp->ib_qp;
3758 
3759 	wc->ex.imm_data = cpu_to_be32(orig_cqe->immdata);
3760 	wc->src_qp = orig_cqe->src_qp;
3761 	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3762 	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3763 		if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3764 			wc->vlan_id = vlan_id;
3765 			wc->sl = sl;
3766 			wc->wc_flags |= IB_WC_WITH_VLAN;
3767 		}
3768 	}
3769 	wc->port_num = 1;
3770 	wc->vendor_err = orig_cqe->status;
3771 
3772 	wc->opcode = IB_WC_RECV;
3773 	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3774 	wc->wc_flags |= IB_WC_GRH;
3775 
3776 	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3777 					    orig_cqe->raweth_qp1_flags2);
3778 	if (nw_type >= 0) {
3779 		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3780 		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3781 	}
3782 }
3783 
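/*
 * Fill the work completion for a UD receive; GSI QPs additionally
 * report the SMAC, VLAN and network header type.
 */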
3784 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3785 				      struct ib_wc *wc,
3786 				      struct bnxt_qplib_cqe *cqe)
3787 {
3788 	struct bnxt_re_dev *rdev;
3789 	u16 vlan_id = 0;
3790 	u8 nw_type;
3791 
3792 	rdev = qp->rdev;
3793 	wc->opcode = IB_WC_RECV;
3794 	wc->status = __rc_to_ib_wc_status(cqe->status);
3795 
3796 	if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3797 		wc->wc_flags |= IB_WC_WITH_IMM;
3798 	/* report only on GSI QP for Thor */
3799 	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3800 		wc->wc_flags |= IB_WC_GRH;
3801 		memcpy(wc->smac, cqe->smac, ETH_ALEN);
3802 		wc->wc_flags |= IB_WC_WITH_SMAC;
3803 		if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3804 			vlan_id = (cqe->cfa_meta & 0xFFF);
3805 		}
3806 		/* Report the VLAN only if vlan_id is non-zero */
3807 		if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3808 			wc->vlan_id = vlan_id;
3809 			wc->wc_flags |= IB_WC_WITH_VLAN;
3810 		}
3811 		nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3812 			   CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3813 		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3814 		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3815 	}
3817 }
3818 
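/*
 * Post a phantom WQE (a fence MW bind) on the QP's send queue; called
 * from bnxt_re_poll_cq() when the qplib layer flags that a phantom
 * send is required.
 */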
3819 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3820 {
3821 	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3822 	unsigned long flags;
3823 	int rc;
3824 
3825 	spin_lock_irqsave(&qp->sq_lock, flags);
3826 
3827 	rc = bnxt_re_bind_fence_mw(lib_qp);
3828 	if (!rc) {
3829 		lib_qp->sq.phantom_wqe_cnt++;
3830 		ibdev_dbg(&qp->rdev->ibdev,
3831 			  "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3832 			  lib_qp->id, lib_qp->sq.hwq.prod,
3833 			  HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3834 			  lib_qp->sq.phantom_wqe_cnt);
3835 	}
3836 
3837 	spin_unlock_irqrestore(&qp->sq_lock, flags);
3838 	return rc;
3839 }
3840 
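/*
 * Poll up to num_entries completions from the CQ. For kernel CQs each
 * qplib CQE is translated to an ib_wc; for user CQs the only work done
 * here is to complete a pending CQ resize, since CQEs are consumed in
 * user space.
 */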
3841 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3842 {
3843 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3844 	struct bnxt_re_qp *qp, *sh_qp;
3845 	struct bnxt_qplib_cqe *cqe;
3846 	int i, ncqe, budget;
3847 	struct bnxt_qplib_q *sq;
3848 	struct bnxt_qplib_qp *lib_qp;
3849 	u32 tbl_idx;
3850 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3851 	unsigned long flags;
3852 
3853 	/* User CQ; the only processing we do is to
3854 	 * complete any pending CQ resize operation.
3855 	 */
3856 	if (cq->umem) {
3857 		if (cq->resize_umem)
3858 			bnxt_re_resize_cq_complete(cq);
3859 		return 0;
3860 	}
3861 
3862 	spin_lock_irqsave(&cq->cq_lock, flags);
3863 	budget = min_t(u32, num_entries, cq->max_cql);
3864 	num_entries = budget;
3865 	if (!cq->cql) {
3866 		ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3867 		goto exit;
3868 	}
3869 	cqe = &cq->cql[0];
3870 	while (budget) {
3871 		lib_qp = NULL;
3872 		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3873 		if (lib_qp) {
3874 			sq = &lib_qp->sq;
3875 			if (sq->send_phantom) {
3876 				qp = container_of(lib_qp,
3877 						  struct bnxt_re_qp, qplib_qp);
3878 				if (send_phantom_wqe(qp) == -ENOMEM)
3879 					ibdev_err(&cq->rdev->ibdev,
3880 						  "Phantom failed! Scheduled to send again\n");
3881 				else
3882 					sq->send_phantom = false;
3883 			}
3884 		}
3885 		if (ncqe < budget)
3886 			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3887 							      cqe + ncqe,
3888 							      budget - ncqe);
3889 
3890 		if (!ncqe)
3891 			break;
3892 
3893 		for (i = 0; i < ncqe; i++, cqe++) {
3894 			/* Transcribe each qplib_wqe back to ib_wc */
3895 			memset(wc, 0, sizeof(*wc));
3896 
3897 			wc->wr_id = cqe->wr_id;
3898 			wc->byte_len = cqe->length;
3899 			qp = container_of
3900 				((struct bnxt_qplib_qp *)
3901 				 (unsigned long)(cqe->qp_handle),
3902 				 struct bnxt_re_qp, qplib_qp);
3903 			wc->qp = &qp->ib_qp;
3904 			if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3905 				wc->ex.imm_data = cpu_to_be32(cqe->immdata);
3906 			else
3907 				wc->ex.invalidate_rkey = cqe->invrkey;
3908 			wc->src_qp = cqe->src_qp;
3909 			memcpy(wc->smac, cqe->smac, ETH_ALEN);
3910 			wc->port_num = 1;
3911 			wc->vendor_err = cqe->status;
3912 
3913 			switch (cqe->opcode) {
3914 			case CQ_BASE_CQE_TYPE_REQ:
3915 				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3916 				if (sh_qp &&
3917 				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3918 					/* Handle this completion with
3919 					 * the stored completion
3920 					 */
3921 					memset(wc, 0, sizeof(*wc));
3922 					continue;
3923 				}
3924 				bnxt_re_process_req_wc(wc, cqe);
3925 				break;
3926 			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3927 				if (!cqe->status) {
3928 					int rc = 0;
3929 
3930 					rc = bnxt_re_process_raw_qp_pkt_rx
3931 								(qp, cqe);
3932 					if (!rc) {
3933 						memset(wc, 0, sizeof(*wc));
3934 						continue;
3935 					}
3936 					cqe->status = -1;
3937 				}
3938 				/* Errors need not be looped back.
3939 				 * But change the wr_id to the one
3940 				 * stored in the table
3941 				 */
3942 				tbl_idx = cqe->wr_id;
3943 				sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
3944 				wc->wr_id = sqp_entry->wrid;
3945 				bnxt_re_process_res_rawqp1_wc(wc, cqe);
3946 				break;
3947 			case CQ_BASE_CQE_TYPE_RES_RC:
3948 				bnxt_re_process_res_rc_wc(wc, cqe);
3949 				break;
3950 			case CQ_BASE_CQE_TYPE_RES_UD:
3951 				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3952 				if (sh_qp &&
3953 				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3954 					/* Handle this completion with
3955 					 * the stored completion
3956 					 */
3957 					if (cqe->status) {
3958 						continue;
3959 					} else {
3960 						bnxt_re_process_res_shadow_qp_wc
3961 								(qp, wc, cqe);
3962 						break;
3963 					}
3964 				}
3965 				bnxt_re_process_res_ud_wc(qp, wc, cqe);
3966 				break;
3967 			default:
3968 				ibdev_err(&cq->rdev->ibdev,
3969 					  "POLL CQ : type 0x%x not handled",
3970 					  cqe->opcode);
3971 				continue;
3972 			}
3973 			wc++;
3974 			budget--;
3975 		}
3976 	}
3977 exit:
3978 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3979 	return num_entries - budget;
3980 }
3981 
3982 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3983 			  enum ib_cq_notify_flags ib_cqn_flags)
3984 {
3985 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3986 	int type = 0, rc = 0;
3987 	unsigned long flags;
3988 
3989 	spin_lock_irqsave(&cq->cq_lock, flags);
3990 	/* Trigger on the very next completion */
3991 	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3992 		type = DBC_DBC_TYPE_CQ_ARMALL;
3993 	/* Trigger on the next solicited completion */
3994 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
3995 		type = DBC_DBC_TYPE_CQ_ARMSE;
3996 
3997 	/* Poll to see if there are missed events */
3998 	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3999 	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
4000 		rc = 1;
4001 		goto exit;
4002 	}
4003 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
4004 
4005 exit:
4006 	spin_unlock_irqrestore(&cq->cq_lock, flags);
4007 	return rc;
4008 }
4009 
4010 /* Memory Regions */
4011 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
4012 {
4013 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4014 	struct bnxt_re_dev *rdev = pd->rdev;
4015 	struct bnxt_re_mr *mr;
4016 	u32 active_mrs;
4017 	int rc;
4018 
4019 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4020 	if (!mr)
4021 		return ERR_PTR(-ENOMEM);
4022 
4023 	mr->rdev = rdev;
4024 	mr->qplib_mr.pd = &pd->qplib_pd;
4025 	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
4026 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
4027 
4028 	if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
4029 		bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
4030 
4031 	/* Allocate and register 0 as the address */
4032 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4033 	if (rc)
4034 		goto fail;
4035 
4036 	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
4037 	mr->qplib_mr.total_size = -1; /* Infinite length */
4038 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
4039 			       PAGE_SIZE);
4040 	if (rc)
4041 		goto fail_mr;
4042 
4043 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
4044 	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
4045 			       IB_ACCESS_REMOTE_ATOMIC))
4046 		mr->ib_mr.rkey = mr->ib_mr.lkey;
4047 	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4048 	if (active_mrs > rdev->stats.res.mr_watermark)
4049 		rdev->stats.res.mr_watermark = active_mrs;
4050 
4051 	return &mr->ib_mr;
4052 
4053 fail_mr:
4054 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4055 fail:
4056 	kfree(mr);
4057 	return ERR_PTR(rc);
4058 }
4059 
4060 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
4061 {
4062 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
4063 	struct bnxt_re_dev *rdev = mr->rdev;
4064 	int rc;
4065 
4066 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4067 	if (rc) {
4068 		ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
4069 		return rc;
4070 	}
4071 
4072 	if (mr->pages) {
4073 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
4074 							&mr->qplib_frpl);
4075 		kfree(mr->pages);
4076 		mr->npages = 0;
4077 		mr->pages = NULL;
4078 	}
4079 	ib_umem_release(mr->ib_umem);
4080 
4081 	kfree(mr);
4082 	atomic_dec(&rdev->stats.res.mr_count);
4083 	return rc;
4084 }
4085 
4086 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
4087 {
4088 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
4089 
4090 	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
4091 		return -ENOMEM;
4092 
4093 	mr->pages[mr->npages++] = addr;
4094 	return 0;
4095 }
4096 
4097 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
4098 		      unsigned int *sg_offset)
4099 {
4100 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
4101 
4102 	mr->npages = 0;
4103 	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
4104 }
4105 
4106 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
4107 			       u32 max_num_sg)
4108 {
4109 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4110 	struct bnxt_re_dev *rdev = pd->rdev;
4111 	struct bnxt_re_mr *mr = NULL;
4112 	u32 active_mrs;
4113 	int rc;
4114 
4115 	if (type != IB_MR_TYPE_MEM_REG) {
4116 		ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
4117 		return ERR_PTR(-EINVAL);
4118 	}
4119 	if (max_num_sg > MAX_PBL_LVL_1_PGS)
4120 		return ERR_PTR(-EINVAL);
4121 
4122 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4123 	if (!mr)
4124 		return ERR_PTR(-ENOMEM);
4125 
4126 	mr->rdev = rdev;
4127 	mr->qplib_mr.pd = &pd->qplib_pd;
4128 	mr->qplib_mr.access_flags = BNXT_QPLIB_FR_PMR;
4129 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
4130 
4131 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4132 	if (rc)
4133 		goto bail;
4134 
4135 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
4136 	mr->ib_mr.rkey = mr->ib_mr.lkey;
4137 
4138 	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
4139 	if (!mr->pages) {
4140 		rc = -ENOMEM;
4141 		goto fail;
4142 	}
4143 	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
4144 						 &mr->qplib_frpl, max_num_sg);
4145 	if (rc) {
4146 		ibdev_err(&rdev->ibdev,
4147 			  "Failed to allocate HW FR page list");
4148 		goto fail_mr;
4149 	}
4150 
4151 	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4152 	if (active_mrs > rdev->stats.res.mr_watermark)
4153 		rdev->stats.res.mr_watermark = active_mrs;
4154 	return &mr->ib_mr;
4155 
4156 fail_mr:
4157 	kfree(mr->pages);
4158 fail:
4159 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4160 bail:
4161 	kfree(mr);
4162 	return ERR_PTR(rc);
4163 }
4164 
4165 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
4166 			       struct ib_udata *udata)
4167 {
4168 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4169 	struct bnxt_re_dev *rdev = pd->rdev;
4170 	struct bnxt_re_mw *mw;
4171 	u32 active_mws;
4172 	int rc;
4173 
4174 	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
4175 	if (!mw)
4176 		return ERR_PTR(-ENOMEM);
4177 	mw->rdev = rdev;
4178 	mw->qplib_mw.pd = &pd->qplib_pd;
4179 
4180 	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
4181 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
4182 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
4183 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
4184 	if (rc) {
4185 		ibdev_err(&rdev->ibdev, "Allocate MW failed!");
4186 		goto fail;
4187 	}
4188 	mw->ib_mw.rkey = mw->qplib_mw.rkey;
4189 
4190 	active_mws = atomic_inc_return(&rdev->stats.res.mw_count);
4191 	if (active_mws > rdev->stats.res.mw_watermark)
4192 		rdev->stats.res.mw_watermark = active_mws;
4193 	return &mw->ib_mw;
4194 
4195 fail:
4196 	kfree(mw);
4197 	return ERR_PTR(rc);
4198 }
4199 
4200 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
4201 {
4202 	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
4203 	struct bnxt_re_dev *rdev = mw->rdev;
4204 	int rc;
4205 
4206 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
4207 	if (rc) {
4208 		ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
4209 		return rc;
4210 	}
4211 
4212 	kfree(mw);
4213 	atomic_dec(&rdev->stats.res.mw_count);
4214 	return rc;
4215 }
4216 
4217 static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
4218 					   int mr_access_flags, struct ib_umem *umem)
4219 {
4220 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4221 	struct bnxt_re_dev *rdev = pd->rdev;
4222 	unsigned long page_size;
4223 	struct bnxt_re_mr *mr;
4224 	int umem_pgs, rc;
4225 	u32 active_mrs;
4226 
4227 	if (length > BNXT_RE_MAX_MR_SIZE) {
4228 		ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
4229 			  length, BNXT_RE_MAX_MR_SIZE);
4230 		return ERR_PTR(-ENOMEM);
4231 	}
4232 
4233 	page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
4234 	if (!page_size) {
4235 		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
4236 		return ERR_PTR(-EINVAL);
4237 	}
4238 
4239 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4240 	if (!mr)
4241 		return ERR_PTR(-ENOMEM);
4242 
4243 	mr->rdev = rdev;
4244 	mr->qplib_mr.pd = &pd->qplib_pd;
4245 	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
4246 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
4247 
4248 	if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
4249 		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4250 		if (rc) {
4251 			ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
4252 			rc = -EIO;
4253 			goto free_mr;
4254 		}
4255 		/* The fixed portion of the rkey is the same as the lkey */
4256 		mr->ib_mr.rkey = mr->qplib_mr.rkey;
4257 	} else {
4258 		mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
4259 	}
4260 	mr->ib_umem = umem;
4261 	mr->qplib_mr.va = virt_addr;
4262 	mr->qplib_mr.total_size = length;
4263 
4264 	if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
4265 		bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
4266 
4267 	umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
4268 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
4269 			       umem_pgs, page_size);
4270 	if (rc) {
4271 		ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
4272 		rc = -EIO;
4273 		goto free_mrw;
4274 	}
4275 
4276 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
4277 	mr->ib_mr.rkey = mr->qplib_mr.lkey;
4278 	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4279 	if (active_mrs > rdev->stats.res.mr_watermark)
4280 		rdev->stats.res.mr_watermark = active_mrs;
4281 
4282 	return &mr->ib_mr;
4283 
4284 free_mrw:
4285 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4286 free_mr:
4287 	kfree(mr);
4288 	return ERR_PTR(rc);
4289 }
4290 
4291 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
4292 				  u64 virt_addr, int mr_access_flags,
4293 				  struct ib_dmah *dmah,
4294 				  struct ib_udata *udata)
4295 {
4296 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4297 	struct bnxt_re_dev *rdev = pd->rdev;
4298 	struct ib_umem *umem;
4299 	struct ib_mr *ib_mr;
4300 
4301 	if (dmah)
4302 		return ERR_PTR(-EOPNOTSUPP);
4303 
4304 	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
4305 	if (IS_ERR(umem))
4306 		return ERR_CAST(umem);
4307 
4308 	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4309 	if (IS_ERR(ib_mr))
4310 		ib_umem_release(umem);
4311 	return ib_mr;
4312 }
4313 
4314 struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
4315 					 u64 length, u64 virt_addr, int fd,
4316 					 int mr_access_flags,
4317 					 struct ib_dmah *dmah,
4318 					 struct uverbs_attr_bundle *attrs)
4319 {
4320 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4321 	struct bnxt_re_dev *rdev = pd->rdev;
4322 	struct ib_umem_dmabuf *umem_dmabuf;
4323 	struct ib_umem *umem;
4324 	struct ib_mr *ib_mr;
4325 
4326 	if (dmah)
4327 		return ERR_PTR(-EOPNOTSUPP);
4328 
4329 	umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
4330 						fd, mr_access_flags);
4331 	if (IS_ERR(umem_dmabuf))
4332 		return ERR_CAST(umem_dmabuf);
4333 
4334 	umem = &umem_dmabuf->umem;
4335 
4336 	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4337 	if (IS_ERR(ib_mr))
4338 		ib_umem_release(umem);
4339 	return ib_mr;
4340 }
4341 
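/*
 * Allocate a user context: set up the shared page, advertise device
 * capabilities in the ABI response and parse optional user requests
 * (POW2 disable, variable WQE mode).
 */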
4342 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
4343 {
4344 	struct ib_device *ibdev = ctx->device;
4345 	struct bnxt_re_ucontext *uctx =
4346 		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
4347 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
4348 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
4349 	struct bnxt_re_user_mmap_entry *entry;
4350 	struct bnxt_re_uctx_resp resp = {};
4351 	struct bnxt_re_uctx_req ureq = {};
4352 	u32 chip_met_rev_num = 0;
4353 	int rc;
4354 
4355 	ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
4356 
4357 	if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
4358 		ibdev_dbg(ibdev, "requested ABI version is different from the device's %d",
4359 			  BNXT_RE_ABI_VERSION);
4360 		return -EPERM;
4361 	}
4362 
4363 	uctx->rdev = rdev;
4364 
4365 	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
4366 	if (!uctx->shpg) {
4367 		rc = -ENOMEM;
4368 		goto fail;
4369 	}
4370 	spin_lock_init(&uctx->sh_lock);
4371 
4372 	resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
4373 	chip_met_rev_num = rdev->chip_ctx->chip_num;
4374 	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
4375 			     BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
4376 	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
4377 			     BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
4378 	resp.chip_id0 = chip_met_rev_num;
4379 	/* Temp, use xa_alloc instead */
4380 	resp.dev_id = rdev->en_dev->pdev->devfn;
4381 	resp.max_qp = rdev->qplib_ctx.qpc_count;
4382 	resp.pg_size = PAGE_SIZE;
4383 	resp.cqe_sz = sizeof(struct cq_base);
4384 	resp.max_cqd = dev_attr->max_cq_wqes;
4385 
4386 	if (rdev->chip_ctx->modes.db_push)
4387 		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
4388 
4389 	entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
4390 	if (!entry) {
4391 		rc = -ENOMEM;
4392 		goto cfail;
4393 	}
4394 	uctx->shpage_mmap = &entry->rdma_entry;
4395 	if (rdev->pacing.dbr_pacing)
4396 		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
4397 
4398 	if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
4399 		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_MSN_TABLE_ENABLED;
4400 
4401 	if (udata->inlen >= sizeof(ureq)) {
4402 		rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
4403 		if (rc)
4404 			goto cfail;
4405 		if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
4406 			resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
4407 			uctx->cmask |= BNXT_RE_UCNTX_CAP_POW2_DISABLED;
4408 		}
4409 		if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT) {
4410 			resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
4411 			resp.mode = rdev->chip_ctx->modes.wqe_mode;
4412 			if (resp.mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
4413 				uctx->cmask |= BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
4414 		}
4415 	}
4416 
4417 	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
4418 	if (rc) {
4419 		ibdev_err(ibdev, "Failed to copy user context");
4420 		rc = -EFAULT;
4421 		goto cfail;
4422 	}
4423 
4424 	return 0;
4425 cfail:
4426 	free_page((unsigned long)uctx->shpg);
4427 	uctx->shpg = NULL;
4428 fail:
4429 	return rc;
4430 }
4431 
4432 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
4433 {
4434 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4435 						   struct bnxt_re_ucontext,
4436 						   ib_uctx);
4437 
4438 	struct bnxt_re_dev *rdev = uctx->rdev;
4439 
4440 	rdma_user_mmap_entry_remove(uctx->shpage_mmap);
4441 	uctx->shpage_mmap = NULL;
4442 	if (uctx->shpg)
4443 		free_page((unsigned long)uctx->shpg);
4444 
4445 	if (uctx->dpi.dbr) {
4446 		/* Free the DPI (it is allocated when the application creates
4447 		 * its first PD) and mark the context dpi as NULL
4448 		 */
4449 		bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
4450 		uctx->dpi.dbr = NULL;
4451 	}
4452 }
4453 
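/* Allocate a VNIC and attach it to the given QP; used by the sniffer flow */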
4454 static int bnxt_re_setup_vnic(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
4455 {
4456 	int rc;
4457 
4458 	rc = bnxt_re_hwrm_alloc_vnic(rdev);
4459 	if (rc)
4460 		return rc;
4461 
4462 	rc = bnxt_re_hwrm_cfg_vnic(rdev, qp->qplib_qp.id);
4463 	if (rc)
4464 		goto out_free_vnic;
4465 
4466 	return 0;
4467 out_free_vnic:
4468 	bnxt_re_hwrm_free_vnic(rdev);
4469 	return rc;
4470 }
4471 
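/*
 * Only sniffer flows are supported: configure a VNIC for the QP and
 * create the RoCE mirror flow in HW. A single sniffer flow is allowed
 * per device.
 */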
4472 struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
4473 				    struct ib_flow_attr *attr,
4474 				    struct ib_udata *udata)
4475 {
4476 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
4477 	struct bnxt_re_dev *rdev = qp->rdev;
4478 	struct bnxt_re_flow *flow;
4479 	int rc;
4480 
4481 	if (attr->type != IB_FLOW_ATTR_SNIFFER ||
4482 	    !rdev->rcfw.roce_mirror)
4483 		return ERR_PTR(-EOPNOTSUPP);
4484 
4485 	mutex_lock(&rdev->qp_lock);
4486 	if (rdev->sniffer_flow_created) {
4487 		ibdev_err(&rdev->ibdev, "RoCE mirroring is already configured\n");
4488 		mutex_unlock(&rdev->qp_lock);
4489 		return ERR_PTR(-EBUSY);
4490 	}
4491 
4492 	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
4493 	if (!flow) {
4494 		mutex_unlock(&rdev->qp_lock);
4495 		return ERR_PTR(-ENOMEM);
4496 	}
4497 
4498 	flow->rdev = rdev;
4499 
4500 	rc = bnxt_re_setup_vnic(rdev, qp);
4501 	if (rc)
4502 		goto out_free_flow;
4503 
4504 	rc = bnxt_qplib_create_flow(&rdev->qplib_res);
4505 	if (rc)
4506 		goto out_free_vnic;
4507 
4508 	rdev->sniffer_flow_created = 1;
4509 	mutex_unlock(&rdev->qp_lock);
4510 
4511 	return &flow->ib_flow;
4512 
4513 out_free_vnic:
4514 	bnxt_re_hwrm_free_vnic(rdev);
4515 out_free_flow:
4516 	mutex_unlock(&rdev->qp_lock);
4517 	kfree(flow);
4518 	return ERR_PTR(rc);
4519 }
4520 
4521 int bnxt_re_destroy_flow(struct ib_flow *flow_id)
4522 {
4523 	struct bnxt_re_flow *flow =
4524 		container_of(flow_id, struct bnxt_re_flow, ib_flow);
4525 	struct bnxt_re_dev *rdev = flow->rdev;
4526 	int rc;
4527 
4528 	mutex_lock(&rdev->qp_lock);
4529 	rc = bnxt_qplib_destroy_flow(&rdev->qplib_res);
4530 	if (rc)
4531 		ibdev_dbg(&rdev->ibdev, "failed to destroy_flow rc = %d\n", rc);
4532 	rdev->sniffer_flow_created = 0;
4533 
4534 	bnxt_re_hwrm_free_vnic(rdev);
4535 	mutex_unlock(&rdev->qp_lock);
4536 	kfree(flow);
4537 
4538 	return rc;
4539 }
4540 
4541 static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
4542 {
4543 	struct bnxt_re_cq *cq = NULL, *tmp_cq;
4544 
4545 	hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) {
4546 		if (tmp_cq->qplib_cq.id == cq_id) {
4547 			cq = tmp_cq;
4548 			break;
4549 		}
4550 	}
4551 	return cq;
4552 }
4553 
4554 static struct bnxt_re_srq *bnxt_re_search_for_srq(struct bnxt_re_dev *rdev, u32 srq_id)
4555 {
4556 	struct bnxt_re_srq *srq = NULL, *tmp_srq;
4557 
4558 	hash_for_each_possible(rdev->srq_hash, tmp_srq, hash_entry, srq_id) {
4559 		if (tmp_srq->qplib_srq.id == srq_id) {
4560 			srq = tmp_srq;
4561 			break;
4562 		}
4563 	}
4564 	return srq;
4565 }
4566 
4567 /* Helper function to mmap the virtual memory from user app */
4568 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
4569 {
4570 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4571 						   struct bnxt_re_ucontext,
4572 						   ib_uctx);
4573 	struct bnxt_re_user_mmap_entry *bnxt_entry;
4574 	struct rdma_user_mmap_entry *rdma_entry;
4575 	int ret = 0;
4576 	u64 pfn;
4577 
4578 	rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
4579 	if (!rdma_entry)
4580 		return -EINVAL;
4581 
4582 	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4583 				  rdma_entry);
4584 
4585 	switch (bnxt_entry->mmap_flag) {
4586 	case BNXT_RE_MMAP_WC_DB:
4587 		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4588 		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4589 					pgprot_writecombine(vma->vm_page_prot),
4590 					rdma_entry);
4591 		break;
4592 	case BNXT_RE_MMAP_UC_DB:
4593 		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4594 		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4595 					pgprot_noncached(vma->vm_page_prot),
4596 				rdma_entry);
4597 		break;
4598 	case BNXT_RE_MMAP_SH_PAGE:
4599 		ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
4600 		break;
4601 	case BNXT_RE_MMAP_DBR_BAR:
4602 		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4603 		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4604 					pgprot_noncached(vma->vm_page_prot),
4605 					rdma_entry);
4606 		break;
4607 	case BNXT_RE_MMAP_DBR_PAGE:
4608 	case BNXT_RE_MMAP_TOGGLE_PAGE:
4609 		/* The driver doesn't expect user space to map these pages writable */
4610 		if (vma->vm_flags & VM_WRITE)
4611 			ret = -EFAULT;
4612 		else
4613 			ret = vm_insert_page(vma, vma->vm_start,
4614 					     virt_to_page((void *)bnxt_entry->mem_offset));
4615 		break;
4616 	default:
4617 		ret = -EINVAL;
4618 		break;
4619 	}
4620 
4621 	rdma_user_mmap_entry_put(rdma_entry);
4622 	return ret;
4623 }
4624 
4625 void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
4626 {
4627 	struct bnxt_re_user_mmap_entry *bnxt_entry;
4628 
4629 	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4630 				  rdma_entry);
4631 
4632 	kfree(bnxt_entry);
4633 }
4634 
4635 int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags,
4636 			u32 port_num, const struct ib_wc *in_wc,
4637 			const struct ib_grh *in_grh,
4638 			const struct ib_mad *in_mad, struct ib_mad *out_mad,
4639 			size_t *out_mad_size, u16 *out_mad_pkey_index)
4640 {
4641 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
4642 	struct ib_class_port_info cpi = {};
4643 	int ret = IB_MAD_RESULT_SUCCESS;
4644 	int rc = 0;
4645 
4646 	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
4647 		return ret;
4648 
4649 	switch (in_mad->mad_hdr.attr_id) {
4650 	case IB_PMA_CLASS_PORT_INFO:
4651 		cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
4652 		memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
4653 		break;
4654 	case IB_PMA_PORT_COUNTERS_EXT:
4655 		rc = bnxt_re_assign_pma_port_ext_counters(rdev, out_mad);
4656 		break;
4657 	case IB_PMA_PORT_COUNTERS:
4658 		rc = bnxt_re_assign_pma_port_counters(rdev, out_mad);
4659 		break;
4660 	default:
4661 		rc = -EINVAL;
4662 		break;
4663 	}
4664 	if (rc)
4665 		return IB_MAD_RESULT_FAILURE;
4666 	ret |= IB_MAD_RESULT_REPLY;
4667 	return ret;
4668 }
4669 
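/* User space notification hook: raises a doorbell pacing alert on the device */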
4670 static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
4671 {
4672 	struct bnxt_re_ucontext *uctx;
4673 
4674 	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4675 	bnxt_re_pacing_alert(uctx->rdev);
4676 	return 0;
4677 }
4678 
4679 static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
4680 {
4681 	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4682 	enum bnxt_re_alloc_page_type alloc_type;
4683 	struct bnxt_re_user_mmap_entry *entry;
4684 	enum bnxt_re_mmap_flag mmap_flag;
4685 	struct bnxt_qplib_chip_ctx *cctx;
4686 	struct bnxt_re_ucontext *uctx;
4687 	struct bnxt_re_dev *rdev;
4688 	u64 mmap_offset;
4689 	u32 length;
4690 	u32 dpi = 0;
4691 	u64 addr;
4692 	int err;
4693 
4694 	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4695 	if (IS_ERR(uctx))
4696 		return PTR_ERR(uctx);
4697 
4698 	err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
4699 	if (err)
4700 		return err;
4701 
4702 	rdev = uctx->rdev;
4703 	cctx = rdev->chip_ctx;
4704 
4705 	switch (alloc_type) {
4706 	case BNXT_RE_ALLOC_WC_PAGE:
4707 		if (cctx->modes.db_push)  {
4708 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
4709 						 uctx, BNXT_QPLIB_DPI_TYPE_WC))
4710 				return -ENOMEM;
4711 			length = PAGE_SIZE;
4712 			dpi = uctx->wcdpi.dpi;
4713 			addr = (u64)uctx->wcdpi.umdbr;
4714 			mmap_flag = BNXT_RE_MMAP_WC_DB;
4715 		} else {
4716 			return -EINVAL;
4717 		}
4718 
4719 		break;
4720 	case BNXT_RE_ALLOC_DBR_BAR_PAGE:
4721 		length = PAGE_SIZE;
4722 		addr = (u64)rdev->pacing.dbr_bar_addr;
4723 		mmap_flag = BNXT_RE_MMAP_DBR_BAR;
4724 		break;
4725 
4726 	case BNXT_RE_ALLOC_DBR_PAGE:
4727 		length = PAGE_SIZE;
4728 		addr = (u64)rdev->pacing.dbr_page;
4729 		mmap_flag = BNXT_RE_MMAP_DBR_PAGE;
4730 		break;
4731 
4732 	default:
4733 		return -EOPNOTSUPP;
4734 	}
4735 
4736 	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
4737 	if (!entry)
4738 		return -ENOMEM;
4739 
4740 	uobj->object = entry;
4741 	uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4742 	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
4743 			     &mmap_offset, sizeof(mmap_offset));
4744 	if (err)
4745 		return err;
4746 
4747 	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
4748 			     &length, sizeof(length));
4749 	if (err)
4750 		return err;
4751 
4752 	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
4753 			     &dpi, sizeof(dpi));
4754 	if (err)
4755 		return err;
4756 
4757 	return 0;
4758 }
4759 
4760 static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
4761 				  enum rdma_remove_reason why,
4762 			    struct uverbs_attr_bundle *attrs)
4763 {
4764 	struct  bnxt_re_user_mmap_entry *entry = uobject->object;
4765 	struct bnxt_re_ucontext *uctx = entry->uctx;
4766 
4767 	switch (entry->mmap_flag) {
4768 	case BNXT_RE_MMAP_WC_DB:
4769 		if (uctx && uctx->wcdpi.dbr) {
4770 			struct bnxt_re_dev *rdev = uctx->rdev;
4771 
4772 			bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
4773 			uctx->wcdpi.dbr = NULL;
4774 		}
4775 		break;
4776 	case BNXT_RE_MMAP_DBR_BAR:
4777 	case BNXT_RE_MMAP_DBR_PAGE:
4778 		break;
4779 	default:
4780 		goto exit;
4781 	}
4782 	rdma_user_mmap_entry_remove(&entry->rdma_entry);
4783 exit:
4784 	return 0;
4785 }
4786 
4787 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
4788 			    UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
4789 					    BNXT_RE_OBJECT_ALLOC_PAGE,
4790 					    UVERBS_ACCESS_NEW,
4791 					    UA_MANDATORY),
4792 			    UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
4793 						 enum bnxt_re_alloc_page_type,
4794 						 UA_MANDATORY),
4795 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
4796 						UVERBS_ATTR_TYPE(u64),
4797 						UA_MANDATORY),
4798 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
4799 						UVERBS_ATTR_TYPE(u32),
4800 						UA_MANDATORY),
4801 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
4802 						UVERBS_ATTR_TYPE(u32),
4803 						UA_MANDATORY));
4804 
4805 DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
4806 				    UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
4807 						    BNXT_RE_OBJECT_ALLOC_PAGE,
4808 						    UVERBS_ACCESS_DESTROY,
4809 						    UA_MANDATORY));
4810 
4811 DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
4812 			    UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
4813 			    &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
4814 			    &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
4815 
4816 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);
4817 
4818 DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
4819 			      &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));
4820 
4821 /* Toggle MEM */
4822 static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs)
4823 {
4824 	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
4825 	enum bnxt_re_mmap_flag mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
4826 	enum bnxt_re_get_toggle_mem_type res_type;
4827 	struct bnxt_re_user_mmap_entry *entry;
4828 	struct bnxt_re_ucontext *uctx;
4829 	struct ib_ucontext *ib_uctx;
4830 	struct bnxt_re_dev *rdev;
4831 	struct bnxt_re_srq *srq;
4832 	u32 length = PAGE_SIZE;
4833 	struct bnxt_re_cq *cq;
4834 	u64 mem_offset;
4835 	u32 offset = 0;
4836 	u64 addr = 0;
4837 	u32 res_id;
4838 	int err;
4839 
4840 	ib_uctx = ib_uverbs_get_ucontext(attrs);
4841 	if (IS_ERR(ib_uctx))
4842 		return PTR_ERR(ib_uctx);
4843 
4844 	err = uverbs_get_const(&res_type, attrs, BNXT_RE_TOGGLE_MEM_TYPE);
4845 	if (err)
4846 		return err;
4847 
4848 	uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
4849 	rdev = uctx->rdev;
4850 	err = uverbs_copy_from(&res_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
4851 	if (err)
4852 		return err;
4853 
4854 	switch (res_type) {
4855 	case BNXT_RE_CQ_TOGGLE_MEM:
4856 		cq = bnxt_re_search_for_cq(rdev, res_id);
4857 		if (!cq)
4858 			return -EINVAL;
4859 
4860 		addr = (u64)cq->uctx_cq_page;
4861 		break;
4862 	case BNXT_RE_SRQ_TOGGLE_MEM:
4863 		srq = bnxt_re_search_for_srq(rdev, res_id);
4864 		if (!srq)
4865 			return -EINVAL;
4866 
4867 		addr = (u64)srq->uctx_srq_page;
4868 		break;
4869 
4870 	default:
4871 		return -EOPNOTSUPP;
4872 	}
4873 
4874 	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mem_offset);
4875 	if (!entry)
4876 		return -ENOMEM;
4877 
4878 	uobj->object = entry;
4879 	uverbs_finalize_uobj_create(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
4880 	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
4881 			     &mem_offset, sizeof(mem_offset));
4882 	if (err)
4883 		return err;
4884 
4885 	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
4886 			     &length, sizeof(length));
4887 	if (err)
4888 		return err;
4889 
4890 	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
4891 			     &offset, sizeof(offset));
4892 	if (err)
4893 		return err;
4894 
4895 	return 0;
4896 }
4897 
4898 static int get_toggle_mem_obj_cleanup(struct ib_uobject *uobject,
4899 				      enum rdma_remove_reason why,
4900 				      struct uverbs_attr_bundle *attrs)
4901 {
4902 	struct  bnxt_re_user_mmap_entry *entry = uobject->object;
4903 
4904 	rdma_user_mmap_entry_remove(&entry->rdma_entry);
4905 	return 0;
4906 }
4907 
4908 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM,
4909 			    UVERBS_ATTR_IDR(BNXT_RE_TOGGLE_MEM_HANDLE,
4910 					    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4911 					    UVERBS_ACCESS_NEW,
4912 					    UA_MANDATORY),
4913 			    UVERBS_ATTR_CONST_IN(BNXT_RE_TOGGLE_MEM_TYPE,
4914 						 enum bnxt_re_get_toggle_mem_type,
4915 						 UA_MANDATORY),
4916 			    UVERBS_ATTR_PTR_IN(BNXT_RE_TOGGLE_MEM_RES_ID,
4917 					       UVERBS_ATTR_TYPE(u32),
4918 					       UA_MANDATORY),
4919 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
4920 						UVERBS_ATTR_TYPE(u64),
4921 						UA_MANDATORY),
4922 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
4923 						UVERBS_ATTR_TYPE(u32),
4924 						UA_MANDATORY),
4925 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
4926 						UVERBS_ATTR_TYPE(u32),
4927 						UA_MANDATORY));
4928 
4929 DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
4930 				    UVERBS_ATTR_IDR(BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE,
4931 						    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4932 						    UVERBS_ACCESS_DESTROY,
4933 						    UA_MANDATORY));
4934 
4935 DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4936 			    UVERBS_TYPE_ALLOC_IDR(get_toggle_mem_obj_cleanup),
4937 			    &UVERBS_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM),
4938 			    &UVERBS_METHOD(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM));
4939 
4940 const struct uapi_definition bnxt_re_uapi_defs[] = {
4941 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
4942 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
4943 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_GET_TOGGLE_MEM),
4944 	{}
4945 };
4946