xref: /linux/drivers/infiniband/hw/bnxt_re/ib_verbs.c (revision 17d85f33a83b84e7d36bc3356614ae06c90e7a08)
1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: IB Verbs interpreter
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44 #include <net/addrconf.h>
45 
46 #include <rdma/ib_verbs.h>
47 #include <rdma/ib_user_verbs.h>
48 #include <rdma/ib_umem.h>
49 #include <rdma/ib_addr.h>
50 #include <rdma/ib_mad.h>
51 #include <rdma/ib_cache.h>
52 #include <rdma/ib_pma.h>
53 #include <rdma/uverbs_ioctl.h>
54 #include <linux/hashtable.h>
55 
56 #include "roce_hsi.h"
57 #include "qplib_res.h"
58 #include "qplib_sp.h"
59 #include "qplib_fp.h"
60 #include "qplib_rcfw.h"
61 
62 #include "bnxt_re.h"
63 #include "ib_verbs.h"
64 #include "debugfs.h"
65 
66 #include <rdma/uverbs_types.h>
67 #include <rdma/uverbs_std_types.h>
68 
69 #include <rdma/ib_user_ioctl_cmds.h>
70 
71 #define UVERBS_MODULE_NAME bnxt_re
72 #include <rdma/uverbs_named_ioctl.h>
73 
74 #include <rdma/bnxt_re-abi.h>
75 
__from_ib_access_flags(int iflags)76 static int __from_ib_access_flags(int iflags)
77 {
78 	int qflags = 0;
79 
80 	if (iflags & IB_ACCESS_LOCAL_WRITE)
81 		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
82 	if (iflags & IB_ACCESS_REMOTE_READ)
83 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
84 	if (iflags & IB_ACCESS_REMOTE_WRITE)
85 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
86 	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
87 		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
88 	if (iflags & IB_ACCESS_MW_BIND)
89 		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
90 	if (iflags & IB_ZERO_BASED)
91 		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
92 	if (iflags & IB_ACCESS_ON_DEMAND)
93 		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
94 	return qflags;
95 };
96 
__to_ib_access_flags(int qflags)97 static int __to_ib_access_flags(int qflags)
98 {
99 	int iflags = 0;
100 
101 	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
102 		iflags |= IB_ACCESS_LOCAL_WRITE;
103 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
104 		iflags |= IB_ACCESS_REMOTE_WRITE;
105 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
106 		iflags |= IB_ACCESS_REMOTE_READ;
107 	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
108 		iflags |= IB_ACCESS_REMOTE_ATOMIC;
109 	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
110 		iflags |= IB_ACCESS_MW_BIND;
111 	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
112 		iflags |= IB_ZERO_BASED;
113 	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
114 		iflags |= IB_ACCESS_ON_DEMAND;
115 	return iflags;
116 }
117 
__qp_access_flags_from_ib(struct bnxt_qplib_chip_ctx * cctx,int iflags)118 static u8 __qp_access_flags_from_ib(struct bnxt_qplib_chip_ctx *cctx, int iflags)
119 {
120 	u8 qflags = 0;
121 
122 	if (!bnxt_qplib_is_chip_gen_p5_p7(cctx))
123 		/* For Wh+ */
124 		return (u8)__from_ib_access_flags(iflags);
125 
126 	/* For P5, P7 and later chips */
127 	if (iflags & IB_ACCESS_LOCAL_WRITE)
128 		qflags |= CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE;
129 	if (iflags & IB_ACCESS_REMOTE_WRITE)
130 		qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
131 	if (iflags & IB_ACCESS_REMOTE_READ)
132 		qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
133 	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
134 		qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC;
135 
136 	return qflags;
137 }
138 
__qp_access_flags_to_ib(struct bnxt_qplib_chip_ctx * cctx,u8 qflags)139 static int __qp_access_flags_to_ib(struct bnxt_qplib_chip_ctx *cctx, u8 qflags)
140 {
141 	int iflags = 0;
142 
143 	if (!bnxt_qplib_is_chip_gen_p5_p7(cctx))
144 		/* For Wh+ */
145 		return __to_ib_access_flags(qflags);
146 
147 	/* For P5, P7 and later chips */
148 	if (qflags & CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE)
149 		iflags |= IB_ACCESS_LOCAL_WRITE;
150 	if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE)
151 		iflags |= IB_ACCESS_REMOTE_WRITE;
152 	if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_READ)
153 		iflags |= IB_ACCESS_REMOTE_READ;
154 	if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC)
155 		iflags |= IB_ACCESS_REMOTE_ATOMIC;
156 
157 	return iflags;
158 }
159 
bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev * rdev,struct bnxt_qplib_mrw * qplib_mr)160 static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
161 						   struct bnxt_qplib_mrw *qplib_mr)
162 {
163 	if (_is_relaxed_ordering_supported(rdev->dev_attr->dev_cap_flags2) &&
164 	    pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
165 		qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
166 }
167 
bnxt_re_build_sgl(struct ib_sge * ib_sg_list,struct bnxt_qplib_sge * sg_list,int num)168 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
169 			     struct bnxt_qplib_sge *sg_list, int num)
170 {
171 	int i, total = 0;
172 
173 	for (i = 0; i < num; i++) {
174 		sg_list[i].addr = ib_sg_list[i].addr;
175 		sg_list[i].lkey = ib_sg_list[i].lkey;
176 		sg_list[i].size = ib_sg_list[i].length;
177 		total += sg_list[i].size;
178 	}
179 	return total;
180 }
181 
182 /* Device */
bnxt_re_query_device(struct ib_device * ibdev,struct ib_device_attr * ib_attr,struct ib_udata * udata)183 int bnxt_re_query_device(struct ib_device *ibdev,
184 			 struct ib_device_attr *ib_attr,
185 			 struct ib_udata *udata)
186 {
187 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
188 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
189 
190 	memset(ib_attr, 0, sizeof(*ib_attr));
191 	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
192 	       min(sizeof(dev_attr->fw_ver),
193 		   sizeof(ib_attr->fw_ver)));
194 	addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
195 			    rdev->netdev->dev_addr);
196 	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
197 	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;
198 
199 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
200 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
201 	ib_attr->hw_ver = rdev->en_dev->pdev->revision;
202 	ib_attr->max_qp = dev_attr->max_qp;
203 	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
204 	ib_attr->device_cap_flags =
205 				    IB_DEVICE_CURR_QP_STATE_MOD
206 				    | IB_DEVICE_RC_RNR_NAK_GEN
207 				    | IB_DEVICE_SHUTDOWN_PORT
208 				    | IB_DEVICE_SYS_IMAGE_GUID
209 				    | IB_DEVICE_RESIZE_MAX_WR
210 				    | IB_DEVICE_PORT_ACTIVE_EVENT
211 				    | IB_DEVICE_N_NOTIFY_CQ
212 				    | IB_DEVICE_MEM_WINDOW
213 				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
214 				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
215 	ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
216 	ib_attr->max_send_sge = dev_attr->max_qp_sges;
217 	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
218 	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
219 	ib_attr->max_cq = dev_attr->max_cq;
220 	ib_attr->max_cqe = dev_attr->max_cq_wqes;
221 	ib_attr->max_mr = dev_attr->max_mr;
222 	ib_attr->max_pd = dev_attr->max_pd;
223 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
224 	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
225 	ib_attr->atomic_cap = IB_ATOMIC_NONE;
226 	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
227 	if (dev_attr->is_atomic) {
228 		ib_attr->atomic_cap = IB_ATOMIC_GLOB;
229 		ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
230 	}
231 
232 	ib_attr->max_ee_rd_atom = 0;
233 	ib_attr->max_res_rd_atom = 0;
234 	ib_attr->max_ee_init_rd_atom = 0;
235 	ib_attr->max_ee = 0;
236 	ib_attr->max_rdd = 0;
237 	ib_attr->max_mw = dev_attr->max_mw;
238 	ib_attr->max_raw_ipv6_qp = 0;
239 	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
240 	ib_attr->max_mcast_grp = 0;
241 	ib_attr->max_mcast_qp_attach = 0;
242 	ib_attr->max_total_mcast_qp_attach = 0;
243 	ib_attr->max_ah = dev_attr->max_ah;
244 
245 	ib_attr->max_srq = dev_attr->max_srq;
246 	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
247 	ib_attr->max_srq_sge = dev_attr->max_srq_sges;
248 
249 	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
250 
251 	ib_attr->max_pkeys = 1;
252 	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
253 	return 0;
254 }
255 
bnxt_re_modify_device(struct ib_device * ibdev,int device_modify_mask,struct ib_device_modify * device_modify)256 int bnxt_re_modify_device(struct ib_device *ibdev,
257 			  int device_modify_mask,
258 			  struct ib_device_modify *device_modify)
259 {
260 	ibdev_dbg(ibdev, "Modify device with mask 0x%x", device_modify_mask);
261 
262 	if (device_modify_mask & ~IB_DEVICE_MODIFY_NODE_DESC)
263 		return -EOPNOTSUPP;
264 
265 	if (!(device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC))
266 		return 0;
267 
268 	memcpy(ibdev->node_desc, device_modify->node_desc, IB_DEVICE_NODE_DESC_MAX);
269 	return 0;
270 }
271 
272 /* Port */
bnxt_re_query_port(struct ib_device * ibdev,u32 port_num,struct ib_port_attr * port_attr)273 int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
274 		       struct ib_port_attr *port_attr)
275 {
276 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
277 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
278 	int rc;
279 
280 	memset(port_attr, 0, sizeof(*port_attr));
281 
282 	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
283 		port_attr->state = IB_PORT_ACTIVE;
284 		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
285 	} else {
286 		port_attr->state = IB_PORT_DOWN;
287 		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
288 	}
289 	port_attr->max_mtu = IB_MTU_4096;
290 	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
291 	/* One GID is reserved for RawEth QP. Report one less */
292 	port_attr->gid_tbl_len = (rdev->rcfw.roce_mirror ? (dev_attr->max_sgid - 1) :
293 				  dev_attr->max_sgid);
294 	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
295 				    IB_PORT_DEVICE_MGMT_SUP |
296 				    IB_PORT_VENDOR_CLASS_SUP;
297 	port_attr->ip_gids = true;
298 
299 	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
300 	port_attr->bad_pkey_cntr = 0;
301 	port_attr->qkey_viol_cntr = 0;
302 	port_attr->pkey_tbl_len = dev_attr->max_pkey;
303 	port_attr->lid = 0;
304 	port_attr->sm_lid = 0;
305 	port_attr->lmc = 0;
306 	port_attr->max_vl_num = 4;
307 	port_attr->sm_sl = 0;
308 	port_attr->subnet_timeout = 0;
309 	port_attr->init_type_reply = 0;
310 	rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
311 			      &port_attr->active_width);
312 
313 	return rc;
314 }
315 
bnxt_re_get_port_immutable(struct ib_device * ibdev,u32 port_num,struct ib_port_immutable * immutable)316 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
317 			       struct ib_port_immutable *immutable)
318 {
319 	struct ib_port_attr port_attr;
320 
321 	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
322 		return -EINVAL;
323 
324 	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
325 	immutable->gid_tbl_len = port_attr.gid_tbl_len;
326 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
327 	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
328 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
329 	return 0;
330 }
331 
bnxt_re_query_fw_str(struct ib_device * ibdev,char * str)332 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
333 {
334 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
335 
336 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
337 		 rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1],
338 		 rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]);
339 }
340 
bnxt_re_query_pkey(struct ib_device * ibdev,u32 port_num,u16 index,u16 * pkey)341 int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
342 		       u16 index, u16 *pkey)
343 {
344 	if (index > 0)
345 		return -EINVAL;
346 
347 	*pkey = IB_DEFAULT_PKEY_FULL;
348 
349 	return 0;
350 }
351 
bnxt_re_query_gid(struct ib_device * ibdev,u32 port_num,int index,union ib_gid * gid)352 int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
353 		      int index, union ib_gid *gid)
354 {
355 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
356 	int rc;
357 
358 	/* Ignore port_num */
359 	memset(gid, 0, sizeof(*gid));
360 	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
361 				 &rdev->qplib_res.sgid_tbl, index,
362 				 (struct bnxt_qplib_gid *)gid);
363 	return rc;
364 }
365 
bnxt_re_del_gid(const struct ib_gid_attr * attr,void ** context)366 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
367 {
368 	int rc = 0;
369 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
370 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
371 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
372 	struct bnxt_qplib_gid *gid_to_del;
373 	u16 vlan_id = 0xFFFF;
374 
375 	/* Delete the entry from the hardware */
376 	ctx = *context;
377 	if (!ctx)
378 		return -EINVAL;
379 
380 	if (sgid_tbl->active) {
381 		if (ctx->idx >= sgid_tbl->max)
382 			return -EINVAL;
383 		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
384 		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
385 		/* DEL_GID is called in WQ context(netdevice_event_work_handler)
386 		 * or via the ib_unregister_device path. In the former case QP1
387 		 * may not be destroyed yet, in which case just return as FW
388 		 * needs that entry to be present and will fail it's deletion.
389 		 * We could get invoked again after QP1 is destroyed OR get an
390 		 * ADD_GID call with a different GID value for the same index
391 		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
392 		 */
393 		if (ctx->idx == 0 &&
394 		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
395 		    ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
396 			ibdev_dbg(&rdev->ibdev,
397 				  "Trying to delete GID0 while QP1 is alive\n");
398 			return -EFAULT;
399 		}
400 		ctx->refcnt--;
401 		if (!ctx->refcnt) {
402 			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
403 						 vlan_id,  true);
404 			if (rc) {
405 				ibdev_err(&rdev->ibdev,
406 					  "Failed to remove GID: %#x", rc);
407 			} else {
408 				ctx_tbl = sgid_tbl->ctx;
409 				ctx_tbl[ctx->idx] = NULL;
410 				kfree(ctx);
411 			}
412 		}
413 	} else {
414 		return -EINVAL;
415 	}
416 	return rc;
417 }
418 
bnxt_re_add_gid(const struct ib_gid_attr * attr,void ** context)419 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
420 {
421 	int rc;
422 	u32 tbl_idx = 0;
423 	u16 vlan_id = 0xFFFF;
424 	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
425 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
426 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
427 
428 	rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
429 	if (rc)
430 		return rc;
431 
432 	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
433 				 rdev->qplib_res.netdev->dev_addr,
434 				 vlan_id, true, &tbl_idx, false, 0);
435 	if (rc == -EALREADY) {
436 		ctx_tbl = sgid_tbl->ctx;
437 		ctx_tbl[tbl_idx]->refcnt++;
438 		*context = ctx_tbl[tbl_idx];
439 		return 0;
440 	}
441 
442 	if (rc < 0) {
443 		ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
444 		return rc;
445 	}
446 
447 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
448 	if (!ctx)
449 		return -ENOMEM;
450 	ctx_tbl = sgid_tbl->ctx;
451 	ctx->idx = tbl_idx;
452 	ctx->refcnt = 1;
453 	ctx_tbl[tbl_idx] = ctx;
454 	*context = ctx;
455 
456 	return rc;
457 }
458 
bnxt_re_get_link_layer(struct ib_device * ibdev,u32 port_num)459 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
460 					    u32 port_num)
461 {
462 	return IB_LINK_LAYER_ETHERNET;
463 }
464 
465 #define	BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
466 
bnxt_re_create_fence_wqe(struct bnxt_re_pd * pd)467 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
468 {
469 	struct bnxt_re_fence_data *fence = &pd->fence;
470 	struct ib_mr *ib_mr = &fence->mr->ib_mr;
471 	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
472 	struct bnxt_re_dev *rdev = pd->rdev;
473 
474 	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
475 		return;
476 
477 	memset(wqe, 0, sizeof(*wqe));
478 	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
479 	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
480 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
481 	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
482 	wqe->bind.zero_based = false;
483 	wqe->bind.parent_l_key = ib_mr->lkey;
484 	wqe->bind.va = (u64)(unsigned long)fence->va;
485 	wqe->bind.length = fence->size;
486 	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
487 	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
488 
489 	/* Save the initial rkey in fence structure for now;
490 	 * wqe->bind.r_key will be set at (re)bind time.
491 	 */
492 	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
493 }
494 
bnxt_re_bind_fence_mw(struct bnxt_qplib_qp * qplib_qp)495 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
496 {
497 	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
498 					     qplib_qp);
499 	struct ib_pd *ib_pd = qp->ib_qp.pd;
500 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
501 	struct bnxt_re_fence_data *fence = &pd->fence;
502 	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
503 	struct bnxt_qplib_swqe wqe;
504 	int rc;
505 
506 	memcpy(&wqe, fence_wqe, sizeof(wqe));
507 	wqe.bind.r_key = fence->bind_rkey;
508 	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
509 
510 	ibdev_dbg(&qp->rdev->ibdev,
511 		  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
512 		wqe.bind.r_key, qp->qplib_qp.id, pd);
513 	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
514 	if (rc) {
515 		ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
516 		return rc;
517 	}
518 	bnxt_qplib_post_send_db(&qp->qplib_qp);
519 
520 	return rc;
521 }
522 
bnxt_re_destroy_fence_mr(struct bnxt_re_pd * pd)523 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
524 {
525 	struct bnxt_re_fence_data *fence = &pd->fence;
526 	struct bnxt_re_dev *rdev = pd->rdev;
527 	struct device *dev = &rdev->en_dev->pdev->dev;
528 	struct bnxt_re_mr *mr = fence->mr;
529 
530 	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
531 		return;
532 
533 	if (fence->mw) {
534 		bnxt_re_dealloc_mw(fence->mw);
535 		fence->mw = NULL;
536 	}
537 	if (mr) {
538 		if (mr->ib_mr.rkey)
539 			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
540 					     true);
541 		if (mr->ib_mr.lkey)
542 			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
543 		kfree(mr);
544 		fence->mr = NULL;
545 	}
546 	if (fence->dma_addr) {
547 		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
548 				 DMA_BIDIRECTIONAL);
549 		fence->dma_addr = 0;
550 	}
551 }
552 
bnxt_re_create_fence_mr(struct bnxt_re_pd * pd)553 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
554 {
555 	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
556 	struct bnxt_re_fence_data *fence = &pd->fence;
557 	struct bnxt_re_dev *rdev = pd->rdev;
558 	struct device *dev = &rdev->en_dev->pdev->dev;
559 	struct bnxt_re_mr *mr = NULL;
560 	dma_addr_t dma_addr = 0;
561 	struct ib_mw *mw;
562 	int rc;
563 
564 	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
565 		return 0;
566 
567 	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
568 				  DMA_BIDIRECTIONAL);
569 	rc = dma_mapping_error(dev, dma_addr);
570 	if (rc) {
571 		ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
572 		rc = -EIO;
573 		fence->dma_addr = 0;
574 		goto fail;
575 	}
576 	fence->dma_addr = dma_addr;
577 
578 	/* Allocate a MR */
579 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
580 	if (!mr) {
581 		rc = -ENOMEM;
582 		goto fail;
583 	}
584 	fence->mr = mr;
585 	mr->rdev = rdev;
586 	mr->qplib_mr.pd = &pd->qplib_pd;
587 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
588 	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
589 	if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
590 		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
591 		if (rc) {
592 			ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
593 			goto fail;
594 		}
595 
596 		/* Register MR */
597 		mr->ib_mr.lkey = mr->qplib_mr.lkey;
598 	} else {
599 		mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
600 	}
601 	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
602 	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
603 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
604 			       BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
605 	if (rc) {
606 		ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
607 		goto fail;
608 	}
609 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
610 
611 	/* Create a fence MW only for kernel consumers */
612 	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
613 	if (IS_ERR(mw)) {
614 		ibdev_err(&rdev->ibdev,
615 			  "Failed to create fence-MW for PD: %p\n", pd);
616 		rc = PTR_ERR(mw);
617 		goto fail;
618 	}
619 	fence->mw = mw;
620 
621 	bnxt_re_create_fence_wqe(pd);
622 	return 0;
623 
624 fail:
625 	bnxt_re_destroy_fence_mr(pd);
626 	return rc;
627 }
628 
629 static struct bnxt_re_user_mmap_entry*
bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext * uctx,u64 mem_offset,enum bnxt_re_mmap_flag mmap_flag,u64 * offset)630 bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
631 			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
632 {
633 	struct bnxt_re_user_mmap_entry *entry;
634 	int ret;
635 
636 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
637 	if (!entry)
638 		return NULL;
639 
640 	entry->mem_offset = mem_offset;
641 	entry->mmap_flag = mmap_flag;
642 	entry->uctx = uctx;
643 
644 	switch (mmap_flag) {
645 	case BNXT_RE_MMAP_SH_PAGE:
646 		ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
647 							&entry->rdma_entry, PAGE_SIZE, 0);
648 		break;
649 	case BNXT_RE_MMAP_UC_DB:
650 	case BNXT_RE_MMAP_WC_DB:
651 	case BNXT_RE_MMAP_DBR_BAR:
652 	case BNXT_RE_MMAP_DBR_PAGE:
653 	case BNXT_RE_MMAP_TOGGLE_PAGE:
654 		ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
655 						  &entry->rdma_entry, PAGE_SIZE);
656 		break;
657 	default:
658 		ret = -EINVAL;
659 		break;
660 	}
661 
662 	if (ret) {
663 		kfree(entry);
664 		return NULL;
665 	}
666 	if (offset)
667 		*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
668 
669 	return entry;
670 }
671 
672 /* Protection Domains */
bnxt_re_dealloc_pd(struct ib_pd * ib_pd,struct ib_udata * udata)673 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
674 {
675 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
676 	struct bnxt_re_dev *rdev = pd->rdev;
677 
678 	if (udata) {
679 		rdma_user_mmap_entry_remove(pd->pd_db_mmap);
680 		pd->pd_db_mmap = NULL;
681 	}
682 
683 	bnxt_re_destroy_fence_mr(pd);
684 
685 	if (pd->qplib_pd.id) {
686 		if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
687 					   &rdev->qplib_res.pd_tbl,
688 					   &pd->qplib_pd))
689 			atomic_dec(&rdev->stats.res.pd_count);
690 	}
691 	return 0;
692 }
693 
bnxt_re_alloc_pd(struct ib_pd * ibpd,struct ib_udata * udata)694 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
695 {
696 	struct ib_device *ibdev = ibpd->device;
697 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
698 	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
699 		udata, struct bnxt_re_ucontext, ib_uctx);
700 	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
701 	struct bnxt_re_user_mmap_entry *entry = NULL;
702 	u32 active_pds;
703 	int rc = 0;
704 
705 	pd->rdev = rdev;
706 	if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
707 		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
708 		rc = -ENOMEM;
709 		goto fail;
710 	}
711 
712 	if (udata) {
713 		struct bnxt_re_pd_resp resp = {};
714 
715 		if (!ucntx->dpi.dbr) {
716 			/* Allocate DPI in alloc_pd to avoid failing of
717 			 * ibv_devinfo and family of application when DPIs
718 			 * are depleted.
719 			 */
720 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
721 						 &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
722 				rc = -ENOMEM;
723 				goto dbfail;
724 			}
725 		}
726 
727 		resp.pdid = pd->qplib_pd.id;
728 		/* Still allow mapping this DBR to the new user PD. */
729 		resp.dpi = ucntx->dpi.dpi;
730 
731 		entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
732 						  BNXT_RE_MMAP_UC_DB, &resp.dbr);
733 
734 		if (!entry) {
735 			rc = -ENOMEM;
736 			goto dbfail;
737 		}
738 
739 		pd->pd_db_mmap = &entry->rdma_entry;
740 
741 		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
742 		if (rc) {
743 			rdma_user_mmap_entry_remove(pd->pd_db_mmap);
744 			rc = -EFAULT;
745 			goto dbfail;
746 		}
747 	}
748 
749 	if (!udata)
750 		if (bnxt_re_create_fence_mr(pd))
751 			ibdev_warn(&rdev->ibdev,
752 				   "Failed to create Fence-MR\n");
753 	active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
754 	if (active_pds > rdev->stats.res.pd_watermark)
755 		rdev->stats.res.pd_watermark = active_pds;
756 
757 	return 0;
758 dbfail:
759 	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
760 			      &pd->qplib_pd);
761 fail:
762 	return rc;
763 }
764 
765 /* Address Handles */
bnxt_re_destroy_ah(struct ib_ah * ib_ah,u32 flags)766 int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
767 {
768 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
769 	struct bnxt_re_dev *rdev = ah->rdev;
770 	bool block = true;
771 	int rc;
772 
773 	block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
774 	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
775 	if (BNXT_RE_CHECK_RC(rc)) {
776 		if (rc == -ETIMEDOUT)
777 			rc = 0;
778 		else
779 			goto fail;
780 	}
781 	atomic_dec(&rdev->stats.res.ah_count);
782 fail:
783 	return rc;
784 }
785 
bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)786 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
787 {
788 	u8 nw_type;
789 
790 	switch (ntype) {
791 	case RDMA_NETWORK_IPV4:
792 		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
793 		break;
794 	case RDMA_NETWORK_IPV6:
795 		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
796 		break;
797 	default:
798 		nw_type = CMDQ_CREATE_AH_TYPE_V1;
799 		break;
800 	}
801 	return nw_type;
802 }
803 
bnxt_re_create_ah(struct ib_ah * ib_ah,struct rdma_ah_init_attr * init_attr,struct ib_udata * udata)804 int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
805 		      struct ib_udata *udata)
806 {
807 	struct ib_pd *ib_pd = ib_ah->pd;
808 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
809 	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
810 	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
811 	struct bnxt_re_dev *rdev = pd->rdev;
812 	const struct ib_gid_attr *sgid_attr;
813 	struct bnxt_re_gid_ctx *ctx;
814 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
815 	u32 active_ahs;
816 	u8 nw_type;
817 	int rc;
818 
819 	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
820 		ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
821 		return -EINVAL;
822 	}
823 
824 	ah->rdev = rdev;
825 	ah->qplib_ah.pd = &pd->qplib_pd;
826 
827 	/* Supply the configuration for the HW */
828 	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
829 	       sizeof(union ib_gid));
830 	sgid_attr = grh->sgid_attr;
831 	/* Get the HW context of the GID. The reference
832 	 * of GID table entry is already taken by the caller.
833 	 */
834 	ctx = rdma_read_gid_hw_context(sgid_attr);
835 	ah->qplib_ah.sgid_index = ctx->idx;
836 	ah->qplib_ah.host_sgid_index = grh->sgid_index;
837 	ah->qplib_ah.traffic_class = grh->traffic_class;
838 	ah->qplib_ah.flow_label = grh->flow_label;
839 	ah->qplib_ah.hop_limit = grh->hop_limit;
840 	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
841 
842 	/* Get network header type for this GID */
843 	nw_type = rdma_gid_attr_network_type(sgid_attr);
844 	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
845 
846 	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
847 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
848 				  !(init_attr->flags &
849 				    RDMA_CREATE_AH_SLEEPABLE));
850 	if (rc) {
851 		ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
852 		return rc;
853 	}
854 
855 	/* Write AVID to shared page. */
856 	if (udata) {
857 		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
858 			udata, struct bnxt_re_ucontext, ib_uctx);
859 		unsigned long flag;
860 		u32 *wrptr;
861 
862 		spin_lock_irqsave(&uctx->sh_lock, flag);
863 		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
864 		*wrptr = ah->qplib_ah.id;
865 		wmb(); /* make sure cache is updated. */
866 		spin_unlock_irqrestore(&uctx->sh_lock, flag);
867 	}
868 	active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
869 	if (active_ahs > rdev->stats.res.ah_watermark)
870 		rdev->stats.res.ah_watermark = active_ahs;
871 
872 	return 0;
873 }
874 
bnxt_re_query_ah(struct ib_ah * ib_ah,struct rdma_ah_attr * ah_attr)875 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
876 {
877 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
878 
879 	ah_attr->type = ib_ah->type;
880 	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
881 	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
882 	rdma_ah_set_grh(ah_attr, NULL, 0,
883 			ah->qplib_ah.host_sgid_index,
884 			0, ah->qplib_ah.traffic_class);
885 	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
886 	rdma_ah_set_port_num(ah_attr, 1);
887 	rdma_ah_set_static_rate(ah_attr, 0);
888 	return 0;
889 }
890 
bnxt_re_lock_cqs(struct bnxt_re_qp * qp)891 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
892 	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
893 {
894 	unsigned long flags;
895 
896 	spin_lock_irqsave(&qp->scq->cq_lock, flags);
897 	if (qp->rcq != qp->scq)
898 		spin_lock(&qp->rcq->cq_lock);
899 	else
900 		__acquire(&qp->rcq->cq_lock);
901 
902 	return flags;
903 }
904 
bnxt_re_unlock_cqs(struct bnxt_re_qp * qp,unsigned long flags)905 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
906 			unsigned long flags)
907 	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
908 {
909 	if (qp->rcq != qp->scq)
910 		spin_unlock(&qp->rcq->cq_lock);
911 	else
912 		__release(&qp->rcq->cq_lock);
913 	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
914 }
915 
bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp * qp)916 static void bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
917 {
918 	struct bnxt_re_qp *gsi_sqp;
919 	struct bnxt_re_ah *gsi_sah;
920 	struct bnxt_re_dev *rdev;
921 	int rc;
922 
923 	rdev = qp->rdev;
924 	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
925 	gsi_sah = rdev->gsi_ctx.gsi_sah;
926 
927 	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
928 	bnxt_qplib_destroy_ah(&rdev->qplib_res,
929 			      &gsi_sah->qplib_ah,
930 			      true);
931 	atomic_dec(&rdev->stats.res.ah_count);
932 	bnxt_qplib_clean_qp(&qp->qplib_qp);
933 
934 	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
935 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
936 	if (rc)
937 		ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
938 
939 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
940 
941 	/* remove from active qp list */
942 	mutex_lock(&rdev->qp_lock);
943 	list_del(&gsi_sqp->list);
944 	mutex_unlock(&rdev->qp_lock);
945 	atomic_dec(&rdev->stats.res.qp_count);
946 
947 	kfree(rdev->gsi_ctx.sqp_tbl);
948 	kfree(gsi_sah);
949 	kfree(gsi_sqp);
950 	rdev->gsi_ctx.gsi_sqp = NULL;
951 	rdev->gsi_ctx.gsi_sah = NULL;
952 	rdev->gsi_ctx.sqp_tbl = NULL;
953 }
954 
bnxt_re_del_unique_gid(struct bnxt_re_dev * rdev)955 static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)
956 {
957 	int rc;
958 
959 	if (!rdev->rcfw.roce_mirror)
960 		return;
961 
962 	rc = bnxt_qplib_del_sgid(&rdev->qplib_res.sgid_tbl,
963 				 (struct bnxt_qplib_gid *)&rdev->ugid,
964 				 0xFFFF, true);
965 	if (rc)
966 		dev_err(rdev_to_dev(rdev), "Failed to delete unique GID, rc: %d\n", rc);
967 }
968 
969 /* Queue Pairs */
bnxt_re_destroy_qp(struct ib_qp * ib_qp,struct ib_udata * udata)970 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
971 {
972 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
973 	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
974 	struct bnxt_re_dev *rdev = qp->rdev;
975 	struct bnxt_qplib_nq *scq_nq = NULL;
976 	struct bnxt_qplib_nq *rcq_nq = NULL;
977 	unsigned int flags;
978 	int rc;
979 
980 	bnxt_re_debug_rem_qpinfo(rdev, qp);
981 
982 	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
983 
984 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
985 	if (rc)
986 		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
987 
988 	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
989 		flags = bnxt_re_lock_cqs(qp);
990 		bnxt_qplib_clean_qp(&qp->qplib_qp);
991 		bnxt_re_unlock_cqs(qp, flags);
992 	}
993 
994 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
995 
996 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
997 		bnxt_re_destroy_gsi_sqp(qp);
998 
999 	mutex_lock(&rdev->qp_lock);
1000 	list_del(&qp->list);
1001 	mutex_unlock(&rdev->qp_lock);
1002 	atomic_dec(&rdev->stats.res.qp_count);
1003 	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
1004 		atomic_dec(&rdev->stats.res.rc_qp_count);
1005 	else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
1006 		atomic_dec(&rdev->stats.res.ud_qp_count);
1007 
1008 	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
1009 		bnxt_re_del_unique_gid(rdev);
1010 
1011 	ib_umem_release(qp->rumem);
1012 	ib_umem_release(qp->sumem);
1013 
1014 	/* Flush all the entries of notification queue associated with
1015 	 * given qp.
1016 	 */
1017 	scq_nq = qplib_qp->scq->nq;
1018 	rcq_nq = qplib_qp->rcq->nq;
1019 	bnxt_re_synchronize_nq(scq_nq);
1020 	if (scq_nq != rcq_nq)
1021 		bnxt_re_synchronize_nq(rcq_nq);
1022 
1023 	return 0;
1024 }
1025 
__from_ib_qp_type(enum ib_qp_type type)1026 static u8 __from_ib_qp_type(enum ib_qp_type type)
1027 {
1028 	switch (type) {
1029 	case IB_QPT_GSI:
1030 		return CMDQ_CREATE_QP1_TYPE_GSI;
1031 	case IB_QPT_RC:
1032 		return CMDQ_CREATE_QP_TYPE_RC;
1033 	case IB_QPT_UD:
1034 		return CMDQ_CREATE_QP_TYPE_UD;
1035 	case IB_QPT_RAW_PACKET:
1036 		return CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE;
1037 	default:
1038 		return IB_QPT_MAX;
1039 	}
1040 }
1041 
bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp * qplqp,int rsge,int max)1042 static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
1043 				   int rsge, int max)
1044 {
1045 	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1046 		rsge = max;
1047 	return bnxt_re_get_rwqe_size(rsge);
1048 }
1049 
bnxt_re_get_wqe_size(int ilsize,int nsge)1050 static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
1051 {
1052 	u16 wqe_size, calc_ils;
1053 
1054 	wqe_size = bnxt_re_get_swqe_size(nsge);
1055 	if (ilsize) {
1056 		calc_ils = sizeof(struct sq_send_hdr) + ilsize;
1057 		wqe_size = max_t(u16, calc_ils, wqe_size);
1058 		wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
1059 	}
1060 	return wqe_size;
1061 }
1062 
bnxt_re_setup_swqe_size(struct bnxt_re_qp * qp,struct ib_qp_init_attr * init_attr)1063 static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
1064 				   struct ib_qp_init_attr *init_attr)
1065 {
1066 	struct bnxt_qplib_dev_attr *dev_attr;
1067 	struct bnxt_qplib_qp *qplqp;
1068 	struct bnxt_re_dev *rdev;
1069 	struct bnxt_qplib_q *sq;
1070 	int align, ilsize;
1071 
1072 	rdev = qp->rdev;
1073 	qplqp = &qp->qplib_qp;
1074 	sq = &qplqp->sq;
1075 	dev_attr = rdev->dev_attr;
1076 
1077 	align = sizeof(struct sq_send_hdr);
1078 	ilsize = ALIGN(init_attr->cap.max_inline_data, align);
1079 
1080 	/* For gen p4 and gen p5 fixed wqe compatibility mode
1081 	 * wqe size is fixed to 128 bytes - ie 6 SGEs
1082 	 */
1083 	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) {
1084 		sq->wqe_size = bnxt_re_get_swqe_size(BNXT_STATIC_MAX_SGE);
1085 		sq->max_sge = BNXT_STATIC_MAX_SGE;
1086 	} else {
1087 		sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
1088 		if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
1089 			return -EINVAL;
1090 	}
1091 
1092 	if (init_attr->cap.max_inline_data) {
1093 		qplqp->max_inline_data = sq->wqe_size -
1094 			sizeof(struct sq_send_hdr);
1095 		init_attr->cap.max_inline_data = qplqp->max_inline_data;
1096 	}
1097 
1098 	return 0;
1099 }
1100 
bnxt_re_init_user_qp(struct bnxt_re_dev * rdev,struct bnxt_re_pd * pd,struct bnxt_re_qp * qp,struct bnxt_re_ucontext * cntx,struct bnxt_re_qp_req * ureq)1101 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
1102 				struct bnxt_re_qp *qp, struct bnxt_re_ucontext *cntx,
1103 				struct bnxt_re_qp_req *ureq)
1104 {
1105 	struct bnxt_qplib_qp *qplib_qp;
1106 	int bytes = 0, psn_sz;
1107 	struct ib_umem *umem;
1108 	int psn_nume;
1109 
1110 	qplib_qp = &qp->qplib_qp;
1111 
1112 	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
1113 	/* Consider mapping PSN search memory only for RC QPs. */
1114 	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1115 		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
1116 						   sizeof(struct sq_psn_search_ext) :
1117 						   sizeof(struct sq_psn_search);
1118 		if (cntx && bnxt_re_is_var_size_supported(rdev, cntx)) {
1119 			psn_nume = ureq->sq_slots;
1120 		} else {
1121 			psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1122 			qplib_qp->sq.max_wqe : ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
1123 				 sizeof(struct bnxt_qplib_sge));
1124 		}
1125 		if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
1126 			psn_nume = roundup_pow_of_two(psn_nume);
1127 		bytes += (psn_nume * psn_sz);
1128 	}
1129 
1130 	bytes = PAGE_ALIGN(bytes);
1131 	umem = ib_umem_get(&rdev->ibdev, ureq->qpsva, bytes,
1132 			   IB_ACCESS_LOCAL_WRITE);
1133 	if (IS_ERR(umem))
1134 		return PTR_ERR(umem);
1135 
1136 	qp->sumem = umem;
1137 	qplib_qp->sq.sg_info.umem = umem;
1138 	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
1139 	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
1140 	qplib_qp->qp_handle = ureq->qp_handle;
1141 
1142 	if (!qp->qplib_qp.srq) {
1143 		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
1144 		bytes = PAGE_ALIGN(bytes);
1145 		umem = ib_umem_get(&rdev->ibdev, ureq->qprva, bytes,
1146 				   IB_ACCESS_LOCAL_WRITE);
1147 		if (IS_ERR(umem))
1148 			goto rqfail;
1149 		qp->rumem = umem;
1150 		qplib_qp->rq.sg_info.umem = umem;
1151 		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
1152 		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
1153 	}
1154 
1155 	qplib_qp->dpi = &cntx->dpi;
1156 	return 0;
1157 rqfail:
1158 	ib_umem_release(qp->sumem);
1159 	qp->sumem = NULL;
1160 	memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
1161 
1162 	return PTR_ERR(umem);
1163 }
1164 
bnxt_re_create_shadow_qp_ah(struct bnxt_re_pd * pd,struct bnxt_qplib_res * qp1_res,struct bnxt_qplib_qp * qp1_qp)1165 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
1166 				(struct bnxt_re_pd *pd,
1167 				 struct bnxt_qplib_res *qp1_res,
1168 				 struct bnxt_qplib_qp *qp1_qp)
1169 {
1170 	struct bnxt_re_dev *rdev = pd->rdev;
1171 	struct bnxt_re_ah *ah;
1172 	union ib_gid sgid;
1173 	int rc;
1174 
1175 	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
1176 	if (!ah)
1177 		return NULL;
1178 
1179 	ah->rdev = rdev;
1180 	ah->qplib_ah.pd = &pd->qplib_pd;
1181 
1182 	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
1183 	if (rc)
1184 		goto fail;
1185 
1186 	/* supply the dgid data same as sgid */
1187 	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
1188 	       sizeof(union ib_gid));
1189 	ah->qplib_ah.sgid_index = 0;
1190 
1191 	ah->qplib_ah.traffic_class = 0;
1192 	ah->qplib_ah.flow_label = 0;
1193 	ah->qplib_ah.hop_limit = 1;
1194 	ah->qplib_ah.sl = 0;
1195 	/* Have DMAC same as SMAC */
1196 	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
1197 
1198 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
1199 	if (rc) {
1200 		ibdev_err(&rdev->ibdev,
1201 			  "Failed to allocate HW AH for Shadow QP");
1202 		goto fail;
1203 	}
1204 	atomic_inc(&rdev->stats.res.ah_count);
1205 
1206 	return ah;
1207 
1208 fail:
1209 	kfree(ah);
1210 	return NULL;
1211 }
1212 
bnxt_re_create_shadow_qp(struct bnxt_re_pd * pd,struct bnxt_qplib_res * qp1_res,struct bnxt_qplib_qp * qp1_qp)1213 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1214 				(struct bnxt_re_pd *pd,
1215 				 struct bnxt_qplib_res *qp1_res,
1216 				 struct bnxt_qplib_qp *qp1_qp)
1217 {
1218 	struct bnxt_re_dev *rdev = pd->rdev;
1219 	struct bnxt_re_qp *qp;
1220 	int rc;
1221 
1222 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1223 	if (!qp)
1224 		return NULL;
1225 
1226 	qp->rdev = rdev;
1227 
1228 	/* Initialize the shadow QP structure from the QP1 values */
1229 	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1230 
1231 	qp->qplib_qp.pd = &pd->qplib_pd;
1232 	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1233 	qp->qplib_qp.type = IB_QPT_UD;
1234 
1235 	qp->qplib_qp.max_inline_data = 0;
1236 	qp->qplib_qp.sig_type = true;
1237 
1238 	/* Shadow QP SQ depth should be same as QP1 RQ depth */
1239 	qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
1240 	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1241 	qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe;
1242 	qp->qplib_qp.sq.max_sge = 2;
1243 	/* Q full delta can be 1 since it is internal QP */
1244 	qp->qplib_qp.sq.q_full_delta = 1;
1245 	qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
1246 	qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
1247 
1248 	qp->qplib_qp.scq = qp1_qp->scq;
1249 	qp->qplib_qp.rcq = qp1_qp->rcq;
1250 
1251 	qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
1252 	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1253 	qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe;
1254 	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1255 	/* Q full delta can be 1 since it is internal QP */
1256 	qp->qplib_qp.rq.q_full_delta = 1;
1257 	qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
1258 	qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
1259 
1260 	qp->qplib_qp.mtu = qp1_qp->mtu;
1261 
1262 	qp->qplib_qp.sq_hdr_buf_size = 0;
1263 	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1264 	qp->qplib_qp.dpi = &rdev->dpi_privileged;
1265 
1266 	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1267 	if (rc)
1268 		goto fail;
1269 
1270 	spin_lock_init(&qp->sq_lock);
1271 	INIT_LIST_HEAD(&qp->list);
1272 	mutex_lock(&rdev->qp_lock);
1273 	list_add_tail(&qp->list, &rdev->qp_list);
1274 	atomic_inc(&rdev->stats.res.qp_count);
1275 	mutex_unlock(&rdev->qp_lock);
1276 	return qp;
1277 fail:
1278 	kfree(qp);
1279 	return NULL;
1280 }
1281 
bnxt_re_init_rq_attr(struct bnxt_re_qp * qp,struct ib_qp_init_attr * init_attr,struct bnxt_re_ucontext * uctx)1282 static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1283 				struct ib_qp_init_attr *init_attr,
1284 				struct bnxt_re_ucontext *uctx)
1285 {
1286 	struct bnxt_qplib_dev_attr *dev_attr;
1287 	struct bnxt_qplib_qp *qplqp;
1288 	struct bnxt_re_dev *rdev;
1289 	struct bnxt_qplib_q *rq;
1290 	int entries;
1291 
1292 	rdev = qp->rdev;
1293 	qplqp = &qp->qplib_qp;
1294 	rq = &qplqp->rq;
1295 	dev_attr = rdev->dev_attr;
1296 
1297 	if (init_attr->srq) {
1298 		struct bnxt_re_srq *srq;
1299 
1300 		srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
1301 		qplqp->srq = &srq->qplib_srq;
1302 		rq->max_wqe = 0;
1303 	} else {
1304 		rq->max_sge = init_attr->cap.max_recv_sge;
1305 		if (rq->max_sge > dev_attr->max_qp_sges)
1306 			rq->max_sge = dev_attr->max_qp_sges;
1307 		init_attr->cap.max_recv_sge = rq->max_sge;
1308 		rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
1309 						       dev_attr->max_qp_sges);
1310 		/* Allocate 1 more than what's provided so posting max doesn't
1311 		 * mean empty.
1312 		 */
1313 		entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
1314 		rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1315 		rq->max_sw_wqe = rq->max_wqe;
1316 		rq->q_full_delta = 0;
1317 		rq->sg_info.pgsize = PAGE_SIZE;
1318 		rq->sg_info.pgshft = PAGE_SHIFT;
1319 	}
1320 
1321 	return 0;
1322 }
1323 
bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp * qp)1324 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1325 {
1326 	struct bnxt_qplib_dev_attr *dev_attr;
1327 	struct bnxt_qplib_qp *qplqp;
1328 	struct bnxt_re_dev *rdev;
1329 
1330 	rdev = qp->rdev;
1331 	qplqp = &qp->qplib_qp;
1332 	dev_attr = rdev->dev_attr;
1333 
1334 	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
1335 		qplqp->rq.max_sge = dev_attr->max_qp_sges;
1336 		if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
1337 			qplqp->rq.max_sge = dev_attr->max_qp_sges;
1338 		qplqp->rq.max_sge = 6;
1339 	}
1340 }
1341 
bnxt_re_init_sq_attr(struct bnxt_re_qp * qp,struct ib_qp_init_attr * init_attr,struct bnxt_re_ucontext * uctx,struct bnxt_re_qp_req * ureq)1342 static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
1343 				struct ib_qp_init_attr *init_attr,
1344 				struct bnxt_re_ucontext *uctx,
1345 				struct bnxt_re_qp_req *ureq)
1346 {
1347 	struct bnxt_qplib_dev_attr *dev_attr;
1348 	struct bnxt_qplib_qp *qplqp;
1349 	struct bnxt_re_dev *rdev;
1350 	struct bnxt_qplib_q *sq;
1351 	int diff = 0;
1352 	int entries;
1353 	int rc;
1354 
1355 	rdev = qp->rdev;
1356 	qplqp = &qp->qplib_qp;
1357 	sq = &qplqp->sq;
1358 	dev_attr = rdev->dev_attr;
1359 
1360 	sq->max_sge = init_attr->cap.max_send_sge;
1361 	entries = init_attr->cap.max_send_wr;
1362 	if (uctx && qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) {
1363 		sq->max_wqe = ureq->sq_slots;
1364 		sq->max_sw_wqe = ureq->sq_slots;
1365 		sq->wqe_size = sizeof(struct sq_sge);
1366 	} else {
1367 		if (sq->max_sge > dev_attr->max_qp_sges) {
1368 			sq->max_sge = dev_attr->max_qp_sges;
1369 			init_attr->cap.max_send_sge = sq->max_sge;
1370 		}
1371 
1372 		rc = bnxt_re_setup_swqe_size(qp, init_attr);
1373 		if (rc)
1374 			return rc;
1375 
1376 		/* Allocate 128 + 1 more than what's provided */
1377 		diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
1378 			0 : BNXT_QPLIB_RESERVED_QP_WRS;
1379 		entries = bnxt_re_init_depth(entries + diff + 1, uctx);
1380 		sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
1381 		if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
1382 			sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true);
1383 		else
1384 			sq->max_sw_wqe = sq->max_wqe;
1385 
1386 	}
1387 	sq->q_full_delta = diff + 1;
1388 	/*
1389 	 * Reserving one slot for Phantom WQE. Application can
1390 	 * post one extra entry in this case. But allowing this to avoid
1391 	 * unexpected Queue full condition
1392 	 */
1393 	qplqp->sq.q_full_delta -= 1;
1394 	qplqp->sq.sg_info.pgsize = PAGE_SIZE;
1395 	qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
1396 
1397 	return 0;
1398 }
1399 
bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp * qp,struct ib_qp_init_attr * init_attr,struct bnxt_re_ucontext * uctx)1400 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
1401 				       struct ib_qp_init_attr *init_attr,
1402 				       struct bnxt_re_ucontext *uctx)
1403 {
1404 	struct bnxt_qplib_dev_attr *dev_attr;
1405 	struct bnxt_qplib_qp *qplqp;
1406 	struct bnxt_re_dev *rdev;
1407 	int entries;
1408 
1409 	rdev = qp->rdev;
1410 	qplqp = &qp->qplib_qp;
1411 	dev_attr = rdev->dev_attr;
1412 
1413 	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
1414 		entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
1415 		qplqp->sq.max_wqe = min_t(u32, entries,
1416 					  dev_attr->max_qp_wqes + 1);
1417 		qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
1418 			init_attr->cap.max_send_wr;
1419 		qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
1420 		if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
1421 			qplqp->sq.max_sge = dev_attr->max_qp_sges;
1422 	}
1423 }
1424 
bnxt_re_init_qp_type(struct bnxt_re_dev * rdev,struct ib_qp_init_attr * init_attr)1425 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
1426 				struct ib_qp_init_attr *init_attr)
1427 {
1428 	struct bnxt_qplib_chip_ctx *chip_ctx;
1429 	int qptype;
1430 
1431 	chip_ctx = rdev->chip_ctx;
1432 
1433 	qptype = __from_ib_qp_type(init_attr->qp_type);
1434 	if (qptype == IB_QPT_MAX) {
1435 		ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
1436 		qptype = -EOPNOTSUPP;
1437 		goto out;
1438 	}
1439 
1440 	if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
1441 	    init_attr->qp_type == IB_QPT_GSI)
1442 		qptype = CMDQ_CREATE_QP_TYPE_GSI;
1443 out:
1444 	return qptype;
1445 }
1446 
bnxt_re_init_qp_attr(struct bnxt_re_qp * qp,struct bnxt_re_pd * pd,struct ib_qp_init_attr * init_attr,struct bnxt_re_ucontext * uctx,struct bnxt_re_qp_req * ureq)1447 static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1448 				struct ib_qp_init_attr *init_attr,
1449 				struct bnxt_re_ucontext *uctx,
1450 				struct bnxt_re_qp_req *ureq)
1451 {
1452 	struct bnxt_qplib_dev_attr *dev_attr;
1453 	struct bnxt_qplib_qp *qplqp;
1454 	struct bnxt_re_dev *rdev;
1455 	struct bnxt_re_cq *cq;
1456 	int rc = 0, qptype;
1457 
1458 	rdev = qp->rdev;
1459 	qplqp = &qp->qplib_qp;
1460 	dev_attr = rdev->dev_attr;
1461 
1462 	/* Setup misc params */
1463 	ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
1464 	qplqp->pd = &pd->qplib_pd;
1465 	qplqp->qp_handle = (u64)qplqp;
1466 	qplqp->max_inline_data = init_attr->cap.max_inline_data;
1467 	qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1468 	qptype = bnxt_re_init_qp_type(rdev, init_attr);
1469 	if (qptype < 0) {
1470 		rc = qptype;
1471 		goto out;
1472 	}
1473 	qplqp->type = (u8)qptype;
1474 	qplqp->wqe_mode = bnxt_re_is_var_size_supported(rdev, uctx);
1475 	if (init_attr->qp_type == IB_QPT_RC) {
1476 		qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
1477 		qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1478 	}
1479 	qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1480 	qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
1481 	if (init_attr->create_flags) {
1482 		ibdev_dbg(&rdev->ibdev,
1483 			  "QP create flags 0x%x not supported",
1484 			  init_attr->create_flags);
1485 		return -EOPNOTSUPP;
1486 	}
1487 
1488 	/* Setup CQs */
1489 	if (init_attr->send_cq) {
1490 		cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
1491 		qplqp->scq = &cq->qplib_cq;
1492 		qp->scq = cq;
1493 	}
1494 
1495 	if (init_attr->recv_cq) {
1496 		cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
1497 		qplqp->rcq = &cq->qplib_cq;
1498 		qp->rcq = cq;
1499 	}
1500 
1501 	/* Setup RQ/SRQ */
1502 	rc = bnxt_re_init_rq_attr(qp, init_attr, uctx);
1503 	if (rc)
1504 		goto out;
1505 	if (init_attr->qp_type == IB_QPT_GSI)
1506 		bnxt_re_adjust_gsi_rq_attr(qp);
1507 
1508 	/* Setup SQ */
1509 	rc = bnxt_re_init_sq_attr(qp, init_attr, uctx, ureq);
1510 	if (rc)
1511 		goto out;
1512 	if (init_attr->qp_type == IB_QPT_GSI)
1513 		bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
1514 
1515 	if (uctx) /* This will update DPI and qp_handle */
1516 		rc = bnxt_re_init_user_qp(rdev, pd, qp, uctx, ureq);
1517 out:
1518 	return rc;
1519 }
1520 
bnxt_re_create_shadow_gsi(struct bnxt_re_qp * qp,struct bnxt_re_pd * pd)1521 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
1522 				     struct bnxt_re_pd *pd)
1523 {
1524 	struct bnxt_re_sqp_entries *sqp_tbl;
1525 	struct bnxt_re_dev *rdev;
1526 	struct bnxt_re_qp *sqp;
1527 	struct bnxt_re_ah *sah;
1528 	int rc = 0;
1529 
1530 	rdev = qp->rdev;
1531 	/* Create a shadow QP to handle the QP1 traffic */
1532 	sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
1533 			  GFP_KERNEL);
1534 	if (!sqp_tbl)
1535 		return -ENOMEM;
1536 	rdev->gsi_ctx.sqp_tbl = sqp_tbl;
1537 
1538 	sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
1539 	if (!sqp) {
1540 		rc = -ENODEV;
1541 		ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
1542 		goto out;
1543 	}
1544 	rdev->gsi_ctx.gsi_sqp = sqp;
1545 
1546 	sqp->rcq = qp->rcq;
1547 	sqp->scq = qp->scq;
1548 	sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1549 					  &qp->qplib_qp);
1550 	if (!sah) {
1551 		bnxt_qplib_destroy_qp(&rdev->qplib_res,
1552 				      &sqp->qplib_qp);
1553 		rc = -ENODEV;
1554 		ibdev_err(&rdev->ibdev,
1555 			  "Failed to create AH entry for ShadowQP");
1556 		goto out;
1557 	}
1558 	rdev->gsi_ctx.gsi_sah = sah;
1559 
1560 	return 0;
1561 out:
1562 	kfree(sqp_tbl);
1563 	return rc;
1564 }
1565 
bnxt_re_create_gsi_qp(struct bnxt_re_qp * qp,struct bnxt_re_pd * pd,struct ib_qp_init_attr * init_attr)1566 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1567 				 struct ib_qp_init_attr *init_attr)
1568 {
1569 	struct bnxt_re_dev *rdev;
1570 	struct bnxt_qplib_qp *qplqp;
1571 	int rc;
1572 
1573 	rdev = qp->rdev;
1574 	qplqp = &qp->qplib_qp;
1575 
1576 	qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1577 	qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1578 
1579 	rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
1580 	if (rc) {
1581 		ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
1582 		goto out;
1583 	}
1584 
1585 	rc = bnxt_re_create_shadow_gsi(qp, pd);
1586 out:
1587 	return rc;
1588 }
1589 
bnxt_re_test_qp_limits(struct bnxt_re_dev * rdev,struct ib_qp_init_attr * init_attr,struct bnxt_qplib_dev_attr * dev_attr)1590 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
1591 				   struct ib_qp_init_attr *init_attr,
1592 				   struct bnxt_qplib_dev_attr *dev_attr)
1593 {
1594 	bool rc = true;
1595 
1596 	if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
1597 	    init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
1598 	    init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
1599 	    init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
1600 	    init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
1601 		ibdev_err(&rdev->ibdev,
1602 			  "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
1603 			  init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
1604 			  init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
1605 			  init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
1606 			  init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
1607 			  init_attr->cap.max_inline_data,
1608 			  dev_attr->max_inline_data);
1609 		rc = false;
1610 	}
1611 	return rc;
1612 }
1613 
bnxt_re_add_unique_gid(struct bnxt_re_dev * rdev)1614 static int bnxt_re_add_unique_gid(struct bnxt_re_dev *rdev)
1615 {
1616 	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
1617 	struct bnxt_qplib_res *res = &rdev->qplib_res;
1618 	int rc;
1619 
1620 	if (!rdev->rcfw.roce_mirror)
1621 		return 0;
1622 
1623 	rdev->ugid.global.subnet_prefix = cpu_to_be64(0xfe8000000000abcdLL);
1624 	addrconf_ifid_eui48(&rdev->ugid.raw[8], rdev->netdev);
1625 
1626 	rc = bnxt_qplib_add_sgid(&res->sgid_tbl,
1627 				 (struct bnxt_qplib_gid *)&rdev->ugid,
1628 				 rdev->qplib_res.netdev->dev_addr,
1629 				 0xFFFF, true, &rdev->ugid_index, true,
1630 				 hctx->stats3.fw_id);
1631 	if (rc)
1632 		dev_err(rdev_to_dev(rdev), "Failed to add unique GID. rc = %d\n", rc);
1633 
1634 	return rc;
1635 }
1636 
bnxt_re_create_qp(struct ib_qp * ib_qp,struct ib_qp_init_attr * qp_init_attr,struct ib_udata * udata)1637 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
1638 		      struct ib_udata *udata)
1639 {
1640 	struct bnxt_qplib_dev_attr *dev_attr;
1641 	struct bnxt_re_ucontext *uctx;
1642 	struct bnxt_re_qp_req ureq;
1643 	struct bnxt_re_dev *rdev;
1644 	struct bnxt_re_pd *pd;
1645 	struct bnxt_re_qp *qp;
1646 	struct ib_pd *ib_pd;
1647 	u32 active_qps;
1648 	int rc;
1649 
1650 	ib_pd = ib_qp->pd;
1651 	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1652 	rdev = pd->rdev;
1653 	dev_attr = rdev->dev_attr;
1654 	qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1655 
1656 	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
1657 	if (udata)
1658 		if (ib_copy_from_udata(&ureq, udata,  min(udata->inlen, sizeof(ureq))))
1659 			return -EFAULT;
1660 
1661 	rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
1662 	if (!rc) {
1663 		rc = -EINVAL;
1664 		goto fail;
1665 	}
1666 
1667 	qp->rdev = rdev;
1668 	rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, uctx, &ureq);
1669 	if (rc)
1670 		goto fail;
1671 
1672 	if (qp_init_attr->qp_type == IB_QPT_GSI &&
1673 	    !(bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))) {
1674 		rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
1675 		if (rc == -ENODEV)
1676 			goto qp_destroy;
1677 		if (rc)
1678 			goto fail;
1679 	} else {
1680 		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1681 		if (rc) {
1682 			ibdev_err(&rdev->ibdev, "Failed to create HW QP");
1683 			goto free_umem;
1684 		}
1685 		if (udata) {
1686 			struct bnxt_re_qp_resp resp;
1687 
1688 			resp.qpid = qp->qplib_qp.id;
1689 			resp.rsvd = 0;
1690 			rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1691 			if (rc) {
1692 				ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
1693 				goto qp_destroy;
1694 			}
1695 		}
1696 	}
1697 
1698 	/* Support for RawEth QP is added to capture TCP pkt dump.
1699 	 * So unique SGID is used to avoid incorrect statistics on per
1700 	 * function stats_ctx
1701 	 */
1702 	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE) {
1703 		rc = bnxt_re_add_unique_gid(rdev);
1704 		if (rc)
1705 			goto qp_destroy;
1706 		qp->qplib_qp.ugid_index = rdev->ugid_index;
1707 	}
1708 
1709 	qp->ib_qp.qp_num = qp->qplib_qp.id;
1710 	if (qp_init_attr->qp_type == IB_QPT_GSI)
1711 		rdev->gsi_ctx.gsi_qp = qp;
1712 	spin_lock_init(&qp->sq_lock);
1713 	spin_lock_init(&qp->rq_lock);
1714 	INIT_LIST_HEAD(&qp->list);
1715 	mutex_lock(&rdev->qp_lock);
1716 	list_add_tail(&qp->list, &rdev->qp_list);
1717 	mutex_unlock(&rdev->qp_lock);
1718 	active_qps = atomic_inc_return(&rdev->stats.res.qp_count);
1719 	if (active_qps > rdev->stats.res.qp_watermark)
1720 		rdev->stats.res.qp_watermark = active_qps;
1721 	if (qp_init_attr->qp_type == IB_QPT_RC) {
1722 		active_qps = atomic_inc_return(&rdev->stats.res.rc_qp_count);
1723 		if (active_qps > rdev->stats.res.rc_qp_watermark)
1724 			rdev->stats.res.rc_qp_watermark = active_qps;
1725 	} else if (qp_init_attr->qp_type == IB_QPT_UD) {
1726 		active_qps = atomic_inc_return(&rdev->stats.res.ud_qp_count);
1727 		if (active_qps > rdev->stats.res.ud_qp_watermark)
1728 			rdev->stats.res.ud_qp_watermark = active_qps;
1729 	}
1730 	bnxt_re_debug_add_qpinfo(rdev, qp);
1731 
1732 	return 0;
1733 qp_destroy:
1734 	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1735 free_umem:
1736 	ib_umem_release(qp->rumem);
1737 	ib_umem_release(qp->sumem);
1738 fail:
1739 	return rc;
1740 }
1741 
__from_ib_qp_state(enum ib_qp_state state)1742 static u8 __from_ib_qp_state(enum ib_qp_state state)
1743 {
1744 	switch (state) {
1745 	case IB_QPS_RESET:
1746 		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1747 	case IB_QPS_INIT:
1748 		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1749 	case IB_QPS_RTR:
1750 		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1751 	case IB_QPS_RTS:
1752 		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1753 	case IB_QPS_SQD:
1754 		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1755 	case IB_QPS_SQE:
1756 		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1757 	case IB_QPS_ERR:
1758 	default:
1759 		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1760 	}
1761 }
1762 
__to_ib_qp_state(u8 state)1763 static enum ib_qp_state __to_ib_qp_state(u8 state)
1764 {
1765 	switch (state) {
1766 	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1767 		return IB_QPS_RESET;
1768 	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1769 		return IB_QPS_INIT;
1770 	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1771 		return IB_QPS_RTR;
1772 	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1773 		return IB_QPS_RTS;
1774 	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1775 		return IB_QPS_SQD;
1776 	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1777 		return IB_QPS_SQE;
1778 	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1779 	default:
1780 		return IB_QPS_ERR;
1781 	}
1782 }
1783 
__from_ib_mtu(enum ib_mtu mtu)1784 static u32 __from_ib_mtu(enum ib_mtu mtu)
1785 {
1786 	switch (mtu) {
1787 	case IB_MTU_256:
1788 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1789 	case IB_MTU_512:
1790 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1791 	case IB_MTU_1024:
1792 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1793 	case IB_MTU_2048:
1794 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1795 	case IB_MTU_4096:
1796 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1797 	default:
1798 		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1799 	}
1800 }
1801 
__to_ib_mtu(u32 mtu)1802 static enum ib_mtu __to_ib_mtu(u32 mtu)
1803 {
1804 	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1805 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1806 		return IB_MTU_256;
1807 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1808 		return IB_MTU_512;
1809 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1810 		return IB_MTU_1024;
1811 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1812 		return IB_MTU_2048;
1813 	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1814 		return IB_MTU_4096;
1815 	default:
1816 		return IB_MTU_2048;
1817 	}
1818 }
1819 
1820 /* Shared Receive Queues */
bnxt_re_destroy_srq(struct ib_srq * ib_srq,struct ib_udata * udata)1821 int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1822 {
1823 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1824 					       ib_srq);
1825 	struct bnxt_re_dev *rdev = srq->rdev;
1826 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1827 
1828 	if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
1829 		free_page((unsigned long)srq->uctx_srq_page);
1830 		hash_del(&srq->hash_entry);
1831 	}
1832 	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1833 	ib_umem_release(srq->umem);
1834 	atomic_dec(&rdev->stats.res.srq_count);
1835 	return 0;
1836 }
1837 
bnxt_re_init_user_srq(struct bnxt_re_dev * rdev,struct bnxt_re_pd * pd,struct bnxt_re_srq * srq,struct ib_udata * udata)1838 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1839 				 struct bnxt_re_pd *pd,
1840 				 struct bnxt_re_srq *srq,
1841 				 struct ib_udata *udata)
1842 {
1843 	struct bnxt_re_srq_req ureq;
1844 	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1845 	struct ib_umem *umem;
1846 	int bytes = 0;
1847 	struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1848 		udata, struct bnxt_re_ucontext, ib_uctx);
1849 
1850 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1851 		return -EFAULT;
1852 
1853 	bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1854 	bytes = PAGE_ALIGN(bytes);
1855 	umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
1856 			   IB_ACCESS_LOCAL_WRITE);
1857 	if (IS_ERR(umem))
1858 		return PTR_ERR(umem);
1859 
1860 	srq->umem = umem;
1861 	qplib_srq->sg_info.umem = umem;
1862 	qplib_srq->sg_info.pgsize = PAGE_SIZE;
1863 	qplib_srq->sg_info.pgshft = PAGE_SHIFT;
1864 	qplib_srq->srq_handle = ureq.srq_handle;
1865 	qplib_srq->dpi = &cntx->dpi;
1866 
1867 	return 0;
1868 }
1869 
bnxt_re_create_srq(struct ib_srq * ib_srq,struct ib_srq_init_attr * srq_init_attr,struct ib_udata * udata)1870 int bnxt_re_create_srq(struct ib_srq *ib_srq,
1871 		       struct ib_srq_init_attr *srq_init_attr,
1872 		       struct ib_udata *udata)
1873 {
1874 	struct bnxt_qplib_dev_attr *dev_attr;
1875 	struct bnxt_re_ucontext *uctx;
1876 	struct bnxt_re_dev *rdev;
1877 	struct bnxt_re_srq *srq;
1878 	struct bnxt_re_pd *pd;
1879 	struct ib_pd *ib_pd;
1880 	u32 active_srqs;
1881 	int rc, entries;
1882 
1883 	ib_pd = ib_srq->pd;
1884 	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1885 	rdev = pd->rdev;
1886 	dev_attr = rdev->dev_attr;
1887 	srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1888 
1889 	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1890 		ibdev_err(&rdev->ibdev, "Create CQ failed - max exceeded");
1891 		rc = -EINVAL;
1892 		goto exit;
1893 	}
1894 
1895 	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1896 		rc = -EOPNOTSUPP;
1897 		goto exit;
1898 	}
1899 
1900 	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
1901 	srq->rdev = rdev;
1902 	srq->qplib_srq.pd = &pd->qplib_pd;
1903 	srq->qplib_srq.dpi = &rdev->dpi_privileged;
1904 	/* Allocate 1 more than what's provided so posting max doesn't
1905 	 * mean empty
1906 	 */
1907 	entries = bnxt_re_init_depth(srq_init_attr->attr.max_wr + 1, uctx);
1908 	if (entries > dev_attr->max_srq_wqes + 1)
1909 		entries = dev_attr->max_srq_wqes + 1;
1910 	srq->qplib_srq.max_wqe = entries;
1911 
1912 	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1913 	 /* 128 byte wqe size for SRQ . So use max sges */
1914 	srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1915 	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1916 	srq->srq_limit = srq_init_attr->attr.srq_limit;
1917 	srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
1918 	srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
1919 	srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
1920 
1921 	if (udata) {
1922 		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1923 		if (rc)
1924 			goto fail;
1925 	}
1926 
1927 	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1928 	if (rc) {
1929 		ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
1930 		goto fail;
1931 	}
1932 
1933 	if (udata) {
1934 		struct bnxt_re_srq_resp resp = {};
1935 
1936 		resp.srqid = srq->qplib_srq.id;
1937 		if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
1938 			hash_add(rdev->srq_hash, &srq->hash_entry, srq->qplib_srq.id);
1939 			srq->uctx_srq_page = (void *)get_zeroed_page(GFP_KERNEL);
1940 			if (!srq->uctx_srq_page) {
1941 				rc = -ENOMEM;
1942 				goto fail;
1943 			}
1944 			resp.comp_mask |= BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT;
1945 		}
1946 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1947 		if (rc) {
1948 			ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
1949 			bnxt_qplib_destroy_srq(&rdev->qplib_res,
1950 					       &srq->qplib_srq);
1951 			goto fail;
1952 		}
1953 	}
1954 	active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
1955 	if (active_srqs > rdev->stats.res.srq_watermark)
1956 		rdev->stats.res.srq_watermark = active_srqs;
1957 	spin_lock_init(&srq->lock);
1958 
1959 	return 0;
1960 
1961 fail:
1962 	ib_umem_release(srq->umem);
1963 exit:
1964 	return rc;
1965 }
1966 
bnxt_re_modify_srq(struct ib_srq * ib_srq,struct ib_srq_attr * srq_attr,enum ib_srq_attr_mask srq_attr_mask,struct ib_udata * udata)1967 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1968 		       enum ib_srq_attr_mask srq_attr_mask,
1969 		       struct ib_udata *udata)
1970 {
1971 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1972 					       ib_srq);
1973 	struct bnxt_re_dev *rdev = srq->rdev;
1974 
1975 	switch (srq_attr_mask) {
1976 	case IB_SRQ_MAX_WR:
1977 		/* SRQ resize is not supported */
1978 		return -EINVAL;
1979 	case IB_SRQ_LIMIT:
1980 		/* Change the SRQ threshold */
1981 		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1982 			return -EINVAL;
1983 
1984 		srq->qplib_srq.threshold = srq_attr->srq_limit;
1985 		bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold);
1986 
1987 		/* On success, update the shadow */
1988 		srq->srq_limit = srq_attr->srq_limit;
1989 		/* No need to Build and send response back to udata */
1990 		return 0;
1991 	default:
1992 		ibdev_err(&rdev->ibdev,
1993 			  "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1994 		return -EINVAL;
1995 	}
1996 }
1997 
bnxt_re_query_srq(struct ib_srq * ib_srq,struct ib_srq_attr * srq_attr)1998 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1999 {
2000 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
2001 					       ib_srq);
2002 	struct bnxt_re_srq tsrq;
2003 	struct bnxt_re_dev *rdev = srq->rdev;
2004 	int rc;
2005 
2006 	/* Get live SRQ attr */
2007 	tsrq.qplib_srq.id = srq->qplib_srq.id;
2008 	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
2009 	if (rc) {
2010 		ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
2011 		return rc;
2012 	}
2013 	srq_attr->max_wr = srq->qplib_srq.max_wqe;
2014 	srq_attr->max_sge = srq->qplib_srq.max_sge;
2015 	srq_attr->srq_limit = tsrq.qplib_srq.threshold;
2016 
2017 	return 0;
2018 }
2019 
bnxt_re_post_srq_recv(struct ib_srq * ib_srq,const struct ib_recv_wr * wr,const struct ib_recv_wr ** bad_wr)2020 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
2021 			  const struct ib_recv_wr **bad_wr)
2022 {
2023 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
2024 					       ib_srq);
2025 	struct bnxt_qplib_swqe wqe;
2026 	unsigned long flags;
2027 	int rc = 0;
2028 
2029 	spin_lock_irqsave(&srq->lock, flags);
2030 	while (wr) {
2031 		/* Transcribe each ib_recv_wr to qplib_swqe */
2032 		wqe.num_sge = wr->num_sge;
2033 		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2034 		wqe.wr_id = wr->wr_id;
2035 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2036 
2037 		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
2038 		if (rc) {
2039 			*bad_wr = wr;
2040 			break;
2041 		}
2042 		wr = wr->next;
2043 	}
2044 	spin_unlock_irqrestore(&srq->lock, flags);
2045 
2046 	return rc;
2047 }
bnxt_re_modify_shadow_qp(struct bnxt_re_dev * rdev,struct bnxt_re_qp * qp1_qp,int qp_attr_mask)2048 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
2049 				    struct bnxt_re_qp *qp1_qp,
2050 				    int qp_attr_mask)
2051 {
2052 	struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
2053 	int rc;
2054 
2055 	if (qp_attr_mask & IB_QP_STATE) {
2056 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
2057 		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
2058 	}
2059 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
2060 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2061 		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
2062 	}
2063 
2064 	if (qp_attr_mask & IB_QP_QKEY) {
2065 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2066 		/* Using a Random  QKEY */
2067 		qp->qplib_qp.qkey = 0x81818181;
2068 	}
2069 	if (qp_attr_mask & IB_QP_SQ_PSN) {
2070 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2071 		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
2072 	}
2073 
2074 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2075 	if (rc)
2076 		ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
2077 	return rc;
2078 }
2079 
bnxt_re_modify_qp(struct ib_qp * ib_qp,struct ib_qp_attr * qp_attr,int qp_attr_mask,struct ib_udata * udata)2080 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2081 		      int qp_attr_mask, struct ib_udata *udata)
2082 {
2083 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2084 	struct bnxt_re_dev *rdev = qp->rdev;
2085 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
2086 	enum ib_qp_state curr_qp_state, new_qp_state;
2087 	int rc, entries;
2088 	unsigned int flags;
2089 	u8 nw_type;
2090 
2091 	if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
2092 		return -EOPNOTSUPP;
2093 
2094 	qp->qplib_qp.modify_flags = 0;
2095 	if (qp_attr_mask & IB_QP_STATE) {
2096 		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
2097 		new_qp_state = qp_attr->qp_state;
2098 		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
2099 					ib_qp->qp_type, qp_attr_mask)) {
2100 			ibdev_err(&rdev->ibdev,
2101 				  "Invalid attribute mask: %#x specified ",
2102 				  qp_attr_mask);
2103 			ibdev_err(&rdev->ibdev,
2104 				  "for qpn: %#x type: %#x",
2105 				  ib_qp->qp_num, ib_qp->qp_type);
2106 			ibdev_err(&rdev->ibdev,
2107 				  "curr_qp_state=0x%x, new_qp_state=0x%x\n",
2108 				  curr_qp_state, new_qp_state);
2109 			return -EINVAL;
2110 		}
2111 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
2112 		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
2113 
2114 		if (!qp->sumem &&
2115 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2116 			ibdev_dbg(&rdev->ibdev,
2117 				  "Move QP = %p to flush list\n", qp);
2118 			flags = bnxt_re_lock_cqs(qp);
2119 			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
2120 			bnxt_re_unlock_cqs(qp, flags);
2121 		}
2122 		if (!qp->sumem &&
2123 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2124 			ibdev_dbg(&rdev->ibdev,
2125 				  "Move QP = %p out of flush list\n", qp);
2126 			flags = bnxt_re_lock_cqs(qp);
2127 			bnxt_qplib_clean_qp(&qp->qplib_qp);
2128 			bnxt_re_unlock_cqs(qp, flags);
2129 		}
2130 	}
2131 	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
2132 		qp->qplib_qp.modify_flags |=
2133 				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
2134 		qp->qplib_qp.en_sqd_async_notify = true;
2135 	}
2136 	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
2137 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
2138 		qp->qplib_qp.access =
2139 			__qp_access_flags_from_ib(qp->qplib_qp.cctx,
2140 						  qp_attr->qp_access_flags);
2141 		/* LOCAL_WRITE access must be set to allow RC receive */
2142 		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE;
2143 	}
2144 	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
2145 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2146 		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
2147 	}
2148 	if (qp_attr_mask & IB_QP_QKEY) {
2149 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2150 		qp->qplib_qp.qkey = qp_attr->qkey;
2151 	}
2152 	if (qp_attr_mask & IB_QP_AV) {
2153 		const struct ib_global_route *grh =
2154 			rdma_ah_read_grh(&qp_attr->ah_attr);
2155 		const struct ib_gid_attr *sgid_attr;
2156 		struct bnxt_re_gid_ctx *ctx;
2157 
2158 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
2159 				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
2160 				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
2161 				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
2162 				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
2163 				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
2164 				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
2165 		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
2166 		       sizeof(qp->qplib_qp.ah.dgid.data));
2167 		qp->qplib_qp.ah.flow_label = grh->flow_label;
2168 		sgid_attr = grh->sgid_attr;
2169 		/* Get the HW context of the GID. The reference
2170 		 * of GID table entry is already taken by the caller.
2171 		 */
2172 		ctx = rdma_read_gid_hw_context(sgid_attr);
2173 		qp->qplib_qp.ah.sgid_index = ctx->idx;
2174 		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
2175 		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
2176 		qp->qplib_qp.ah.traffic_class = grh->traffic_class >> 2;
2177 		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
2178 		ether_addr_copy(qp->qplib_qp.ah.dmac,
2179 				qp_attr->ah_attr.roce.dmac);
2180 
2181 		rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
2182 					     &qp->qplib_qp.smac[0]);
2183 		if (rc)
2184 			return rc;
2185 
2186 		nw_type = rdma_gid_attr_network_type(sgid_attr);
2187 		switch (nw_type) {
2188 		case RDMA_NETWORK_IPV4:
2189 			qp->qplib_qp.nw_type =
2190 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
2191 			break;
2192 		case RDMA_NETWORK_IPV6:
2193 			qp->qplib_qp.nw_type =
2194 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
2195 			break;
2196 		default:
2197 			qp->qplib_qp.nw_type =
2198 				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
2199 			break;
2200 		}
2201 	}
2202 
2203 	if (qp_attr->qp_state == IB_QPS_RTR) {
2204 		enum ib_mtu qpmtu;
2205 
2206 		qpmtu = iboe_get_mtu(rdev->netdev->mtu);
2207 		if (qp_attr_mask & IB_QP_PATH_MTU) {
2208 			if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
2209 			    ib_mtu_enum_to_int(qpmtu))
2210 				return -EINVAL;
2211 			qpmtu = qp_attr->path_mtu;
2212 		}
2213 
2214 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2215 		qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
2216 		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
2217 	}
2218 
2219 	if (qp_attr_mask & IB_QP_TIMEOUT) {
2220 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
2221 		qp->qplib_qp.timeout = qp_attr->timeout;
2222 	}
2223 	if (qp_attr_mask & IB_QP_RETRY_CNT) {
2224 		qp->qplib_qp.modify_flags |=
2225 				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
2226 		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
2227 	}
2228 	if (qp_attr_mask & IB_QP_RNR_RETRY) {
2229 		qp->qplib_qp.modify_flags |=
2230 				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
2231 		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
2232 	}
2233 	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
2234 		qp->qplib_qp.modify_flags |=
2235 				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
2236 		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
2237 	}
2238 	if (qp_attr_mask & IB_QP_RQ_PSN) {
2239 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
2240 		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
2241 	}
2242 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2243 		qp->qplib_qp.modify_flags |=
2244 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
2245 		/* Cap the max_rd_atomic to device max */
2246 		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
2247 						   dev_attr->max_qp_rd_atom);
2248 	}
2249 	if (qp_attr_mask & IB_QP_SQ_PSN) {
2250 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2251 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
2252 	}
2253 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2254 		if (qp_attr->max_dest_rd_atomic >
2255 		    dev_attr->max_qp_init_rd_atom) {
2256 			ibdev_err(&rdev->ibdev,
2257 				  "max_dest_rd_atomic requested%d is > dev_max%d",
2258 				  qp_attr->max_dest_rd_atomic,
2259 				  dev_attr->max_qp_init_rd_atom);
2260 			return -EINVAL;
2261 		}
2262 
2263 		qp->qplib_qp.modify_flags |=
2264 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
2265 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
2266 	}
2267 	if (qp_attr_mask & IB_QP_CAP) {
2268 		struct bnxt_re_ucontext *uctx =
2269 			rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
2270 
2271 		qp->qplib_qp.modify_flags |=
2272 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
2273 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
2274 				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
2275 				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2276 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2277 		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2278 		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2279 		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2280 		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2281 		    (qp_attr->cap.max_inline_data >=
2282 						dev_attr->max_inline_data)) {
2283 			ibdev_err(&rdev->ibdev,
2284 				  "Create QP failed - max exceeded");
2285 			return -EINVAL;
2286 		}
2287 		entries = bnxt_re_init_depth(qp_attr->cap.max_send_wr, uctx);
2288 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2289 						dev_attr->max_qp_wqes + 1);
2290 		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2291 						qp_attr->cap.max_send_wr;
2292 		/*
2293 		 * Reserving one slot for Phantom WQE. Some application can
2294 		 * post one extra entry in this case. Allowing this to avoid
2295 		 * unexpected Queue full condition
2296 		 */
2297 		qp->qplib_qp.sq.q_full_delta -= 1;
2298 		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2299 		if (qp->qplib_qp.rq.max_wqe) {
2300 			entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
2301 			qp->qplib_qp.rq.max_wqe =
2302 				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2303 			qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe;
2304 			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2305 						       qp_attr->cap.max_recv_wr;
2306 			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2307 		} else {
2308 			/* SRQ was used prior, just ignore the RQ caps */
2309 		}
2310 	}
2311 	if (qp_attr_mask & IB_QP_DEST_QPN) {
2312 		qp->qplib_qp.modify_flags |=
2313 				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2314 		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2315 	}
2316 	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2317 	if (rc) {
2318 		ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2319 		return rc;
2320 	}
2321 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2322 		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2323 	return rc;
2324 }
2325 
bnxt_re_query_qp(struct ib_qp * ib_qp,struct ib_qp_attr * qp_attr,int qp_attr_mask,struct ib_qp_init_attr * qp_init_attr)2326 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2327 		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2328 {
2329 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2330 	struct bnxt_re_dev *rdev = qp->rdev;
2331 	struct bnxt_qplib_qp *qplib_qp;
2332 	int rc;
2333 
2334 	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2335 	if (!qplib_qp)
2336 		return -ENOMEM;
2337 
2338 	qplib_qp->id = qp->qplib_qp.id;
2339 	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2340 
2341 	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2342 	if (rc) {
2343 		ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2344 		goto out;
2345 	}
2346 	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2347 	qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2348 	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2349 	qp_attr->qp_access_flags = __qp_access_flags_to_ib(qp->qplib_qp.cctx,
2350 							   qplib_qp->access);
2351 	qp_attr->pkey_index = qplib_qp->pkey_index;
2352 	qp_attr->qkey = qplib_qp->qkey;
2353 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2354 	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->udp_sport,
2355 			qplib_qp->ah.host_sgid_index,
2356 			qplib_qp->ah.hop_limit,
2357 			qplib_qp->ah.traffic_class);
2358 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2359 	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2360 	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2361 	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2362 	qp_attr->timeout = qplib_qp->timeout;
2363 	qp_attr->retry_cnt = qplib_qp->retry_cnt;
2364 	qp_attr->rnr_retry = qplib_qp->rnr_retry;
2365 	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2366 	qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
2367 	qp_attr->rq_psn = qplib_qp->rq.psn;
2368 	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2369 	qp_attr->sq_psn = qplib_qp->sq.psn;
2370 	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2371 	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2372 							 IB_SIGNAL_REQ_WR;
2373 	qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2374 
2375 	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2376 	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2377 	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2378 	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2379 	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2380 	qp_init_attr->cap = qp_attr->cap;
2381 
2382 out:
2383 	kfree(qplib_qp);
2384 	return rc;
2385 }
2386 
2387 /* Routine for sending QP1 packets for RoCE V1 an V2
2388  */
bnxt_re_build_qp1_send_v2(struct bnxt_re_qp * qp,const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe,int payload_size)2389 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2390 				     const struct ib_send_wr *wr,
2391 				     struct bnxt_qplib_swqe *wqe,
2392 				     int payload_size)
2393 {
2394 	struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2395 					     ib_ah);
2396 	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2397 	const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2398 	struct bnxt_qplib_sge sge;
2399 	u8 nw_type;
2400 	u16 ether_type;
2401 	union ib_gid dgid;
2402 	bool is_eth = false;
2403 	bool is_vlan = false;
2404 	bool is_grh = false;
2405 	bool is_udp = false;
2406 	u8 ip_version = 0;
2407 	u16 vlan_id = 0xFFFF;
2408 	void *buf;
2409 	int i, rc;
2410 
2411 	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2412 
2413 	rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2414 	if (rc)
2415 		return rc;
2416 
2417 	/* Get network header type for this GID */
2418 	nw_type = rdma_gid_attr_network_type(sgid_attr);
2419 	switch (nw_type) {
2420 	case RDMA_NETWORK_IPV4:
2421 		nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2422 		break;
2423 	case RDMA_NETWORK_IPV6:
2424 		nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2425 		break;
2426 	default:
2427 		nw_type = BNXT_RE_ROCE_V1_PACKET;
2428 		break;
2429 	}
2430 	memcpy(&dgid.raw, &qplib_ah->dgid, 16);
2431 	is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2432 	if (is_udp) {
2433 		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2434 			ip_version = 4;
2435 			ether_type = ETH_P_IP;
2436 		} else {
2437 			ip_version = 6;
2438 			ether_type = ETH_P_IPV6;
2439 		}
2440 		is_grh = false;
2441 	} else {
2442 		ether_type = ETH_P_IBOE;
2443 		is_grh = true;
2444 	}
2445 
2446 	is_eth = true;
2447 	is_vlan = vlan_id && (vlan_id < 0x1000);
2448 
2449 	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2450 			  ip_version, is_udp, 0, &qp->qp1_hdr);
2451 
2452 	/* ETH */
2453 	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2454 	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2455 
2456 	/* For vlan, check the sgid for vlan existence */
2457 
2458 	if (!is_vlan) {
2459 		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2460 	} else {
2461 		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2462 		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2463 	}
2464 
2465 	if (is_grh || (ip_version == 6)) {
2466 		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2467 		       sizeof(sgid_attr->gid));
2468 		memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2469 		       sizeof(sgid_attr->gid));
2470 		qp->qp1_hdr.grh.hop_limit     = qplib_ah->hop_limit;
2471 	}
2472 
2473 	if (ip_version == 4) {
2474 		qp->qp1_hdr.ip4.tos = 0;
2475 		qp->qp1_hdr.ip4.id = 0;
2476 		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2477 		qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2478 
2479 		memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2480 		memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2481 		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2482 	}
2483 
2484 	if (is_udp) {
2485 		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2486 		qp->qp1_hdr.udp.sport = htons(0x8CD1);
2487 		qp->qp1_hdr.udp.csum = 0;
2488 	}
2489 
2490 	/* BTH */
2491 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2492 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2493 		qp->qp1_hdr.immediate_present = 1;
2494 	} else {
2495 		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2496 	}
2497 	if (wr->send_flags & IB_SEND_SOLICITED)
2498 		qp->qp1_hdr.bth.solicited_event = 1;
2499 	/* pad_count */
2500 	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2501 
2502 	/* P_key for QP1 is for all members */
2503 	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2504 	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2505 	qp->qp1_hdr.bth.ack_req = 0;
2506 	qp->send_psn++;
2507 	qp->send_psn &= BTH_PSN_MASK;
2508 	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2509 	/* DETH */
2510 	/* Use the priviledged Q_Key for QP1 */
2511 	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2512 	qp->qp1_hdr.deth.source_qpn = IB_QP1;
2513 
2514 	/* Pack the QP1 to the transmit buffer */
2515 	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2516 	if (buf) {
2517 		ib_ud_header_pack(&qp->qp1_hdr, buf);
2518 		for (i = wqe->num_sge; i; i--) {
2519 			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2520 			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2521 			wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2522 		}
2523 
2524 		/*
2525 		 * Max Header buf size for IPV6 RoCE V2 is 86,
2526 		 * which is same as the QP1 SQ header buffer.
2527 		 * Header buf size for IPV4 RoCE V2 can be 66.
2528 		 * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
2529 		 * Subtract 20 bytes from QP1 SQ header buf size
2530 		 */
2531 		if (is_udp && ip_version == 4)
2532 			sge.size -= 20;
2533 		/*
2534 		 * Max Header buf size for RoCE V1 is 78.
2535 		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2536 		 * Subtract 8 bytes from QP1 SQ header buf size
2537 		 */
2538 		if (!is_udp)
2539 			sge.size -= 8;
2540 
2541 		/* Subtract 4 bytes for non vlan packets */
2542 		if (!is_vlan)
2543 			sge.size -= 4;
2544 
2545 		wqe->sg_list[0].addr = sge.addr;
2546 		wqe->sg_list[0].lkey = sge.lkey;
2547 		wqe->sg_list[0].size = sge.size;
2548 		wqe->num_sge++;
2549 
2550 	} else {
2551 		ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2552 		rc = -ENOMEM;
2553 	}
2554 	return rc;
2555 }
2556 
2557 /* For the MAD layer, it only provides the recv SGE the size of
2558  * ib_grh + MAD datagram.  No Ethernet headers, Ethertype, BTH, DETH,
2559  * nor RoCE iCRC.  The Cu+ solution must provide buffer for the entire
2560  * receive packet (334 bytes) with no VLAN and then copy the GRH
2561  * and the MAD datagram out to the provided SGE.
2562  */
bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp * qp,const struct ib_recv_wr * wr,struct bnxt_qplib_swqe * wqe,int payload_size)2563 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2564 					    const struct ib_recv_wr *wr,
2565 					    struct bnxt_qplib_swqe *wqe,
2566 					    int payload_size)
2567 {
2568 	struct bnxt_re_sqp_entries *sqp_entry;
2569 	struct bnxt_qplib_sge ref, sge;
2570 	struct bnxt_re_dev *rdev;
2571 	u32 rq_prod_index;
2572 
2573 	rdev = qp->rdev;
2574 
2575 	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2576 
2577 	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2578 		return -ENOMEM;
2579 
2580 	/* Create 1 SGE to receive the entire
2581 	 * ethernet packet
2582 	 */
2583 	/* Save the reference from ULP */
2584 	ref.addr = wqe->sg_list[0].addr;
2585 	ref.lkey = wqe->sg_list[0].lkey;
2586 	ref.size = wqe->sg_list[0].size;
2587 
2588 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2589 
2590 	/* SGE 1 */
2591 	wqe->sg_list[0].addr = sge.addr;
2592 	wqe->sg_list[0].lkey = sge.lkey;
2593 	wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2594 	sge.size -= wqe->sg_list[0].size;
2595 
2596 	sqp_entry->sge.addr = ref.addr;
2597 	sqp_entry->sge.lkey = ref.lkey;
2598 	sqp_entry->sge.size = ref.size;
2599 	/* Store the wrid for reporting completion */
2600 	sqp_entry->wrid = wqe->wr_id;
2601 	/* change the wqe->wrid to table index */
2602 	wqe->wr_id = rq_prod_index;
2603 	return 0;
2604 }
2605 
is_ud_qp(struct bnxt_re_qp * qp)2606 static int is_ud_qp(struct bnxt_re_qp *qp)
2607 {
2608 	return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2609 		qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2610 }
2611 
bnxt_re_build_send_wqe(struct bnxt_re_qp * qp,const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)2612 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2613 				  const struct ib_send_wr *wr,
2614 				  struct bnxt_qplib_swqe *wqe)
2615 {
2616 	struct bnxt_re_ah *ah = NULL;
2617 
2618 	if (is_ud_qp(qp)) {
2619 		ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2620 		wqe->send.q_key = ud_wr(wr)->remote_qkey;
2621 		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2622 		wqe->send.avid = ah->qplib_ah.id;
2623 	}
2624 	switch (wr->opcode) {
2625 	case IB_WR_SEND:
2626 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2627 		break;
2628 	case IB_WR_SEND_WITH_IMM:
2629 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2630 		wqe->send.imm_data = be32_to_cpu(wr->ex.imm_data);
2631 		break;
2632 	case IB_WR_SEND_WITH_INV:
2633 		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2634 		wqe->send.inv_key = wr->ex.invalidate_rkey;
2635 		break;
2636 	default:
2637 		return -EINVAL;
2638 	}
2639 	if (wr->send_flags & IB_SEND_SIGNALED)
2640 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2641 	if (wr->send_flags & IB_SEND_FENCE)
2642 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2643 	if (wr->send_flags & IB_SEND_SOLICITED)
2644 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2645 	if (wr->send_flags & IB_SEND_INLINE)
2646 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2647 
2648 	return 0;
2649 }
2650 
bnxt_re_build_rdma_wqe(const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)2651 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2652 				  struct bnxt_qplib_swqe *wqe)
2653 {
2654 	switch (wr->opcode) {
2655 	case IB_WR_RDMA_WRITE:
2656 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2657 		break;
2658 	case IB_WR_RDMA_WRITE_WITH_IMM:
2659 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2660 		wqe->rdma.imm_data = be32_to_cpu(wr->ex.imm_data);
2661 		break;
2662 	case IB_WR_RDMA_READ:
2663 		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2664 		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2665 		break;
2666 	default:
2667 		return -EINVAL;
2668 	}
2669 	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2670 	wqe->rdma.r_key = rdma_wr(wr)->rkey;
2671 	if (wr->send_flags & IB_SEND_SIGNALED)
2672 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2673 	if (wr->send_flags & IB_SEND_FENCE)
2674 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2675 	if (wr->send_flags & IB_SEND_SOLICITED)
2676 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2677 	if (wr->send_flags & IB_SEND_INLINE)
2678 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2679 
2680 	return 0;
2681 }
2682 
bnxt_re_build_atomic_wqe(const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)2683 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2684 				    struct bnxt_qplib_swqe *wqe)
2685 {
2686 	switch (wr->opcode) {
2687 	case IB_WR_ATOMIC_CMP_AND_SWP:
2688 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2689 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2690 		wqe->atomic.swap_data = atomic_wr(wr)->swap;
2691 		break;
2692 	case IB_WR_ATOMIC_FETCH_AND_ADD:
2693 		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2694 		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2695 		break;
2696 	default:
2697 		return -EINVAL;
2698 	}
2699 	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2700 	wqe->atomic.r_key = atomic_wr(wr)->rkey;
2701 	if (wr->send_flags & IB_SEND_SIGNALED)
2702 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2703 	if (wr->send_flags & IB_SEND_FENCE)
2704 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2705 	if (wr->send_flags & IB_SEND_SOLICITED)
2706 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2707 	return 0;
2708 }
2709 
bnxt_re_build_inv_wqe(const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)2710 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2711 				 struct bnxt_qplib_swqe *wqe)
2712 {
2713 	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2714 	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2715 
2716 	if (wr->send_flags & IB_SEND_SIGNALED)
2717 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2718 	if (wr->send_flags & IB_SEND_SOLICITED)
2719 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2720 
2721 	return 0;
2722 }
2723 
bnxt_re_build_reg_wqe(const struct ib_reg_wr * wr,struct bnxt_qplib_swqe * wqe)2724 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2725 				 struct bnxt_qplib_swqe *wqe)
2726 {
2727 	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2728 	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2729 	int access = wr->access;
2730 
2731 	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2732 	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2733 	wqe->frmr.page_list = mr->pages;
2734 	wqe->frmr.page_list_len = mr->npages;
2735 	wqe->frmr.levels = qplib_frpl->hwq.level;
2736 	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2737 
2738 	if (wr->wr.send_flags & IB_SEND_SIGNALED)
2739 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2740 
2741 	if (access & IB_ACCESS_LOCAL_WRITE)
2742 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2743 	if (access & IB_ACCESS_REMOTE_READ)
2744 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2745 	if (access & IB_ACCESS_REMOTE_WRITE)
2746 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2747 	if (access & IB_ACCESS_REMOTE_ATOMIC)
2748 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2749 	if (access & IB_ACCESS_MW_BIND)
2750 		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2751 
2752 	wqe->frmr.l_key = wr->key;
2753 	wqe->frmr.length = wr->mr->length;
2754 	wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
2755 	wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
2756 	wqe->frmr.va = wr->mr->iova;
2757 	return 0;
2758 }
2759 
bnxt_re_copy_inline_data(struct bnxt_re_dev * rdev,const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)2760 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2761 				    const struct ib_send_wr *wr,
2762 				    struct bnxt_qplib_swqe *wqe)
2763 {
2764 	/*  Copy the inline data to the data  field */
2765 	u8 *in_data;
2766 	u32 i, sge_len;
2767 	void *sge_addr;
2768 
2769 	in_data = wqe->inline_data;
2770 	for (i = 0; i < wr->num_sge; i++) {
2771 		sge_addr = (void *)(unsigned long)
2772 				wr->sg_list[i].addr;
2773 		sge_len = wr->sg_list[i].length;
2774 
2775 		if ((sge_len + wqe->inline_len) >
2776 		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2777 			ibdev_err(&rdev->ibdev,
2778 				  "Inline data size requested > supported value");
2779 			return -EINVAL;
2780 		}
2781 		sge_len = wr->sg_list[i].length;
2782 
2783 		memcpy(in_data, sge_addr, sge_len);
2784 		in_data += wr->sg_list[i].length;
2785 		wqe->inline_len += wr->sg_list[i].length;
2786 	}
2787 	return wqe->inline_len;
2788 }
2789 
bnxt_re_copy_wr_payload(struct bnxt_re_dev * rdev,const struct ib_send_wr * wr,struct bnxt_qplib_swqe * wqe)2790 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2791 				   const struct ib_send_wr *wr,
2792 				   struct bnxt_qplib_swqe *wqe)
2793 {
2794 	int payload_sz = 0;
2795 
2796 	if (wr->send_flags & IB_SEND_INLINE)
2797 		payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2798 	else
2799 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2800 					       wqe->num_sge);
2801 
2802 	return payload_sz;
2803 }
2804 
bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp * qp)2805 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2806 {
2807 	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2808 	     qp->ib_qp.qp_type == IB_QPT_GSI ||
2809 	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2810 	     qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2811 		int qp_attr_mask;
2812 		struct ib_qp_attr qp_attr;
2813 
2814 		qp_attr_mask = IB_QP_STATE;
2815 		qp_attr.qp_state = IB_QPS_RTS;
2816 		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2817 		qp->qplib_qp.wqe_cnt = 0;
2818 	}
2819 }
2820 
bnxt_re_post_send_shadow_qp(struct bnxt_re_dev * rdev,struct bnxt_re_qp * qp,const struct ib_send_wr * wr)2821 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2822 				       struct bnxt_re_qp *qp,
2823 				       const struct ib_send_wr *wr)
2824 {
2825 	int rc = 0, payload_sz = 0;
2826 	unsigned long flags;
2827 
2828 	spin_lock_irqsave(&qp->sq_lock, flags);
2829 	while (wr) {
2830 		struct bnxt_qplib_swqe wqe = {};
2831 
2832 		/* Common */
2833 		wqe.num_sge = wr->num_sge;
2834 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2835 			ibdev_err(&rdev->ibdev,
2836 				  "Limit exceeded for Send SGEs");
2837 			rc = -EINVAL;
2838 			goto bad;
2839 		}
2840 
2841 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2842 		if (payload_sz < 0) {
2843 			rc = -EINVAL;
2844 			goto bad;
2845 		}
2846 		wqe.wr_id = wr->wr_id;
2847 
2848 		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2849 
2850 		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2851 		if (!rc)
2852 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2853 bad:
2854 		if (rc) {
2855 			ibdev_err(&rdev->ibdev,
2856 				  "Post send failed opcode = %#x rc = %d",
2857 				  wr->opcode, rc);
2858 			break;
2859 		}
2860 		wr = wr->next;
2861 	}
2862 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2863 	if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2864 		bnxt_ud_qp_hw_stall_workaround(qp);
2865 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2866 	return rc;
2867 }
2868 
bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe * wqe)2869 static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
2870 {
2871 	/* Need unconditional fence for non-wire memory opcode
2872 	 * to work as expected.
2873 	 */
2874 	if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
2875 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
2876 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
2877 	    wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
2878 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2879 }
2880 
bnxt_re_post_send(struct ib_qp * ib_qp,const struct ib_send_wr * wr,const struct ib_send_wr ** bad_wr)2881 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2882 		      const struct ib_send_wr **bad_wr)
2883 {
2884 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2885 	struct bnxt_qplib_swqe wqe;
2886 	int rc = 0, payload_sz = 0;
2887 	unsigned long flags;
2888 
2889 	spin_lock_irqsave(&qp->sq_lock, flags);
2890 	while (wr) {
2891 		/* House keeping */
2892 		memset(&wqe, 0, sizeof(wqe));
2893 
2894 		/* Common */
2895 		wqe.num_sge = wr->num_sge;
2896 		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2897 			ibdev_err(&qp->rdev->ibdev,
2898 				  "Limit exceeded for Send SGEs");
2899 			rc = -EINVAL;
2900 			goto bad;
2901 		}
2902 
2903 		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2904 		if (payload_sz < 0) {
2905 			rc = -EINVAL;
2906 			goto bad;
2907 		}
2908 		wqe.wr_id = wr->wr_id;
2909 
2910 		switch (wr->opcode) {
2911 		case IB_WR_SEND:
2912 		case IB_WR_SEND_WITH_IMM:
2913 			if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2914 				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2915 							       payload_sz);
2916 				if (rc)
2917 					goto bad;
2918 				wqe.rawqp1.lflags |=
2919 					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2920 			}
2921 			switch (wr->send_flags) {
2922 			case IB_SEND_IP_CSUM:
2923 				wqe.rawqp1.lflags |=
2924 					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2925 				break;
2926 			default:
2927 				break;
2928 			}
2929 			fallthrough;
2930 		case IB_WR_SEND_WITH_INV:
2931 			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2932 			break;
2933 		case IB_WR_RDMA_WRITE:
2934 		case IB_WR_RDMA_WRITE_WITH_IMM:
2935 		case IB_WR_RDMA_READ:
2936 			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2937 			break;
2938 		case IB_WR_ATOMIC_CMP_AND_SWP:
2939 		case IB_WR_ATOMIC_FETCH_AND_ADD:
2940 			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2941 			break;
2942 		case IB_WR_RDMA_READ_WITH_INV:
2943 			ibdev_err(&qp->rdev->ibdev,
2944 				  "RDMA Read with Invalidate is not supported");
2945 			rc = -EINVAL;
2946 			goto bad;
2947 		case IB_WR_LOCAL_INV:
2948 			rc = bnxt_re_build_inv_wqe(wr, &wqe);
2949 			break;
2950 		case IB_WR_REG_MR:
2951 			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2952 			break;
2953 		default:
2954 			/* Unsupported WRs */
2955 			ibdev_err(&qp->rdev->ibdev,
2956 				  "WR (%#x) is not supported", wr->opcode);
2957 			rc = -EINVAL;
2958 			goto bad;
2959 		}
2960 		if (!rc) {
2961 			if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2962 				bnxt_re_legacy_set_uc_fence(&wqe);
2963 			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2964 		}
2965 bad:
2966 		if (rc) {
2967 			ibdev_err(&qp->rdev->ibdev,
2968 				  "post_send failed op:%#x qps = %#x rc = %d\n",
2969 				  wr->opcode, qp->qplib_qp.state, rc);
2970 			*bad_wr = wr;
2971 			break;
2972 		}
2973 		wr = wr->next;
2974 	}
2975 	bnxt_qplib_post_send_db(&qp->qplib_qp);
2976 	if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2977 		bnxt_ud_qp_hw_stall_workaround(qp);
2978 	spin_unlock_irqrestore(&qp->sq_lock, flags);
2979 
2980 	return rc;
2981 }
2982 
bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev * rdev,struct bnxt_re_qp * qp,const struct ib_recv_wr * wr)2983 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2984 				       struct bnxt_re_qp *qp,
2985 				       const struct ib_recv_wr *wr)
2986 {
2987 	struct bnxt_qplib_swqe wqe;
2988 	int rc = 0;
2989 
2990 	while (wr) {
2991 		/* House keeping */
2992 		memset(&wqe, 0, sizeof(wqe));
2993 
2994 		/* Common */
2995 		wqe.num_sge = wr->num_sge;
2996 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2997 			ibdev_err(&rdev->ibdev,
2998 				  "Limit exceeded for Receive SGEs");
2999 			rc = -EINVAL;
3000 			break;
3001 		}
3002 		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
3003 		wqe.wr_id = wr->wr_id;
3004 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
3005 
3006 		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
3007 		if (rc)
3008 			break;
3009 
3010 		wr = wr->next;
3011 	}
3012 	if (!rc)
3013 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
3014 	return rc;
3015 }
3016 
bnxt_re_post_recv(struct ib_qp * ib_qp,const struct ib_recv_wr * wr,const struct ib_recv_wr ** bad_wr)3017 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
3018 		      const struct ib_recv_wr **bad_wr)
3019 {
3020 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
3021 	struct bnxt_qplib_swqe wqe;
3022 	int rc = 0, payload_sz = 0;
3023 	unsigned long flags;
3024 	u32 count = 0;
3025 
3026 	spin_lock_irqsave(&qp->rq_lock, flags);
3027 	while (wr) {
3028 		/* House keeping */
3029 		memset(&wqe, 0, sizeof(wqe));
3030 
3031 		/* Common */
3032 		wqe.num_sge = wr->num_sge;
3033 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
3034 			ibdev_err(&qp->rdev->ibdev,
3035 				  "Limit exceeded for Receive SGEs");
3036 			rc = -EINVAL;
3037 			*bad_wr = wr;
3038 			break;
3039 		}
3040 
3041 		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
3042 					       wr->num_sge);
3043 		wqe.wr_id = wr->wr_id;
3044 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
3045 
3046 		if (ib_qp->qp_type == IB_QPT_GSI &&
3047 		    qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
3048 			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
3049 							      payload_sz);
3050 		if (!rc)
3051 			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
3052 		if (rc) {
3053 			*bad_wr = wr;
3054 			break;
3055 		}
3056 
3057 		/* Ring DB if the RQEs posted reaches a threshold value */
3058 		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
3059 			bnxt_qplib_post_recv_db(&qp->qplib_qp);
3060 			count = 0;
3061 		}
3062 
3063 		wr = wr->next;
3064 	}
3065 
3066 	if (count)
3067 		bnxt_qplib_post_recv_db(&qp->qplib_qp);
3068 
3069 	spin_unlock_irqrestore(&qp->rq_lock, flags);
3070 
3071 	return rc;
3072 }
3073 
bnxt_re_get_nq(struct bnxt_re_dev * rdev)3074 static struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev)
3075 {
3076 	int min, indx;
3077 
3078 	mutex_lock(&rdev->nqr->load_lock);
3079 	for (indx = 0, min = 0; indx < (rdev->nqr->num_msix - 1); indx++) {
3080 		if (rdev->nqr->nq[min].load > rdev->nqr->nq[indx].load)
3081 			min = indx;
3082 	}
3083 	rdev->nqr->nq[min].load++;
3084 	mutex_unlock(&rdev->nqr->load_lock);
3085 
3086 	return &rdev->nqr->nq[min];
3087 }
3088 
bnxt_re_put_nq(struct bnxt_re_dev * rdev,struct bnxt_qplib_nq * nq)3089 static void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq)
3090 {
3091 	mutex_lock(&rdev->nqr->load_lock);
3092 	nq->load--;
3093 	mutex_unlock(&rdev->nqr->load_lock);
3094 }
3095 
3096 /* Completion Queues */
bnxt_re_destroy_cq(struct ib_cq * ib_cq,struct ib_udata * udata)3097 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
3098 {
3099 	struct bnxt_qplib_chip_ctx *cctx;
3100 	struct bnxt_qplib_nq *nq;
3101 	struct bnxt_re_dev *rdev;
3102 	struct bnxt_re_cq *cq;
3103 
3104 	cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3105 	rdev = cq->rdev;
3106 	nq = cq->qplib_cq.nq;
3107 	cctx = rdev->chip_ctx;
3108 
3109 	if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
3110 		free_page((unsigned long)cq->uctx_cq_page);
3111 		hash_del(&cq->hash_entry);
3112 	}
3113 	bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3114 
3115 	bnxt_re_put_nq(rdev, nq);
3116 	ib_umem_release(cq->umem);
3117 
3118 	atomic_dec(&rdev->stats.res.cq_count);
3119 	kfree(cq->cql);
3120 	return 0;
3121 }
3122 
bnxt_re_create_cq(struct ib_cq * ibcq,const struct ib_cq_init_attr * attr,struct uverbs_attr_bundle * attrs)3123 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
3124 		      struct uverbs_attr_bundle *attrs)
3125 {
3126 	struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
3127 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
3128 	struct ib_udata *udata = &attrs->driver_udata;
3129 	struct bnxt_re_ucontext *uctx =
3130 		rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
3131 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
3132 	struct bnxt_qplib_chip_ctx *cctx;
3133 	int cqe = attr->cqe;
3134 	int rc, entries;
3135 	u32 active_cqs;
3136 
3137 	if (attr->flags)
3138 		return -EOPNOTSUPP;
3139 
3140 	/* Validate CQ fields */
3141 	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3142 		ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
3143 		return -EINVAL;
3144 	}
3145 
3146 	cq->rdev = rdev;
3147 	cctx = rdev->chip_ctx;
3148 	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
3149 
3150 	entries = bnxt_re_init_depth(cqe + 1, uctx);
3151 	if (entries > dev_attr->max_cq_wqes + 1)
3152 		entries = dev_attr->max_cq_wqes + 1;
3153 
3154 	cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
3155 	cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
3156 	if (udata) {
3157 		struct bnxt_re_cq_req req;
3158 		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3159 			rc = -EFAULT;
3160 			goto fail;
3161 		}
3162 
3163 		cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
3164 				       entries * sizeof(struct cq_base),
3165 				       IB_ACCESS_LOCAL_WRITE);
3166 		if (IS_ERR(cq->umem)) {
3167 			rc = PTR_ERR(cq->umem);
3168 			goto fail;
3169 		}
3170 		cq->qplib_cq.sg_info.umem = cq->umem;
3171 		cq->qplib_cq.dpi = &uctx->dpi;
3172 	} else {
3173 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
3174 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
3175 				  GFP_KERNEL);
3176 		if (!cq->cql) {
3177 			rc = -ENOMEM;
3178 			goto fail;
3179 		}
3180 
3181 		cq->qplib_cq.dpi = &rdev->dpi_privileged;
3182 	}
3183 	cq->qplib_cq.max_wqe = entries;
3184 	cq->qplib_cq.coalescing = &rdev->cq_coalescing;
3185 	cq->qplib_cq.nq = bnxt_re_get_nq(rdev);
3186 	cq->qplib_cq.cnq_hw_ring_id = cq->qplib_cq.nq->ring_id;
3187 
3188 	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
3189 	if (rc) {
3190 		ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
3191 		goto fail;
3192 	}
3193 
3194 	cq->ib_cq.cqe = entries;
3195 	cq->cq_period = cq->qplib_cq.period;
3196 
3197 	active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
3198 	if (active_cqs > rdev->stats.res.cq_watermark)
3199 		rdev->stats.res.cq_watermark = active_cqs;
3200 	spin_lock_init(&cq->cq_lock);
3201 
3202 	if (udata) {
3203 		struct bnxt_re_cq_resp resp = {};
3204 
3205 		if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
3206 			hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
3207 			/* Allocate a page */
3208 			cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
3209 			if (!cq->uctx_cq_page) {
3210 				rc = -ENOMEM;
3211 				goto c2fail;
3212 			}
3213 			resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
3214 		}
3215 		resp.cqid = cq->qplib_cq.id;
3216 		resp.tail = cq->qplib_cq.hwq.cons;
3217 		resp.phase = cq->qplib_cq.period;
3218 		resp.rsvd = 0;
3219 		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
3220 		if (rc) {
3221 			ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
3222 			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3223 			goto free_mem;
3224 		}
3225 	}
3226 
3227 	return 0;
3228 
3229 free_mem:
3230 	free_page((unsigned long)cq->uctx_cq_page);
3231 c2fail:
3232 	ib_umem_release(cq->umem);
3233 fail:
3234 	kfree(cq->cql);
3235 	return rc;
3236 }
3237 
bnxt_re_resize_cq_complete(struct bnxt_re_cq * cq)3238 static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
3239 {
3240 	struct bnxt_re_dev *rdev = cq->rdev;
3241 
3242 	bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
3243 
3244 	cq->qplib_cq.max_wqe = cq->resize_cqe;
3245 	if (cq->resize_umem) {
3246 		ib_umem_release(cq->umem);
3247 		cq->umem = cq->resize_umem;
3248 		cq->resize_umem = NULL;
3249 		cq->resize_cqe = 0;
3250 	}
3251 }
3252 
bnxt_re_resize_cq(struct ib_cq * ibcq,int cqe,struct ib_udata * udata)3253 int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
3254 {
3255 	struct bnxt_qplib_sg_info sg_info = {};
3256 	struct bnxt_qplib_dpi *orig_dpi = NULL;
3257 	struct bnxt_qplib_dev_attr *dev_attr;
3258 	struct bnxt_re_ucontext *uctx = NULL;
3259 	struct bnxt_re_resize_cq_req req;
3260 	struct bnxt_re_dev *rdev;
3261 	struct bnxt_re_cq *cq;
3262 	int rc, entries;
3263 
3264 	cq =  container_of(ibcq, struct bnxt_re_cq, ib_cq);
3265 	rdev = cq->rdev;
3266 	dev_attr = rdev->dev_attr;
3267 	if (!ibcq->uobject) {
3268 		ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
3269 		return -EOPNOTSUPP;
3270 	}
3271 
3272 	if (cq->resize_umem) {
3273 		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
3274 			  cq->qplib_cq.id);
3275 		return -EBUSY;
3276 	}
3277 
3278 	/* Check the requested cq depth out of supported depth */
3279 	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3280 		ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
3281 			  cq->qplib_cq.id, cqe);
3282 		return -EINVAL;
3283 	}
3284 
3285 	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
3286 	entries = bnxt_re_init_depth(cqe + 1, uctx);
3287 	if (entries > dev_attr->max_cq_wqes + 1)
3288 		entries = dev_attr->max_cq_wqes + 1;
3289 
3290 	/* uverbs consumer */
3291 	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3292 		rc = -EFAULT;
3293 		goto fail;
3294 	}
3295 
3296 	cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
3297 				      entries * sizeof(struct cq_base),
3298 				      IB_ACCESS_LOCAL_WRITE);
3299 	if (IS_ERR(cq->resize_umem)) {
3300 		rc = PTR_ERR(cq->resize_umem);
3301 		ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %pe\n",
3302 			  __func__, cq->resize_umem);
3303 		cq->resize_umem = NULL;
3304 		goto fail;
3305 	}
3306 	cq->resize_cqe = entries;
3307 	memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
3308 	orig_dpi = cq->qplib_cq.dpi;
3309 
3310 	cq->qplib_cq.sg_info.umem = cq->resize_umem;
3311 	cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
3312 	cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
3313 	cq->qplib_cq.dpi = &uctx->dpi;
3314 
3315 	rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
3316 	if (rc) {
3317 		ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
3318 			  cq->qplib_cq.id);
3319 		goto fail;
3320 	}
3321 
3322 	cq->ib_cq.cqe = cq->resize_cqe;
3323 	atomic_inc(&rdev->stats.res.resize_count);
3324 
3325 	return 0;
3326 
3327 fail:
3328 	if (cq->resize_umem) {
3329 		ib_umem_release(cq->resize_umem);
3330 		cq->resize_umem = NULL;
3331 		cq->resize_cqe = 0;
3332 		memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
3333 		cq->qplib_cq.dpi = orig_dpi;
3334 	}
3335 	return rc;
3336 }
3337 
__req_to_ib_wc_status(u8 qstatus)3338 static u8 __req_to_ib_wc_status(u8 qstatus)
3339 {
3340 	switch (qstatus) {
3341 	case CQ_REQ_STATUS_OK:
3342 		return IB_WC_SUCCESS;
3343 	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
3344 		return IB_WC_BAD_RESP_ERR;
3345 	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
3346 		return IB_WC_LOC_LEN_ERR;
3347 	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
3348 		return IB_WC_LOC_QP_OP_ERR;
3349 	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
3350 		return IB_WC_LOC_PROT_ERR;
3351 	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
3352 		return IB_WC_GENERAL_ERR;
3353 	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
3354 		return IB_WC_REM_INV_REQ_ERR;
3355 	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
3356 		return IB_WC_REM_ACCESS_ERR;
3357 	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
3358 		return IB_WC_REM_OP_ERR;
3359 	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
3360 		return IB_WC_RNR_RETRY_EXC_ERR;
3361 	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
3362 		return IB_WC_RETRY_EXC_ERR;
3363 	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
3364 		return IB_WC_WR_FLUSH_ERR;
3365 	default:
3366 		return IB_WC_GENERAL_ERR;
3367 	}
3368 	return 0;
3369 }
3370 
__rawqp1_to_ib_wc_status(u8 qstatus)3371 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
3372 {
3373 	switch (qstatus) {
3374 	case CQ_RES_RAWETH_QP1_STATUS_OK:
3375 		return IB_WC_SUCCESS;
3376 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
3377 		return IB_WC_LOC_ACCESS_ERR;
3378 	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
3379 		return IB_WC_LOC_LEN_ERR;
3380 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
3381 		return IB_WC_LOC_PROT_ERR;
3382 	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
3383 		return IB_WC_LOC_QP_OP_ERR;
3384 	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
3385 		return IB_WC_GENERAL_ERR;
3386 	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
3387 		return IB_WC_WR_FLUSH_ERR;
3388 	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
3389 		return IB_WC_WR_FLUSH_ERR;
3390 	default:
3391 		return IB_WC_GENERAL_ERR;
3392 	}
3393 }
3394 
__rc_to_ib_wc_status(u8 qstatus)3395 static u8 __rc_to_ib_wc_status(u8 qstatus)
3396 {
3397 	switch (qstatus) {
3398 	case CQ_RES_RC_STATUS_OK:
3399 		return IB_WC_SUCCESS;
3400 	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
3401 		return IB_WC_LOC_ACCESS_ERR;
3402 	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
3403 		return IB_WC_LOC_LEN_ERR;
3404 	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
3405 		return IB_WC_LOC_PROT_ERR;
3406 	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
3407 		return IB_WC_LOC_QP_OP_ERR;
3408 	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
3409 		return IB_WC_GENERAL_ERR;
3410 	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
3411 		return IB_WC_REM_INV_REQ_ERR;
3412 	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
3413 		return IB_WC_WR_FLUSH_ERR;
3414 	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
3415 		return IB_WC_WR_FLUSH_ERR;
3416 	default:
3417 		return IB_WC_GENERAL_ERR;
3418 	}
3419 }
3420 
bnxt_re_process_req_wc(struct ib_wc * wc,struct bnxt_qplib_cqe * cqe)3421 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
3422 {
3423 	switch (cqe->type) {
3424 	case BNXT_QPLIB_SWQE_TYPE_SEND:
3425 		wc->opcode = IB_WC_SEND;
3426 		break;
3427 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3428 		wc->opcode = IB_WC_SEND;
3429 		wc->wc_flags |= IB_WC_WITH_IMM;
3430 		break;
3431 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3432 		wc->opcode = IB_WC_SEND;
3433 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3434 		break;
3435 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3436 		wc->opcode = IB_WC_RDMA_WRITE;
3437 		break;
3438 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3439 		wc->opcode = IB_WC_RDMA_WRITE;
3440 		wc->wc_flags |= IB_WC_WITH_IMM;
3441 		break;
3442 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3443 		wc->opcode = IB_WC_RDMA_READ;
3444 		break;
3445 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3446 		wc->opcode = IB_WC_COMP_SWAP;
3447 		break;
3448 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3449 		wc->opcode = IB_WC_FETCH_ADD;
3450 		break;
3451 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3452 		wc->opcode = IB_WC_LOCAL_INV;
3453 		break;
3454 	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3455 		wc->opcode = IB_WC_REG_MR;
3456 		break;
3457 	default:
3458 		wc->opcode = IB_WC_SEND;
3459 		break;
3460 	}
3461 
3462 	wc->status = __req_to_ib_wc_status(cqe->status);
3463 }
3464 
bnxt_re_check_packet_type(u16 raweth_qp1_flags,u16 raweth_qp1_flags2)3465 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3466 				     u16 raweth_qp1_flags2)
3467 {
3468 	bool is_ipv6 = false, is_ipv4 = false;
3469 
3470 	/* raweth_qp1_flags Bit 9-6 indicates itype */
3471 	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3472 	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3473 		return -1;
3474 
3475 	if (raweth_qp1_flags2 &
3476 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
3477 	    raweth_qp1_flags2 &
3478 	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
3479 		/* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
3480 		(raweth_qp1_flags2 &
3481 		 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
3482 			(is_ipv6 = true) : (is_ipv4 = true);
3483 		return ((is_ipv6) ?
3484 			 BNXT_RE_ROCEV2_IPV6_PACKET :
3485 			 BNXT_RE_ROCEV2_IPV4_PACKET);
3486 	} else {
3487 		return BNXT_RE_ROCE_V1_PACKET;
3488 	}
3489 }
3490 
bnxt_re_to_ib_nw_type(int nw_type)3491 static int bnxt_re_to_ib_nw_type(int nw_type)
3492 {
3493 	u8 nw_hdr_type = 0xFF;
3494 
3495 	switch (nw_type) {
3496 	case BNXT_RE_ROCE_V1_PACKET:
3497 		nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3498 		break;
3499 	case BNXT_RE_ROCEV2_IPV4_PACKET:
3500 		nw_hdr_type = RDMA_NETWORK_IPV4;
3501 		break;
3502 	case BNXT_RE_ROCEV2_IPV6_PACKET:
3503 		nw_hdr_type = RDMA_NETWORK_IPV6;
3504 		break;
3505 	}
3506 	return nw_hdr_type;
3507 }
3508 
bnxt_re_is_loopback_packet(struct bnxt_re_dev * rdev,void * rq_hdr_buf)3509 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3510 				       void *rq_hdr_buf)
3511 {
3512 	u8 *tmp_buf = NULL;
3513 	struct ethhdr *eth_hdr;
3514 	u16 eth_type;
3515 	bool rc = false;
3516 
3517 	tmp_buf = (u8 *)rq_hdr_buf;
3518 	/*
3519 	 * If dest mac is not same as I/F mac, this could be a
3520 	 * loopback address or multicast address, check whether
3521 	 * it is a loopback packet
3522 	 */
3523 	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3524 		tmp_buf += 4;
3525 		/* Check the  ether type */
3526 		eth_hdr = (struct ethhdr *)tmp_buf;
3527 		eth_type = ntohs(eth_hdr->h_proto);
3528 		switch (eth_type) {
3529 		case ETH_P_IBOE:
3530 			rc = true;
3531 			break;
3532 		case ETH_P_IP:
3533 		case ETH_P_IPV6: {
3534 			u32 len;
3535 			struct udphdr *udp_hdr;
3536 
3537 			len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3538 						      sizeof(struct ipv6hdr));
3539 			tmp_buf += sizeof(struct ethhdr) + len;
3540 			udp_hdr = (struct udphdr *)tmp_buf;
3541 			if (ntohs(udp_hdr->dest) ==
3542 				    ROCE_V2_UDP_DPORT)
3543 				rc = true;
3544 			break;
3545 			}
3546 		default:
3547 			break;
3548 		}
3549 	}
3550 
3551 	return rc;
3552 }
3553 
bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp * gsi_qp,struct bnxt_qplib_cqe * cqe)3554 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3555 					 struct bnxt_qplib_cqe *cqe)
3556 {
3557 	struct bnxt_re_dev *rdev = gsi_qp->rdev;
3558 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3559 	struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3560 	dma_addr_t shrq_hdr_buf_map;
3561 	struct ib_sge s_sge[2] = {};
3562 	struct ib_sge r_sge[2] = {};
3563 	struct bnxt_re_ah *gsi_sah;
3564 	struct ib_recv_wr rwr = {};
3565 	dma_addr_t rq_hdr_buf_map;
3566 	struct ib_ud_wr udwr = {};
3567 	struct ib_send_wr *swr;
3568 	u32 skip_bytes = 0;
3569 	int pkt_type = 0;
3570 	void *rq_hdr_buf;
3571 	u32 offset = 0;
3572 	u32 tbl_idx;
3573 	int rc;
3574 
3575 	swr = &udwr.wr;
3576 	tbl_idx = cqe->wr_id;
3577 
3578 	rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3579 			(tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3580 	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3581 							  tbl_idx);
3582 
3583 	/* Shadow QP header buffer */
3584 	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3585 							    tbl_idx);
3586 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3587 
3588 	/* Store this cqe */
3589 	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3590 	sqp_entry->qp1_qp = gsi_qp;
3591 
3592 	/* Find packet type from the cqe */
3593 
3594 	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3595 					     cqe->raweth_qp1_flags2);
3596 	if (pkt_type < 0) {
3597 		ibdev_err(&rdev->ibdev, "Invalid packet\n");
3598 		return -EINVAL;
3599 	}
3600 
3601 	/* Adjust the offset for the user buffer and post in the rq */
3602 
3603 	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3604 		offset = 20;
3605 
3606 	/*
3607 	 * QP1 loopback packet has 4 bytes of internal header before
3608 	 * ether header. Skip these four bytes.
3609 	 */
3610 	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3611 		skip_bytes = 4;
3612 
3613 	/* First send SGE . Skip the ether header*/
3614 	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3615 			+ skip_bytes;
3616 	s_sge[0].lkey = 0xFFFFFFFF;
3617 	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3618 				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3619 
3620 	/* Second Send SGE */
3621 	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3622 			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3623 	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3624 		s_sge[1].addr += 8;
3625 	s_sge[1].lkey = 0xFFFFFFFF;
3626 	s_sge[1].length = 256;
3627 
3628 	/* First recv SGE */
3629 
3630 	r_sge[0].addr = shrq_hdr_buf_map;
3631 	r_sge[0].lkey = 0xFFFFFFFF;
3632 	r_sge[0].length = 40;
3633 
3634 	r_sge[1].addr = sqp_entry->sge.addr + offset;
3635 	r_sge[1].lkey = sqp_entry->sge.lkey;
3636 	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3637 
3638 	/* Create receive work request */
3639 	rwr.num_sge = 2;
3640 	rwr.sg_list = r_sge;
3641 	rwr.wr_id = tbl_idx;
3642 	rwr.next = NULL;
3643 
3644 	rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3645 	if (rc) {
3646 		ibdev_err(&rdev->ibdev,
3647 			  "Failed to post Rx buffers to shadow QP");
3648 		return -ENOMEM;
3649 	}
3650 
3651 	swr->num_sge = 2;
3652 	swr->sg_list = s_sge;
3653 	swr->wr_id = tbl_idx;
3654 	swr->opcode = IB_WR_SEND;
3655 	swr->next = NULL;
3656 	gsi_sah = rdev->gsi_ctx.gsi_sah;
3657 	udwr.ah = &gsi_sah->ib_ah;
3658 	udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3659 	udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3660 
3661 	/* post data received  in the send queue */
3662 	return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3663 }
3664 
bnxt_re_process_res_rawqp1_wc(struct ib_wc * wc,struct bnxt_qplib_cqe * cqe)3665 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3666 					  struct bnxt_qplib_cqe *cqe)
3667 {
3668 	wc->opcode = IB_WC_RECV;
3669 	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3670 	wc->wc_flags |= IB_WC_GRH;
3671 }
3672 
bnxt_re_check_if_vlan_valid(struct bnxt_re_dev * rdev,u16 vlan_id)3673 static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3674 					u16 vlan_id)
3675 {
3676 	/*
3677 	 * Check if the vlan is configured in the host.  If not configured, it
3678 	 * can be a transparent VLAN. So dont report the vlan id.
3679 	 */
3680 	if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3681 				      htons(ETH_P_8021Q), vlan_id))
3682 		return false;
3683 	return true;
3684 }
3685 
bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe * orig_cqe,u16 * vid,u8 * sl)3686 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3687 				u16 *vid, u8 *sl)
3688 {
3689 	bool ret = false;
3690 	u32 metadata;
3691 	u16 tpid;
3692 
3693 	metadata = orig_cqe->raweth_qp1_metadata;
3694 	if (orig_cqe->raweth_qp1_flags2 &
3695 		CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3696 		tpid = ((metadata &
3697 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3698 			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3699 		if (tpid == ETH_P_8021Q) {
3700 			*vid = metadata &
3701 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3702 			*sl = (metadata &
3703 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3704 			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3705 			ret = true;
3706 		}
3707 	}
3708 
3709 	return ret;
3710 }
3711 
bnxt_re_process_res_rc_wc(struct ib_wc * wc,struct bnxt_qplib_cqe * cqe)3712 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3713 				      struct bnxt_qplib_cqe *cqe)
3714 {
3715 	wc->opcode = IB_WC_RECV;
3716 	wc->status = __rc_to_ib_wc_status(cqe->status);
3717 
3718 	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3719 		wc->wc_flags |= IB_WC_WITH_IMM;
3720 	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3721 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3722 	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3723 	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3724 		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3725 }
3726 
bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp * gsi_sqp,struct ib_wc * wc,struct bnxt_qplib_cqe * cqe)3727 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3728 					     struct ib_wc *wc,
3729 					     struct bnxt_qplib_cqe *cqe)
3730 {
3731 	struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3732 	struct bnxt_re_qp *gsi_qp = NULL;
3733 	struct bnxt_qplib_cqe *orig_cqe = NULL;
3734 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3735 	int nw_type;
3736 	u32 tbl_idx;
3737 	u16 vlan_id;
3738 	u8 sl;
3739 
3740 	tbl_idx = cqe->wr_id;
3741 
3742 	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3743 	gsi_qp = sqp_entry->qp1_qp;
3744 	orig_cqe = &sqp_entry->cqe;
3745 
3746 	wc->wr_id = sqp_entry->wrid;
3747 	wc->byte_len = orig_cqe->length;
3748 	wc->qp = &gsi_qp->ib_qp;
3749 
3750 	wc->ex.imm_data = cpu_to_be32(orig_cqe->immdata);
3751 	wc->src_qp = orig_cqe->src_qp;
3752 	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3753 	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3754 		if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3755 			wc->vlan_id = vlan_id;
3756 			wc->sl = sl;
3757 			wc->wc_flags |= IB_WC_WITH_VLAN;
3758 		}
3759 	}
3760 	wc->port_num = 1;
3761 	wc->vendor_err = orig_cqe->status;
3762 
3763 	wc->opcode = IB_WC_RECV;
3764 	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3765 	wc->wc_flags |= IB_WC_GRH;
3766 
3767 	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3768 					    orig_cqe->raweth_qp1_flags2);
3769 	if (nw_type >= 0) {
3770 		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3771 		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3772 	}
3773 }
3774 
bnxt_re_process_res_ud_wc(struct bnxt_re_qp * qp,struct ib_wc * wc,struct bnxt_qplib_cqe * cqe)3775 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3776 				      struct ib_wc *wc,
3777 				      struct bnxt_qplib_cqe *cqe)
3778 {
3779 	struct bnxt_re_dev *rdev;
3780 	u16 vlan_id = 0;
3781 	u8 nw_type;
3782 
3783 	rdev = qp->rdev;
3784 	wc->opcode = IB_WC_RECV;
3785 	wc->status = __rc_to_ib_wc_status(cqe->status);
3786 
3787 	if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3788 		wc->wc_flags |= IB_WC_WITH_IMM;
3789 	/* report only on GSI QP for Thor */
3790 	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3791 		wc->wc_flags |= IB_WC_GRH;
3792 		memcpy(wc->smac, cqe->smac, ETH_ALEN);
3793 		wc->wc_flags |= IB_WC_WITH_SMAC;
3794 		if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3795 			vlan_id = (cqe->cfa_meta & 0xFFF);
3796 		}
3797 		/* Mark only if vlan_id is non zero */
3798 		if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3799 			wc->vlan_id = vlan_id;
3800 			wc->wc_flags |= IB_WC_WITH_VLAN;
3801 		}
3802 		nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3803 			   CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3804 		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3805 		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3806 	}
3807 
3808 }
3809 
send_phantom_wqe(struct bnxt_re_qp * qp)3810 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3811 {
3812 	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3813 	unsigned long flags;
3814 	int rc;
3815 
3816 	spin_lock_irqsave(&qp->sq_lock, flags);
3817 
3818 	rc = bnxt_re_bind_fence_mw(lib_qp);
3819 	if (!rc) {
3820 		lib_qp->sq.phantom_wqe_cnt++;
3821 		ibdev_dbg(&qp->rdev->ibdev,
3822 			  "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3823 			  lib_qp->id, lib_qp->sq.hwq.prod,
3824 			  HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3825 			  lib_qp->sq.phantom_wqe_cnt);
3826 	}
3827 
3828 	spin_unlock_irqrestore(&qp->sq_lock, flags);
3829 	return rc;
3830 }
3831 
bnxt_re_poll_cq(struct ib_cq * ib_cq,int num_entries,struct ib_wc * wc)3832 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3833 {
3834 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3835 	struct bnxt_re_qp *qp, *sh_qp;
3836 	struct bnxt_qplib_cqe *cqe;
3837 	int i, ncqe, budget;
3838 	struct bnxt_qplib_q *sq;
3839 	struct bnxt_qplib_qp *lib_qp;
3840 	u32 tbl_idx;
3841 	struct bnxt_re_sqp_entries *sqp_entry = NULL;
3842 	unsigned long flags;
3843 
3844 	/* User CQ; the only processing we do is to
3845 	 * complete any pending CQ resize operation.
3846 	 */
3847 	if (cq->umem) {
3848 		if (cq->resize_umem)
3849 			bnxt_re_resize_cq_complete(cq);
3850 		return 0;
3851 	}
3852 
3853 	spin_lock_irqsave(&cq->cq_lock, flags);
3854 	budget = min_t(u32, num_entries, cq->max_cql);
3855 	num_entries = budget;
3856 	if (!cq->cql) {
3857 		ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3858 		goto exit;
3859 	}
3860 	cqe = &cq->cql[0];
3861 	while (budget) {
3862 		lib_qp = NULL;
3863 		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3864 		if (lib_qp) {
3865 			sq = &lib_qp->sq;
3866 			if (sq->send_phantom) {
3867 				qp = container_of(lib_qp,
3868 						  struct bnxt_re_qp, qplib_qp);
3869 				if (send_phantom_wqe(qp) == -ENOMEM)
3870 					ibdev_err(&cq->rdev->ibdev,
3871 						  "Phantom failed! Scheduled to send again\n");
3872 				else
3873 					sq->send_phantom = false;
3874 			}
3875 		}
3876 		if (ncqe < budget)
3877 			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3878 							      cqe + ncqe,
3879 							      budget - ncqe);
3880 
3881 		if (!ncqe)
3882 			break;
3883 
3884 		for (i = 0; i < ncqe; i++, cqe++) {
3885 			/* Transcribe each qplib_wqe back to ib_wc */
3886 			memset(wc, 0, sizeof(*wc));
3887 
3888 			wc->wr_id = cqe->wr_id;
3889 			wc->byte_len = cqe->length;
3890 			qp = container_of
3891 				((struct bnxt_qplib_qp *)
3892 				 (unsigned long)(cqe->qp_handle),
3893 				 struct bnxt_re_qp, qplib_qp);
3894 			wc->qp = &qp->ib_qp;
3895 			if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3896 				wc->ex.imm_data = cpu_to_be32(cqe->immdata);
3897 			else
3898 				wc->ex.invalidate_rkey = cqe->invrkey;
3899 			wc->src_qp = cqe->src_qp;
3900 			memcpy(wc->smac, cqe->smac, ETH_ALEN);
3901 			wc->port_num = 1;
3902 			wc->vendor_err = cqe->status;
3903 
3904 			switch (cqe->opcode) {
3905 			case CQ_BASE_CQE_TYPE_REQ:
3906 				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3907 				if (sh_qp &&
3908 				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3909 					/* Handle this completion with
3910 					 * the stored completion
3911 					 */
3912 					memset(wc, 0, sizeof(*wc));
3913 					continue;
3914 				}
3915 				bnxt_re_process_req_wc(wc, cqe);
3916 				break;
3917 			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3918 				if (!cqe->status) {
3919 					int rc = 0;
3920 
3921 					rc = bnxt_re_process_raw_qp_pkt_rx
3922 								(qp, cqe);
3923 					if (!rc) {
3924 						memset(wc, 0, sizeof(*wc));
3925 						continue;
3926 					}
3927 					cqe->status = -1;
3928 				}
3929 				/* Errors need not be looped back.
3930 				 * But change the wr_id to the one
3931 				 * stored in the table
3932 				 */
3933 				tbl_idx = cqe->wr_id;
3934 				sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
3935 				wc->wr_id = sqp_entry->wrid;
3936 				bnxt_re_process_res_rawqp1_wc(wc, cqe);
3937 				break;
3938 			case CQ_BASE_CQE_TYPE_RES_RC:
3939 				bnxt_re_process_res_rc_wc(wc, cqe);
3940 				break;
3941 			case CQ_BASE_CQE_TYPE_RES_UD:
3942 				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3943 				if (sh_qp &&
3944 				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3945 					/* Handle this completion with
3946 					 * the stored completion
3947 					 */
3948 					if (cqe->status) {
3949 						continue;
3950 					} else {
3951 						bnxt_re_process_res_shadow_qp_wc
3952 								(qp, wc, cqe);
3953 						break;
3954 					}
3955 				}
3956 				bnxt_re_process_res_ud_wc(qp, wc, cqe);
3957 				break;
3958 			default:
3959 				ibdev_err(&cq->rdev->ibdev,
3960 					  "POLL CQ : type 0x%x not handled",
3961 					  cqe->opcode);
3962 				continue;
3963 			}
3964 			wc++;
3965 			budget--;
3966 		}
3967 	}
3968 exit:
3969 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3970 	return num_entries - budget;
3971 }
3972 
bnxt_re_req_notify_cq(struct ib_cq * ib_cq,enum ib_cq_notify_flags ib_cqn_flags)3973 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3974 			  enum ib_cq_notify_flags ib_cqn_flags)
3975 {
3976 	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3977 	int type = 0, rc = 0;
3978 	unsigned long flags;
3979 
3980 	spin_lock_irqsave(&cq->cq_lock, flags);
3981 	/* Trigger on the very next completion */
3982 	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3983 		type = DBC_DBC_TYPE_CQ_ARMALL;
3984 	/* Trigger on the next solicited completion */
3985 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
3986 		type = DBC_DBC_TYPE_CQ_ARMSE;
3987 
3988 	/* Poll to see if there are missed events */
3989 	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3990 	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3991 		rc = 1;
3992 		goto exit;
3993 	}
3994 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3995 
3996 exit:
3997 	spin_unlock_irqrestore(&cq->cq_lock, flags);
3998 	return rc;
3999 }
4000 
4001 /* Memory Regions */
bnxt_re_get_dma_mr(struct ib_pd * ib_pd,int mr_access_flags)4002 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
4003 {
4004 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4005 	struct bnxt_re_dev *rdev = pd->rdev;
4006 	struct bnxt_re_mr *mr;
4007 	u32 active_mrs;
4008 	int rc;
4009 
4010 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4011 	if (!mr)
4012 		return ERR_PTR(-ENOMEM);
4013 
4014 	mr->rdev = rdev;
4015 	mr->qplib_mr.pd = &pd->qplib_pd;
4016 	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
4017 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
4018 
4019 	if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
4020 		bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
4021 
4022 	/* Allocate and register 0 as the address */
4023 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4024 	if (rc)
4025 		goto fail;
4026 
4027 	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
4028 	mr->qplib_mr.total_size = -1; /* Infinte length */
4029 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
4030 			       PAGE_SIZE);
4031 	if (rc)
4032 		goto fail_mr;
4033 
4034 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
4035 	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
4036 			       IB_ACCESS_REMOTE_ATOMIC))
4037 		mr->ib_mr.rkey = mr->ib_mr.lkey;
4038 	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4039 	if (active_mrs > rdev->stats.res.mr_watermark)
4040 		rdev->stats.res.mr_watermark = active_mrs;
4041 
4042 	return &mr->ib_mr;
4043 
4044 fail_mr:
4045 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4046 fail:
4047 	kfree(mr);
4048 	return ERR_PTR(rc);
4049 }
4050 
bnxt_re_dereg_mr(struct ib_mr * ib_mr,struct ib_udata * udata)4051 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
4052 {
4053 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
4054 	struct bnxt_re_dev *rdev = mr->rdev;
4055 	int rc;
4056 
4057 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4058 	if (rc) {
4059 		ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
4060 		return rc;
4061 	}
4062 
4063 	if (mr->pages) {
4064 		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
4065 							&mr->qplib_frpl);
4066 		kfree(mr->pages);
4067 		mr->npages = 0;
4068 		mr->pages = NULL;
4069 	}
4070 	ib_umem_release(mr->ib_umem);
4071 
4072 	kfree(mr);
4073 	atomic_dec(&rdev->stats.res.mr_count);
4074 	return rc;
4075 }
4076 
bnxt_re_set_page(struct ib_mr * ib_mr,u64 addr)4077 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
4078 {
4079 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
4080 
4081 	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
4082 		return -ENOMEM;
4083 
4084 	mr->pages[mr->npages++] = addr;
4085 	return 0;
4086 }
4087 
bnxt_re_map_mr_sg(struct ib_mr * ib_mr,struct scatterlist * sg,int sg_nents,unsigned int * sg_offset)4088 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
4089 		      unsigned int *sg_offset)
4090 {
4091 	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
4092 
4093 	mr->npages = 0;
4094 	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
4095 }
4096 
bnxt_re_alloc_mr(struct ib_pd * ib_pd,enum ib_mr_type type,u32 max_num_sg)4097 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
4098 			       u32 max_num_sg)
4099 {
4100 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4101 	struct bnxt_re_dev *rdev = pd->rdev;
4102 	struct bnxt_re_mr *mr = NULL;
4103 	u32 active_mrs;
4104 	int rc;
4105 
4106 	if (type != IB_MR_TYPE_MEM_REG) {
4107 		ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
4108 		return ERR_PTR(-EINVAL);
4109 	}
4110 	if (max_num_sg > MAX_PBL_LVL_1_PGS)
4111 		return ERR_PTR(-EINVAL);
4112 
4113 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4114 	if (!mr)
4115 		return ERR_PTR(-ENOMEM);
4116 
4117 	mr->rdev = rdev;
4118 	mr->qplib_mr.pd = &pd->qplib_pd;
4119 	mr->qplib_mr.access_flags = BNXT_QPLIB_FR_PMR;
4120 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
4121 
4122 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4123 	if (rc)
4124 		goto bail;
4125 
4126 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
4127 	mr->ib_mr.rkey = mr->ib_mr.lkey;
4128 
4129 	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
4130 	if (!mr->pages) {
4131 		rc = -ENOMEM;
4132 		goto fail;
4133 	}
4134 	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
4135 						 &mr->qplib_frpl, max_num_sg);
4136 	if (rc) {
4137 		ibdev_err(&rdev->ibdev,
4138 			  "Failed to allocate HW FR page list");
4139 		goto fail_mr;
4140 	}
4141 
4142 	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4143 	if (active_mrs > rdev->stats.res.mr_watermark)
4144 		rdev->stats.res.mr_watermark = active_mrs;
4145 	return &mr->ib_mr;
4146 
4147 fail_mr:
4148 	kfree(mr->pages);
4149 fail:
4150 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4151 bail:
4152 	kfree(mr);
4153 	return ERR_PTR(rc);
4154 }
4155 
bnxt_re_alloc_mw(struct ib_pd * ib_pd,enum ib_mw_type type,struct ib_udata * udata)4156 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
4157 			       struct ib_udata *udata)
4158 {
4159 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4160 	struct bnxt_re_dev *rdev = pd->rdev;
4161 	struct bnxt_re_mw *mw;
4162 	u32 active_mws;
4163 	int rc;
4164 
4165 	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
4166 	if (!mw)
4167 		return ERR_PTR(-ENOMEM);
4168 	mw->rdev = rdev;
4169 	mw->qplib_mw.pd = &pd->qplib_pd;
4170 
4171 	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
4172 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
4173 			       CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
4174 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
4175 	if (rc) {
4176 		ibdev_err(&rdev->ibdev, "Allocate MW failed!");
4177 		goto fail;
4178 	}
4179 	mw->ib_mw.rkey = mw->qplib_mw.rkey;
4180 
4181 	active_mws = atomic_inc_return(&rdev->stats.res.mw_count);
4182 	if (active_mws > rdev->stats.res.mw_watermark)
4183 		rdev->stats.res.mw_watermark = active_mws;
4184 	return &mw->ib_mw;
4185 
4186 fail:
4187 	kfree(mw);
4188 	return ERR_PTR(rc);
4189 }
4190 
bnxt_re_dealloc_mw(struct ib_mw * ib_mw)4191 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
4192 {
4193 	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
4194 	struct bnxt_re_dev *rdev = mw->rdev;
4195 	int rc;
4196 
4197 	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
4198 	if (rc) {
4199 		ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
4200 		return rc;
4201 	}
4202 
4203 	kfree(mw);
4204 	atomic_dec(&rdev->stats.res.mw_count);
4205 	return rc;
4206 }
4207 
__bnxt_re_user_reg_mr(struct ib_pd * ib_pd,u64 length,u64 virt_addr,int mr_access_flags,struct ib_umem * umem)4208 static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
4209 					   int mr_access_flags, struct ib_umem *umem)
4210 {
4211 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4212 	struct bnxt_re_dev *rdev = pd->rdev;
4213 	unsigned long page_size;
4214 	struct bnxt_re_mr *mr;
4215 	int umem_pgs, rc;
4216 	u32 active_mrs;
4217 
4218 	if (length > BNXT_RE_MAX_MR_SIZE) {
4219 		ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
4220 			  length, BNXT_RE_MAX_MR_SIZE);
4221 		return ERR_PTR(-ENOMEM);
4222 	}
4223 
4224 	page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
4225 	if (!page_size) {
4226 		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
4227 		return ERR_PTR(-EINVAL);
4228 	}
4229 
4230 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4231 	if (!mr)
4232 		return ERR_PTR(-ENOMEM);
4233 
4234 	mr->rdev = rdev;
4235 	mr->qplib_mr.pd = &pd->qplib_pd;
4236 	mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
4237 	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
4238 
4239 	if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
4240 		rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4241 		if (rc) {
4242 			ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
4243 			rc = -EIO;
4244 			goto free_mr;
4245 		}
4246 		/* The fixed portion of the rkey is the same as the lkey */
4247 		mr->ib_mr.rkey = mr->qplib_mr.rkey;
4248 	} else {
4249 		mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
4250 	}
4251 	mr->ib_umem = umem;
4252 	mr->qplib_mr.va = virt_addr;
4253 	mr->qplib_mr.total_size = length;
4254 
4255 	if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
4256 		bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
4257 
4258 	umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
4259 	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
4260 			       umem_pgs, page_size);
4261 	if (rc) {
4262 		ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
4263 		rc = -EIO;
4264 		goto free_mrw;
4265 	}
4266 
4267 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
4268 	mr->ib_mr.rkey = mr->qplib_mr.lkey;
4269 	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4270 	if (active_mrs > rdev->stats.res.mr_watermark)
4271 		rdev->stats.res.mr_watermark = active_mrs;
4272 
4273 	return &mr->ib_mr;
4274 
4275 free_mrw:
4276 	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4277 free_mr:
4278 	kfree(mr);
4279 	return ERR_PTR(rc);
4280 }
4281 
bnxt_re_reg_user_mr(struct ib_pd * ib_pd,u64 start,u64 length,u64 virt_addr,int mr_access_flags,struct ib_dmah * dmah,struct ib_udata * udata)4282 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
4283 				  u64 virt_addr, int mr_access_flags,
4284 				  struct ib_dmah *dmah,
4285 				  struct ib_udata *udata)
4286 {
4287 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4288 	struct bnxt_re_dev *rdev = pd->rdev;
4289 	struct ib_umem *umem;
4290 	struct ib_mr *ib_mr;
4291 
4292 	if (dmah)
4293 		return ERR_PTR(-EOPNOTSUPP);
4294 
4295 	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
4296 	if (IS_ERR(umem))
4297 		return ERR_CAST(umem);
4298 
4299 	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4300 	if (IS_ERR(ib_mr))
4301 		ib_umem_release(umem);
4302 	return ib_mr;
4303 }
4304 
bnxt_re_reg_user_mr_dmabuf(struct ib_pd * ib_pd,u64 start,u64 length,u64 virt_addr,int fd,int mr_access_flags,struct ib_dmah * dmah,struct uverbs_attr_bundle * attrs)4305 struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
4306 					 u64 length, u64 virt_addr, int fd,
4307 					 int mr_access_flags,
4308 					 struct ib_dmah *dmah,
4309 					 struct uverbs_attr_bundle *attrs)
4310 {
4311 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4312 	struct bnxt_re_dev *rdev = pd->rdev;
4313 	struct ib_umem_dmabuf *umem_dmabuf;
4314 	struct ib_umem *umem;
4315 	struct ib_mr *ib_mr;
4316 
4317 	if (dmah)
4318 		return ERR_PTR(-EOPNOTSUPP);
4319 
4320 	umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
4321 						fd, mr_access_flags);
4322 	if (IS_ERR(umem_dmabuf))
4323 		return ERR_CAST(umem_dmabuf);
4324 
4325 	umem = &umem_dmabuf->umem;
4326 
4327 	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4328 	if (IS_ERR(ib_mr))
4329 		ib_umem_release(umem);
4330 	return ib_mr;
4331 }
4332 
bnxt_re_alloc_ucontext(struct ib_ucontext * ctx,struct ib_udata * udata)4333 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
4334 {
4335 	struct ib_device *ibdev = ctx->device;
4336 	struct bnxt_re_ucontext *uctx =
4337 		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
4338 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
4339 	struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
4340 	struct bnxt_re_user_mmap_entry *entry;
4341 	struct bnxt_re_uctx_resp resp = {};
4342 	struct bnxt_re_uctx_req ureq = {};
4343 	u32 chip_met_rev_num = 0;
4344 	int rc;
4345 
4346 	ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
4347 
4348 	if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
4349 		ibdev_dbg(ibdev, " is different from the device %d ",
4350 			  BNXT_RE_ABI_VERSION);
4351 		return -EPERM;
4352 	}
4353 
4354 	uctx->rdev = rdev;
4355 
4356 	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
4357 	if (!uctx->shpg) {
4358 		rc = -ENOMEM;
4359 		goto fail;
4360 	}
4361 	spin_lock_init(&uctx->sh_lock);
4362 
4363 	resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
4364 	chip_met_rev_num = rdev->chip_ctx->chip_num;
4365 	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
4366 			     BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
4367 	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
4368 			     BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
4369 	resp.chip_id0 = chip_met_rev_num;
4370 	/*Temp, Use xa_alloc instead */
4371 	resp.dev_id = rdev->en_dev->pdev->devfn;
4372 	resp.max_qp = rdev->qplib_ctx.qpc_count;
4373 	resp.pg_size = PAGE_SIZE;
4374 	resp.cqe_sz = sizeof(struct cq_base);
4375 	resp.max_cqd = dev_attr->max_cq_wqes;
4376 
4377 	if (rdev->chip_ctx->modes.db_push)
4378 		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
4379 
4380 	entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
4381 	if (!entry) {
4382 		rc = -ENOMEM;
4383 		goto cfail;
4384 	}
4385 	uctx->shpage_mmap = &entry->rdma_entry;
4386 	if (rdev->pacing.dbr_pacing)
4387 		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
4388 
4389 	if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
4390 		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_MSN_TABLE_ENABLED;
4391 
4392 	if (udata->inlen >= sizeof(ureq)) {
4393 		rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
4394 		if (rc)
4395 			goto cfail;
4396 		if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
4397 			resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
4398 			uctx->cmask |= BNXT_RE_UCNTX_CAP_POW2_DISABLED;
4399 		}
4400 		if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT) {
4401 			resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
4402 			resp.mode = rdev->chip_ctx->modes.wqe_mode;
4403 			if (resp.mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
4404 				uctx->cmask |= BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
4405 		}
4406 	}
4407 
4408 	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
4409 	if (rc) {
4410 		ibdev_err(ibdev, "Failed to copy user context");
4411 		rc = -EFAULT;
4412 		goto cfail;
4413 	}
4414 
4415 	return 0;
4416 cfail:
4417 	free_page((unsigned long)uctx->shpg);
4418 	uctx->shpg = NULL;
4419 fail:
4420 	return rc;
4421 }
4422 
bnxt_re_dealloc_ucontext(struct ib_ucontext * ib_uctx)4423 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
4424 {
4425 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4426 						   struct bnxt_re_ucontext,
4427 						   ib_uctx);
4428 
4429 	struct bnxt_re_dev *rdev = uctx->rdev;
4430 
4431 	rdma_user_mmap_entry_remove(uctx->shpage_mmap);
4432 	uctx->shpage_mmap = NULL;
4433 	if (uctx->shpg)
4434 		free_page((unsigned long)uctx->shpg);
4435 
4436 	if (uctx->dpi.dbr) {
4437 		/* Free DPI only if this is the first PD allocated by the
4438 		 * application and mark the context dpi as NULL
4439 		 */
4440 		bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
4441 		uctx->dpi.dbr = NULL;
4442 	}
4443 }
4444 
bnxt_re_setup_vnic(struct bnxt_re_dev * rdev,struct bnxt_re_qp * qp)4445 static int bnxt_re_setup_vnic(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
4446 {
4447 	int rc;
4448 
4449 	rc = bnxt_re_hwrm_alloc_vnic(rdev);
4450 	if (rc)
4451 		return rc;
4452 
4453 	rc = bnxt_re_hwrm_cfg_vnic(rdev, qp->qplib_qp.id);
4454 	if (rc)
4455 		goto out_free_vnic;
4456 
4457 	return 0;
4458 out_free_vnic:
4459 	bnxt_re_hwrm_free_vnic(rdev);
4460 	return rc;
4461 }
4462 
bnxt_re_create_flow(struct ib_qp * ib_qp,struct ib_flow_attr * attr,struct ib_udata * udata)4463 struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
4464 				    struct ib_flow_attr *attr,
4465 				    struct ib_udata *udata)
4466 {
4467 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
4468 	struct bnxt_re_dev *rdev = qp->rdev;
4469 	struct bnxt_re_flow *flow;
4470 	int rc;
4471 
4472 	if (attr->type != IB_FLOW_ATTR_SNIFFER ||
4473 	    !rdev->rcfw.roce_mirror)
4474 		return ERR_PTR(-EOPNOTSUPP);
4475 
4476 	mutex_lock(&rdev->qp_lock);
4477 	if (rdev->sniffer_flow_created) {
4478 		ibdev_err(&rdev->ibdev, "RoCE Mirroring is already Configured\n");
4479 		mutex_unlock(&rdev->qp_lock);
4480 		return ERR_PTR(-EBUSY);
4481 	}
4482 
4483 	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
4484 	if (!flow) {
4485 		mutex_unlock(&rdev->qp_lock);
4486 		return ERR_PTR(-ENOMEM);
4487 	}
4488 
4489 	flow->rdev = rdev;
4490 
4491 	rc = bnxt_re_setup_vnic(rdev, qp);
4492 	if (rc)
4493 		goto out_free_flow;
4494 
4495 	rc = bnxt_qplib_create_flow(&rdev->qplib_res);
4496 	if (rc)
4497 		goto out_free_vnic;
4498 
4499 	rdev->sniffer_flow_created = 1;
4500 	mutex_unlock(&rdev->qp_lock);
4501 
4502 	return &flow->ib_flow;
4503 
4504 out_free_vnic:
4505 	bnxt_re_hwrm_free_vnic(rdev);
4506 out_free_flow:
4507 	mutex_unlock(&rdev->qp_lock);
4508 	kfree(flow);
4509 	return ERR_PTR(rc);
4510 }
4511 
bnxt_re_destroy_flow(struct ib_flow * flow_id)4512 int bnxt_re_destroy_flow(struct ib_flow *flow_id)
4513 {
4514 	struct bnxt_re_flow *flow =
4515 		container_of(flow_id, struct bnxt_re_flow, ib_flow);
4516 	struct bnxt_re_dev *rdev = flow->rdev;
4517 	int rc;
4518 
4519 	mutex_lock(&rdev->qp_lock);
4520 	rc = bnxt_qplib_destroy_flow(&rdev->qplib_res);
4521 	if (rc)
4522 		ibdev_dbg(&rdev->ibdev, "failed to destroy_flow rc = %d\n", rc);
4523 	rdev->sniffer_flow_created = 0;
4524 
4525 	bnxt_re_hwrm_free_vnic(rdev);
4526 	mutex_unlock(&rdev->qp_lock);
4527 	kfree(flow);
4528 
4529 	return rc;
4530 }
4531 
bnxt_re_search_for_cq(struct bnxt_re_dev * rdev,u32 cq_id)4532 static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
4533 {
4534 	struct bnxt_re_cq *cq = NULL, *tmp_cq;
4535 
4536 	hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) {
4537 		if (tmp_cq->qplib_cq.id == cq_id) {
4538 			cq = tmp_cq;
4539 			break;
4540 		}
4541 	}
4542 	return cq;
4543 }
4544 
bnxt_re_search_for_srq(struct bnxt_re_dev * rdev,u32 srq_id)4545 static struct bnxt_re_srq *bnxt_re_search_for_srq(struct bnxt_re_dev *rdev, u32 srq_id)
4546 {
4547 	struct bnxt_re_srq *srq = NULL, *tmp_srq;
4548 
4549 	hash_for_each_possible(rdev->srq_hash, tmp_srq, hash_entry, srq_id) {
4550 		if (tmp_srq->qplib_srq.id == srq_id) {
4551 			srq = tmp_srq;
4552 			break;
4553 		}
4554 	}
4555 	return srq;
4556 }
4557 
4558 /* Helper function to mmap the virtual memory from user app */
bnxt_re_mmap(struct ib_ucontext * ib_uctx,struct vm_area_struct * vma)4559 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
4560 {
4561 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4562 						   struct bnxt_re_ucontext,
4563 						   ib_uctx);
4564 	struct bnxt_re_user_mmap_entry *bnxt_entry;
4565 	struct rdma_user_mmap_entry *rdma_entry;
4566 	int ret = 0;
4567 	u64 pfn;
4568 
4569 	rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
4570 	if (!rdma_entry)
4571 		return -EINVAL;
4572 
4573 	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4574 				  rdma_entry);
4575 
4576 	switch (bnxt_entry->mmap_flag) {
4577 	case BNXT_RE_MMAP_WC_DB:
4578 		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4579 		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4580 					pgprot_writecombine(vma->vm_page_prot),
4581 					rdma_entry);
4582 		break;
4583 	case BNXT_RE_MMAP_UC_DB:
4584 		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4585 		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4586 					pgprot_noncached(vma->vm_page_prot),
4587 				rdma_entry);
4588 		break;
4589 	case BNXT_RE_MMAP_SH_PAGE:
4590 		ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
4591 		break;
4592 	case BNXT_RE_MMAP_DBR_BAR:
4593 		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4594 		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4595 					pgprot_noncached(vma->vm_page_prot),
4596 					rdma_entry);
4597 		break;
4598 	case BNXT_RE_MMAP_DBR_PAGE:
4599 	case BNXT_RE_MMAP_TOGGLE_PAGE:
4600 		/* Driver doesn't expect write access for user space */
4601 		if (vma->vm_flags & VM_WRITE)
4602 			ret = -EFAULT;
4603 		else
4604 			ret = vm_insert_page(vma, vma->vm_start,
4605 					     virt_to_page((void *)bnxt_entry->mem_offset));
4606 		break;
4607 	default:
4608 		ret = -EINVAL;
4609 		break;
4610 	}
4611 
4612 	rdma_user_mmap_entry_put(rdma_entry);
4613 	return ret;
4614 }
4615 
bnxt_re_mmap_free(struct rdma_user_mmap_entry * rdma_entry)4616 void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
4617 {
4618 	struct bnxt_re_user_mmap_entry *bnxt_entry;
4619 
4620 	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4621 				  rdma_entry);
4622 
4623 	kfree(bnxt_entry);
4624 }
4625 
bnxt_re_process_mad(struct ib_device * ibdev,int mad_flags,u32 port_num,const struct ib_wc * in_wc,const struct ib_grh * in_grh,const struct ib_mad * in_mad,struct ib_mad * out_mad,size_t * out_mad_size,u16 * out_mad_pkey_index)4626 int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags,
4627 			u32 port_num, const struct ib_wc *in_wc,
4628 			const struct ib_grh *in_grh,
4629 			const struct ib_mad *in_mad, struct ib_mad *out_mad,
4630 			size_t *out_mad_size, u16 *out_mad_pkey_index)
4631 {
4632 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
4633 	struct ib_class_port_info cpi = {};
4634 	int ret = IB_MAD_RESULT_SUCCESS;
4635 	int rc = 0;
4636 
4637 	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
4638 		return ret;
4639 
4640 	switch (in_mad->mad_hdr.attr_id) {
4641 	case IB_PMA_CLASS_PORT_INFO:
4642 		cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
4643 		memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
4644 		break;
4645 	case IB_PMA_PORT_COUNTERS_EXT:
4646 		rc = bnxt_re_assign_pma_port_ext_counters(rdev, out_mad);
4647 		break;
4648 	case IB_PMA_PORT_COUNTERS:
4649 		rc = bnxt_re_assign_pma_port_counters(rdev, out_mad);
4650 		break;
4651 	default:
4652 		rc = -EINVAL;
4653 		break;
4654 	}
4655 	if (rc)
4656 		return IB_MAD_RESULT_FAILURE;
4657 	ret |= IB_MAD_RESULT_REPLY;
4658 	return ret;
4659 }
4660 
UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)4661 static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
4662 {
4663 	struct bnxt_re_ucontext *uctx;
4664 
4665 	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4666 	bnxt_re_pacing_alert(uctx->rdev);
4667 	return 0;
4668 }
4669 
UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)4670 static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
4671 {
4672 	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4673 	enum bnxt_re_alloc_page_type alloc_type;
4674 	struct bnxt_re_user_mmap_entry *entry;
4675 	enum bnxt_re_mmap_flag mmap_flag;
4676 	struct bnxt_qplib_chip_ctx *cctx;
4677 	struct bnxt_re_ucontext *uctx;
4678 	struct bnxt_re_dev *rdev;
4679 	u64 mmap_offset;
4680 	u32 length;
4681 	u32 dpi;
4682 	u64 addr;
4683 	int err;
4684 
4685 	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4686 	if (IS_ERR(uctx))
4687 		return PTR_ERR(uctx);
4688 
4689 	err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
4690 	if (err)
4691 		return err;
4692 
4693 	rdev = uctx->rdev;
4694 	cctx = rdev->chip_ctx;
4695 
4696 	switch (alloc_type) {
4697 	case BNXT_RE_ALLOC_WC_PAGE:
4698 		if (cctx->modes.db_push)  {
4699 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
4700 						 uctx, BNXT_QPLIB_DPI_TYPE_WC))
4701 				return -ENOMEM;
4702 			length = PAGE_SIZE;
4703 			dpi = uctx->wcdpi.dpi;
4704 			addr = (u64)uctx->wcdpi.umdbr;
4705 			mmap_flag = BNXT_RE_MMAP_WC_DB;
4706 		} else {
4707 			return -EINVAL;
4708 		}
4709 
4710 		break;
4711 	case BNXT_RE_ALLOC_DBR_BAR_PAGE:
4712 		length = PAGE_SIZE;
4713 		addr = (u64)rdev->pacing.dbr_bar_addr;
4714 		mmap_flag = BNXT_RE_MMAP_DBR_BAR;
4715 		break;
4716 
4717 	case BNXT_RE_ALLOC_DBR_PAGE:
4718 		length = PAGE_SIZE;
4719 		addr = (u64)rdev->pacing.dbr_page;
4720 		mmap_flag = BNXT_RE_MMAP_DBR_PAGE;
4721 		break;
4722 
4723 	default:
4724 		return -EOPNOTSUPP;
4725 	}
4726 
4727 	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
4728 	if (!entry)
4729 		return -ENOMEM;
4730 
4731 	uobj->object = entry;
4732 	uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4733 	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
4734 			     &mmap_offset, sizeof(mmap_offset));
4735 	if (err)
4736 		return err;
4737 
4738 	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
4739 			     &length, sizeof(length));
4740 	if (err)
4741 		return err;
4742 
4743 	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
4744 			     &dpi, sizeof(dpi));
4745 	if (err)
4746 		return err;
4747 
4748 	return 0;
4749 }
4750 
alloc_page_obj_cleanup(struct ib_uobject * uobject,enum rdma_remove_reason why,struct uverbs_attr_bundle * attrs)4751 static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
4752 				  enum rdma_remove_reason why,
4753 			    struct uverbs_attr_bundle *attrs)
4754 {
4755 	struct  bnxt_re_user_mmap_entry *entry = uobject->object;
4756 	struct bnxt_re_ucontext *uctx = entry->uctx;
4757 
4758 	switch (entry->mmap_flag) {
4759 	case BNXT_RE_MMAP_WC_DB:
4760 		if (uctx && uctx->wcdpi.dbr) {
4761 			struct bnxt_re_dev *rdev = uctx->rdev;
4762 
4763 			bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
4764 			uctx->wcdpi.dbr = NULL;
4765 		}
4766 		break;
4767 	case BNXT_RE_MMAP_DBR_BAR:
4768 	case BNXT_RE_MMAP_DBR_PAGE:
4769 		break;
4770 	default:
4771 		goto exit;
4772 	}
4773 	rdma_user_mmap_entry_remove(&entry->rdma_entry);
4774 exit:
4775 	return 0;
4776 }
4777 
4778 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
4779 			    UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
4780 					    BNXT_RE_OBJECT_ALLOC_PAGE,
4781 					    UVERBS_ACCESS_NEW,
4782 					    UA_MANDATORY),
4783 			    UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
4784 						 enum bnxt_re_alloc_page_type,
4785 						 UA_MANDATORY),
4786 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
4787 						UVERBS_ATTR_TYPE(u64),
4788 						UA_MANDATORY),
4789 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
4790 						UVERBS_ATTR_TYPE(u32),
4791 						UA_MANDATORY),
4792 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
4793 						UVERBS_ATTR_TYPE(u32),
4794 						UA_MANDATORY));
4795 
4796 DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
4797 				    UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
4798 						    BNXT_RE_OBJECT_ALLOC_PAGE,
4799 						    UVERBS_ACCESS_DESTROY,
4800 						    UA_MANDATORY));
4801 
4802 DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
4803 			    UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
4804 			    &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
4805 			    &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
4806 
4807 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);
4808 
4809 DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
4810 			      &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));
4811 
4812 /* Toggle MEM */
UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)4813 static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs)
4814 {
4815 	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
4816 	enum bnxt_re_mmap_flag mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
4817 	enum bnxt_re_get_toggle_mem_type res_type;
4818 	struct bnxt_re_user_mmap_entry *entry;
4819 	struct bnxt_re_ucontext *uctx;
4820 	struct ib_ucontext *ib_uctx;
4821 	struct bnxt_re_dev *rdev;
4822 	struct bnxt_re_srq *srq;
4823 	u32 length = PAGE_SIZE;
4824 	struct bnxt_re_cq *cq;
4825 	u64 mem_offset;
4826 	u32 offset = 0;
4827 	u64 addr = 0;
4828 	u32 res_id;
4829 	int err;
4830 
4831 	ib_uctx = ib_uverbs_get_ucontext(attrs);
4832 	if (IS_ERR(ib_uctx))
4833 		return PTR_ERR(ib_uctx);
4834 
4835 	err = uverbs_get_const(&res_type, attrs, BNXT_RE_TOGGLE_MEM_TYPE);
4836 	if (err)
4837 		return err;
4838 
4839 	uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
4840 	rdev = uctx->rdev;
4841 	err = uverbs_copy_from(&res_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
4842 	if (err)
4843 		return err;
4844 
4845 	switch (res_type) {
4846 	case BNXT_RE_CQ_TOGGLE_MEM:
4847 		cq = bnxt_re_search_for_cq(rdev, res_id);
4848 		if (!cq)
4849 			return -EINVAL;
4850 
4851 		addr = (u64)cq->uctx_cq_page;
4852 		break;
4853 	case BNXT_RE_SRQ_TOGGLE_MEM:
4854 		srq = bnxt_re_search_for_srq(rdev, res_id);
4855 		if (!srq)
4856 			return -EINVAL;
4857 
4858 		addr = (u64)srq->uctx_srq_page;
4859 		break;
4860 
4861 	default:
4862 		return -EOPNOTSUPP;
4863 	}
4864 
4865 	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mem_offset);
4866 	if (!entry)
4867 		return -ENOMEM;
4868 
4869 	uobj->object = entry;
4870 	uverbs_finalize_uobj_create(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
4871 	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
4872 			     &mem_offset, sizeof(mem_offset));
4873 	if (err)
4874 		return err;
4875 
4876 	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
4877 			     &length, sizeof(length));
4878 	if (err)
4879 		return err;
4880 
4881 	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
4882 			     &offset, sizeof(offset));
4883 	if (err)
4884 		return err;
4885 
4886 	return 0;
4887 }
4888 
get_toggle_mem_obj_cleanup(struct ib_uobject * uobject,enum rdma_remove_reason why,struct uverbs_attr_bundle * attrs)4889 static int get_toggle_mem_obj_cleanup(struct ib_uobject *uobject,
4890 				      enum rdma_remove_reason why,
4891 				      struct uverbs_attr_bundle *attrs)
4892 {
4893 	struct  bnxt_re_user_mmap_entry *entry = uobject->object;
4894 
4895 	rdma_user_mmap_entry_remove(&entry->rdma_entry);
4896 	return 0;
4897 }
4898 
4899 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM,
4900 			    UVERBS_ATTR_IDR(BNXT_RE_TOGGLE_MEM_HANDLE,
4901 					    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4902 					    UVERBS_ACCESS_NEW,
4903 					    UA_MANDATORY),
4904 			    UVERBS_ATTR_CONST_IN(BNXT_RE_TOGGLE_MEM_TYPE,
4905 						 enum bnxt_re_get_toggle_mem_type,
4906 						 UA_MANDATORY),
4907 			    UVERBS_ATTR_PTR_IN(BNXT_RE_TOGGLE_MEM_RES_ID,
4908 					       UVERBS_ATTR_TYPE(u32),
4909 					       UA_MANDATORY),
4910 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
4911 						UVERBS_ATTR_TYPE(u64),
4912 						UA_MANDATORY),
4913 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
4914 						UVERBS_ATTR_TYPE(u32),
4915 						UA_MANDATORY),
4916 			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
4917 						UVERBS_ATTR_TYPE(u32),
4918 						UA_MANDATORY));
4919 
4920 DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
4921 				    UVERBS_ATTR_IDR(BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE,
4922 						    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4923 						    UVERBS_ACCESS_DESTROY,
4924 						    UA_MANDATORY));
4925 
4926 DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4927 			    UVERBS_TYPE_ALLOC_IDR(get_toggle_mem_obj_cleanup),
4928 			    &UVERBS_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM),
4929 			    &UVERBS_METHOD(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM));
4930 
4931 const struct uapi_definition bnxt_re_uapi_defs[] = {
4932 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
4933 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
4934 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_GET_TOGGLE_MEM),
4935 	{}
4936 };
4937