1 /*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: IB Verbs interpreter
37 */
38
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44 #include <net/addrconf.h>
45
46 #include <rdma/ib_verbs.h>
47 #include <rdma/ib_user_verbs.h>
48 #include <rdma/ib_umem.h>
49 #include <rdma/ib_addr.h>
50 #include <rdma/ib_mad.h>
51 #include <rdma/ib_cache.h>
52 #include <rdma/uverbs_ioctl.h>
53 #include <linux/hashtable.h>
54
55 #include "bnxt_ulp.h"
56
57 #include "roce_hsi.h"
58 #include "qplib_res.h"
59 #include "qplib_sp.h"
60 #include "qplib_fp.h"
61 #include "qplib_rcfw.h"
62
63 #include "bnxt_re.h"
64 #include "ib_verbs.h"
65
66 #include <rdma/uverbs_types.h>
67 #include <rdma/uverbs_std_types.h>
68
69 #include <rdma/ib_user_ioctl_cmds.h>
70
71 #define UVERBS_MODULE_NAME bnxt_re
72 #include <rdma/uverbs_named_ioctl.h>
73
74 #include <rdma/bnxt_re-abi.h>
75
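/* Translate IB verbs access flags into the qplib access flags consumed
 * by the firmware command layer; __to_ib_access_flags() below performs
 * the reverse mapping when reporting HW state back to the stack.
 */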
76 static int __from_ib_access_flags(int iflags)
77 {
78 int qflags = 0;
79
80 if (iflags & IB_ACCESS_LOCAL_WRITE)
81 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
82 if (iflags & IB_ACCESS_REMOTE_READ)
83 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
84 if (iflags & IB_ACCESS_REMOTE_WRITE)
85 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
86 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
87 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
88 if (iflags & IB_ACCESS_MW_BIND)
89 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
90 if (iflags & IB_ZERO_BASED)
91 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
92 if (iflags & IB_ACCESS_ON_DEMAND)
93 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
94 return qflags;
95 }
96
97 static enum ib_access_flags __to_ib_access_flags(int qflags)
98 {
99 enum ib_access_flags iflags = 0;
100
101 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
102 iflags |= IB_ACCESS_LOCAL_WRITE;
103 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
104 iflags |= IB_ACCESS_REMOTE_WRITE;
105 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
106 iflags |= IB_ACCESS_REMOTE_READ;
107 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
108 iflags |= IB_ACCESS_REMOTE_ATOMIC;
109 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
110 iflags |= IB_ACCESS_MW_BIND;
111 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
112 iflags |= IB_ZERO_BASED;
113 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
114 iflags |= IB_ACCESS_ON_DEMAND;
115 return iflags;
116 }
117
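/* Copy the SGEs of an IB work request into the qplib representation and
 * return the total payload length in bytes.
 */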
118 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
119 struct bnxt_qplib_sge *sg_list, int num)
120 {
121 int i, total = 0;
122
123 for (i = 0; i < num; i++) {
124 sg_list[i].addr = ib_sg_list[i].addr;
125 sg_list[i].lkey = ib_sg_list[i].lkey;
126 sg_list[i].size = ib_sg_list[i].length;
127 total += sg_list[i].size;
128 }
129 return total;
130 }
131
132 /* Device */
133 int bnxt_re_query_device(struct ib_device *ibdev,
134 struct ib_device_attr *ib_attr,
135 struct ib_udata *udata)
136 {
137 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
138 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
139
140 memset(ib_attr, 0, sizeof(*ib_attr));
141 memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
142 min(sizeof(dev_attr->fw_ver),
143 sizeof(ib_attr->fw_ver)));
144 addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
145 rdev->netdev->dev_addr);
146 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
147 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;
148
149 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
150 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
151 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
152 ib_attr->max_qp = dev_attr->max_qp;
153 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
154 ib_attr->device_cap_flags =
155 IB_DEVICE_CURR_QP_STATE_MOD
156 | IB_DEVICE_RC_RNR_NAK_GEN
157 | IB_DEVICE_SHUTDOWN_PORT
158 | IB_DEVICE_SYS_IMAGE_GUID
159 | IB_DEVICE_RESIZE_MAX_WR
160 | IB_DEVICE_PORT_ACTIVE_EVENT
161 | IB_DEVICE_N_NOTIFY_CQ
162 | IB_DEVICE_MEM_WINDOW
163 | IB_DEVICE_MEM_WINDOW_TYPE_2B
164 | IB_DEVICE_MEM_MGT_EXTENSIONS;
165 ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
166 ib_attr->max_send_sge = dev_attr->max_qp_sges;
167 ib_attr->max_recv_sge = dev_attr->max_qp_sges;
168 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
169 ib_attr->max_cq = dev_attr->max_cq;
170 ib_attr->max_cqe = dev_attr->max_cq_wqes;
171 ib_attr->max_mr = dev_attr->max_mr;
172 ib_attr->max_pd = dev_attr->max_pd;
173 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
174 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
175 ib_attr->atomic_cap = IB_ATOMIC_NONE;
176 ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
177 if (dev_attr->is_atomic) {
178 ib_attr->atomic_cap = IB_ATOMIC_GLOB;
179 ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
180 }
181
182 ib_attr->max_ee_rd_atom = 0;
183 ib_attr->max_res_rd_atom = 0;
184 ib_attr->max_ee_init_rd_atom = 0;
185 ib_attr->max_ee = 0;
186 ib_attr->max_rdd = 0;
187 ib_attr->max_mw = dev_attr->max_mw;
188 ib_attr->max_raw_ipv6_qp = 0;
189 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
190 ib_attr->max_mcast_grp = 0;
191 ib_attr->max_mcast_qp_attach = 0;
192 ib_attr->max_total_mcast_qp_attach = 0;
193 ib_attr->max_ah = dev_attr->max_ah;
194
195 ib_attr->max_srq = dev_attr->max_srq;
196 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
197 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
198
199 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
200
201 ib_attr->max_pkeys = 1;
202 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
203 return 0;
204 }
205
206 /* Port */
207 int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
208 struct ib_port_attr *port_attr)
209 {
210 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
211 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
212 int rc;
213
214 memset(port_attr, 0, sizeof(*port_attr));
215
216 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
217 port_attr->state = IB_PORT_ACTIVE;
218 port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
219 } else {
220 port_attr->state = IB_PORT_DOWN;
221 port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
222 }
223 port_attr->max_mtu = IB_MTU_4096;
224 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
225 port_attr->gid_tbl_len = dev_attr->max_sgid;
226 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
227 IB_PORT_DEVICE_MGMT_SUP |
228 IB_PORT_VENDOR_CLASS_SUP;
229 port_attr->ip_gids = true;
230
231 port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
232 port_attr->bad_pkey_cntr = 0;
233 port_attr->qkey_viol_cntr = 0;
234 port_attr->pkey_tbl_len = dev_attr->max_pkey;
235 port_attr->lid = 0;
236 port_attr->sm_lid = 0;
237 port_attr->lmc = 0;
238 port_attr->max_vl_num = 4;
239 port_attr->sm_sl = 0;
240 port_attr->subnet_timeout = 0;
241 port_attr->init_type_reply = 0;
242 rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
243 &port_attr->active_width);
244
245 return rc;
246 }
247
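/* Immutable attributes of the RoCE port: pkey/GID table sizes plus
 * RoCEv1 and RoCEv2 (UDP encap) protocol support.
 */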
248 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
249 struct ib_port_immutable *immutable)
250 {
251 struct ib_port_attr port_attr;
252
253 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
254 return -EINVAL;
255
256 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
257 immutable->gid_tbl_len = port_attr.gid_tbl_len;
258 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
259 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
260 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
261 return 0;
262 }
263
264 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
265 {
266 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
267
268 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
269 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
270 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
271 }
272
273 int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
274 u16 index, u16 *pkey)
275 {
276 if (index > 0)
277 return -EINVAL;
278
279 *pkey = IB_DEFAULT_PKEY_FULL;
280
281 return 0;
282 }
283
284 int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
285 int index, union ib_gid *gid)
286 {
287 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
288 int rc;
289
290 /* Ignore port_num */
291 memset(gid, 0, sizeof(*gid));
292 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
293 &rdev->qplib_res.sgid_tbl, index,
294 (struct bnxt_qplib_gid *)gid);
295 return rc;
296 }
297
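/* Remove a GID table entry. Entries are reference counted per index;
 * the entry is removed from HW only when the last reference is dropped.
 */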
298 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
299 {
300 int rc = 0;
301 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
302 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
303 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
304 struct bnxt_qplib_gid *gid_to_del;
305 u16 vlan_id = 0xFFFF;
306
307 /* Delete the entry from the hardware */
308 ctx = *context;
309 if (!ctx)
310 return -EINVAL;
311
312 if (sgid_tbl && sgid_tbl->active) {
313 if (ctx->idx >= sgid_tbl->max)
314 return -EINVAL;
315 gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
316 vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
317 /* DEL_GID is called in WQ context (netdevice_event_work_handler)
318 * or via the ib_unregister_device path. In the former case QP1
319 * may not be destroyed yet, in which case just return as FW
320 * needs that entry to be present and will fail its deletion.
321 * We could get invoked again after QP1 is destroyed OR get an
322 * ADD_GID call with a different GID value for the same index
323 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
324 */
325 if (ctx->idx == 0 &&
326 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
327 ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
328 ibdev_dbg(&rdev->ibdev,
329 "Trying to delete GID0 while QP1 is alive\n");
330 return -EFAULT;
331 }
332 ctx->refcnt--;
333 if (!ctx->refcnt) {
334 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
335 vlan_id, true);
336 if (rc) {
337 ibdev_err(&rdev->ibdev,
338 "Failed to remove GID: %#x", rc);
339 } else {
340 ctx_tbl = sgid_tbl->ctx;
341 ctx_tbl[ctx->idx] = NULL;
342 kfree(ctx);
343 }
344 }
345 } else {
346 return -EINVAL;
347 }
348 return rc;
349 }
350
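/* Program a GID table entry in HW. If the GID is already present
 * (-EALREADY), only the reference count of the existing context is
 * bumped and that context is returned.
 */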
351 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
352 {
353 int rc;
354 u32 tbl_idx = 0;
355 u16 vlan_id = 0xFFFF;
356 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
357 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
358 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
359
360 rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
361 if (rc)
362 return rc;
363
364 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
365 rdev->qplib_res.netdev->dev_addr,
366 vlan_id, true, &tbl_idx);
367 if (rc == -EALREADY) {
368 ctx_tbl = sgid_tbl->ctx;
369 ctx_tbl[tbl_idx]->refcnt++;
370 *context = ctx_tbl[tbl_idx];
371 return 0;
372 }
373
374 if (rc < 0) {
375 ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
376 return rc;
377 }
378
379 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
380 if (!ctx)
381 return -ENOMEM;
382 ctx_tbl = sgid_tbl->ctx;
383 ctx->idx = tbl_idx;
384 ctx->refcnt = 1;
385 ctx_tbl[tbl_idx] = ctx;
386 *context = ctx;
387
388 return rc;
389 }
390
391 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
392 u32 port_num)
393 {
394 return IB_LINK_LAYER_ETHERNET;
395 }
396
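/* Fence MR/MW support, used only on chips older than Gen P5/P7: each
 * kernel PD gets a fence MR and a type-1 MW, and bnxt_re_bind_fence_mw()
 * posts a bind WQE flagged with UC_FENCE to enforce fencing on the SQ.
 */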
397 #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
398
399 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
400 {
401 struct bnxt_re_fence_data *fence = &pd->fence;
402 struct ib_mr *ib_mr = &fence->mr->ib_mr;
403 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
404 struct bnxt_re_dev *rdev = pd->rdev;
405
406 if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
407 return;
408
409 memset(wqe, 0, sizeof(*wqe));
410 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
411 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
412 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
413 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
414 wqe->bind.zero_based = false;
415 wqe->bind.parent_l_key = ib_mr->lkey;
416 wqe->bind.va = (u64)(unsigned long)fence->va;
417 wqe->bind.length = fence->size;
418 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
419 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
420
421 /* Save the initial rkey in fence structure for now;
422 * wqe->bind.r_key will be set at (re)bind time.
423 */
424 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
425 }
426
427 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
428 {
429 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
430 qplib_qp);
431 struct ib_pd *ib_pd = qp->ib_qp.pd;
432 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
433 struct bnxt_re_fence_data *fence = &pd->fence;
434 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
435 struct bnxt_qplib_swqe wqe;
436 int rc;
437
438 memcpy(&wqe, fence_wqe, sizeof(wqe));
439 wqe.bind.r_key = fence->bind_rkey;
440 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
441
442 ibdev_dbg(&qp->rdev->ibdev,
443 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
444 wqe.bind.r_key, qp->qplib_qp.id, pd);
445 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
446 if (rc) {
447 ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
448 return rc;
449 }
450 bnxt_qplib_post_send_db(&qp->qplib_qp);
451
452 return rc;
453 }
454
455 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
456 {
457 struct bnxt_re_fence_data *fence = &pd->fence;
458 struct bnxt_re_dev *rdev = pd->rdev;
459 struct device *dev = &rdev->en_dev->pdev->dev;
460 struct bnxt_re_mr *mr = fence->mr;
461
462 if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
463 return;
464
465 if (fence->mw) {
466 bnxt_re_dealloc_mw(fence->mw);
467 fence->mw = NULL;
468 }
469 if (mr) {
470 if (mr->ib_mr.rkey)
471 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
472 true);
473 if (mr->ib_mr.lkey)
474 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
475 kfree(mr);
476 fence->mr = NULL;
477 }
478 if (fence->dma_addr) {
479 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
480 DMA_BIDIRECTIONAL);
481 fence->dma_addr = 0;
482 }
483 }
484
485 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
486 {
487 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
488 struct bnxt_re_fence_data *fence = &pd->fence;
489 struct bnxt_re_dev *rdev = pd->rdev;
490 struct device *dev = &rdev->en_dev->pdev->dev;
491 struct bnxt_re_mr *mr = NULL;
492 dma_addr_t dma_addr = 0;
493 struct ib_mw *mw;
494 int rc;
495
496 if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
497 return 0;
498
499 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
500 DMA_BIDIRECTIONAL);
501 rc = dma_mapping_error(dev, dma_addr);
502 if (rc) {
503 ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
504 rc = -EIO;
505 fence->dma_addr = 0;
506 goto fail;
507 }
508 fence->dma_addr = dma_addr;
509
510 /* Allocate a MR */
511 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
512 if (!mr) {
513 rc = -ENOMEM;
514 goto fail;
515 }
516 fence->mr = mr;
517 mr->rdev = rdev;
518 mr->qplib_mr.pd = &pd->qplib_pd;
519 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
520 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
521 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
522 if (rc) {
523 ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
524 goto fail;
525 }
526
527 /* Register MR */
528 mr->ib_mr.lkey = mr->qplib_mr.lkey;
529 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
530 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
531 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
532 BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
533 if (rc) {
534 ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
535 goto fail;
536 }
537 mr->ib_mr.rkey = mr->qplib_mr.rkey;
538
539 /* Create a fence MW only for kernel consumers */
540 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
541 if (IS_ERR(mw)) {
542 ibdev_err(&rdev->ibdev,
543 "Failed to create fence-MW for PD: %p\n", pd);
544 rc = PTR_ERR(mw);
545 goto fail;
546 }
547 fence->mw = mw;
548
549 bnxt_re_create_fence_wqe(pd);
550 return 0;
551
552 fail:
553 bnxt_re_destroy_fence_mr(pd);
554 return rc;
555 }
556
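/* Allocate and insert an rdma_user_mmap entry so that user space can
 * later mmap() the doorbell or shared page identified by mem_offset.
 * The opaque offset to pass to mmap() is returned through *offset and
 * relayed in the uverbs response; an illustrative user-space sketch
 * (not part of this driver) would be:
 *
 *	db = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		  cmd_fd, resp.dbr);
 */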
557 static struct bnxt_re_user_mmap_entry*
558 bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
559 enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
560 {
561 struct bnxt_re_user_mmap_entry *entry;
562 int ret;
563
564 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
565 if (!entry)
566 return NULL;
567
568 entry->mem_offset = mem_offset;
569 entry->mmap_flag = mmap_flag;
570 entry->uctx = uctx;
571
572 switch (mmap_flag) {
573 case BNXT_RE_MMAP_SH_PAGE:
574 ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
575 &entry->rdma_entry, PAGE_SIZE, 0);
576 break;
577 case BNXT_RE_MMAP_UC_DB:
578 case BNXT_RE_MMAP_WC_DB:
579 case BNXT_RE_MMAP_DBR_BAR:
580 case BNXT_RE_MMAP_DBR_PAGE:
581 case BNXT_RE_MMAP_TOGGLE_PAGE:
582 ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
583 &entry->rdma_entry, PAGE_SIZE);
584 break;
585 default:
586 ret = -EINVAL;
587 break;
588 }
589
590 if (ret) {
591 kfree(entry);
592 return NULL;
593 }
594 if (offset)
595 *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
596
597 return entry;
598 }
599
600 /* Protection Domains */
601 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
602 {
603 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
604 struct bnxt_re_dev *rdev = pd->rdev;
605
606 if (udata) {
607 rdma_user_mmap_entry_remove(pd->pd_db_mmap);
608 pd->pd_db_mmap = NULL;
609 }
610
611 bnxt_re_destroy_fence_mr(pd);
612
613 if (pd->qplib_pd.id) {
614 if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
615 &rdev->qplib_res.pd_tbl,
616 &pd->qplib_pd))
617 atomic_dec(&rdev->stats.res.pd_count);
618 }
619 return 0;
620 }
621
622 int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
623 {
624 struct ib_device *ibdev = ibpd->device;
625 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
626 struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
627 udata, struct bnxt_re_ucontext, ib_uctx);
628 struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
629 struct bnxt_re_user_mmap_entry *entry = NULL;
630 u32 active_pds;
631 int rc = 0;
632
633 pd->rdev = rdev;
634 if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
635 ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
636 rc = -ENOMEM;
637 goto fail;
638 }
639
640 if (udata) {
641 struct bnxt_re_pd_resp resp = {};
642
643 if (!ucntx->dpi.dbr) {
644 /* Allocate the DPI in alloc_pd so that ibv_devinfo and
645 * similar applications do not fail when DPIs
646 * are depleted.
647 */
648 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
649 &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
650 rc = -ENOMEM;
651 goto dbfail;
652 }
653 }
654
655 resp.pdid = pd->qplib_pd.id;
656 /* Still allow mapping this DBR to the new user PD. */
657 resp.dpi = ucntx->dpi.dpi;
658
659 entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
660 BNXT_RE_MMAP_UC_DB, &resp.dbr);
661
662 if (!entry) {
663 rc = -ENOMEM;
664 goto dbfail;
665 }
666
667 pd->pd_db_mmap = &entry->rdma_entry;
668
669 rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
670 if (rc) {
671 rdma_user_mmap_entry_remove(pd->pd_db_mmap);
672 rc = -EFAULT;
673 goto dbfail;
674 }
675 }
676
677 if (!udata)
678 if (bnxt_re_create_fence_mr(pd))
679 ibdev_warn(&rdev->ibdev,
680 "Failed to create Fence-MR\n");
681 active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
682 if (active_pds > rdev->stats.res.pd_watermark)
683 rdev->stats.res.pd_watermark = active_pds;
684
685 return 0;
686 dbfail:
687 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
688 &pd->qplib_pd);
689 fail:
690 return rc;
691 }
692
693 /* Address Handles */
694 int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
695 {
696 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
697 struct bnxt_re_dev *rdev = ah->rdev;
698 bool block = true;
699 int rc;
700
701 block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
702 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
703 if (BNXT_RE_CHECK_RC(rc)) {
704 if (rc == -ETIMEDOUT)
705 rc = 0;
706 else
707 goto fail;
708 }
709 atomic_dec(&rdev->stats.res.ah_count);
710 fail:
711 return rc;
712 }
713
714 static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
715 {
716 u8 nw_type;
717
718 switch (ntype) {
719 case RDMA_NETWORK_IPV4:
720 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
721 break;
722 case RDMA_NETWORK_IPV6:
723 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
724 break;
725 default:
726 nw_type = CMDQ_CREATE_AH_TYPE_V1;
727 break;
728 }
729 return nw_type;
730 }
731
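/* Create an address handle. A GRH is mandatory for RoCE; the HW AH is
 * programmed with the GID table index, traffic class, flow label, hop
 * limit, SL and the DMAC already resolved by the core.
 */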
732 int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
733 struct ib_udata *udata)
734 {
735 struct ib_pd *ib_pd = ib_ah->pd;
736 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
737 struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
738 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
739 struct bnxt_re_dev *rdev = pd->rdev;
740 const struct ib_gid_attr *sgid_attr;
741 struct bnxt_re_gid_ctx *ctx;
742 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
743 u32 active_ahs;
744 u8 nw_type;
745 int rc;
746
747 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
748 ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
749 return -EINVAL;
750 }
751
752 ah->rdev = rdev;
753 ah->qplib_ah.pd = &pd->qplib_pd;
754
755 /* Supply the configuration for the HW */
756 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
757 sizeof(union ib_gid));
758 sgid_attr = grh->sgid_attr;
759 /* Get the HW context of the GID. The reference
760 * of GID table entry is already taken by the caller.
761 */
762 ctx = rdma_read_gid_hw_context(sgid_attr);
763 ah->qplib_ah.sgid_index = ctx->idx;
764 ah->qplib_ah.host_sgid_index = grh->sgid_index;
765 ah->qplib_ah.traffic_class = grh->traffic_class;
766 ah->qplib_ah.flow_label = grh->flow_label;
767 ah->qplib_ah.hop_limit = grh->hop_limit;
768 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
769
770 /* Get network header type for this GID */
771 nw_type = rdma_gid_attr_network_type(sgid_attr);
772 ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
773
774 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
775 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
776 !(init_attr->flags &
777 RDMA_CREATE_AH_SLEEPABLE));
778 if (rc) {
779 ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
780 return rc;
781 }
782
783 /* Write AVID to shared page. */
784 if (udata) {
785 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
786 udata, struct bnxt_re_ucontext, ib_uctx);
787 unsigned long flag;
788 u32 *wrptr;
789
790 spin_lock_irqsave(&uctx->sh_lock, flag);
791 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
792 *wrptr = ah->qplib_ah.id;
793 wmb(); /* make sure cache is updated. */
794 spin_unlock_irqrestore(&uctx->sh_lock, flag);
795 }
796 active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
797 if (active_ahs > rdev->stats.res.ah_watermark)
798 rdev->stats.res.ah_watermark = active_ahs;
799
800 return 0;
801 }
802
803 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
804 {
805 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
806
807 ah_attr->type = ib_ah->type;
808 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
809 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
810 rdma_ah_set_grh(ah_attr, NULL, 0,
811 ah->qplib_ah.host_sgid_index,
812 0, ah->qplib_ah.traffic_class);
813 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
814 rdma_ah_set_port_num(ah_attr, 1);
815 rdma_ah_set_static_rate(ah_attr, 0);
816 return 0;
817 }
818
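/* Lock both CQs of a QP (SCQ first) so that QP cleanup cannot race with
 * completion processing; when SCQ and RCQ are the same, only one lock is
 * taken.
 */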
819 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
820 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
821 {
822 unsigned long flags;
823
824 spin_lock_irqsave(&qp->scq->cq_lock, flags);
825 if (qp->rcq != qp->scq)
826 spin_lock(&qp->rcq->cq_lock);
827 else
828 __acquire(&qp->rcq->cq_lock);
829
830 return flags;
831 }
832
833 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
834 unsigned long flags)
835 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
836 {
837 if (qp->rcq != qp->scq)
838 spin_unlock(&qp->rcq->cq_lock);
839 else
840 __release(&qp->rcq->cq_lock);
841 spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
842 }
843
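/* Tear down the shadow QP1 resources (shadow AH, shadow QP and the SQP
 * table) that were created to relay GSI traffic on pre-P5 chips.
 */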
844 static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
845 {
846 struct bnxt_re_qp *gsi_sqp;
847 struct bnxt_re_ah *gsi_sah;
848 struct bnxt_re_dev *rdev;
849 int rc;
850
851 rdev = qp->rdev;
852 gsi_sqp = rdev->gsi_ctx.gsi_sqp;
853 gsi_sah = rdev->gsi_ctx.gsi_sah;
854
855 ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
856 bnxt_qplib_destroy_ah(&rdev->qplib_res,
857 &gsi_sah->qplib_ah,
858 true);
859 atomic_dec(&rdev->stats.res.ah_count);
860 bnxt_qplib_clean_qp(&qp->qplib_qp);
861
862 ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
863 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
864 if (rc) {
865 ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
866 goto fail;
867 }
868 bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
869
870 /* remove from active qp list */
871 mutex_lock(&rdev->qp_lock);
872 list_del(&gsi_sqp->list);
873 mutex_unlock(&rdev->qp_lock);
874 atomic_dec(&rdev->stats.res.qp_count);
875
876 kfree(rdev->gsi_ctx.sqp_tbl);
877 kfree(gsi_sah);
878 kfree(gsi_sqp);
879 rdev->gsi_ctx.gsi_sqp = NULL;
880 rdev->gsi_ctx.gsi_sah = NULL;
881 rdev->gsi_ctx.sqp_tbl = NULL;
882
883 return 0;
884 fail:
885 return rc;
886 }
887
888 /* Queue Pairs */
889 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
890 {
891 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
892 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
893 struct bnxt_re_dev *rdev = qp->rdev;
894 struct bnxt_qplib_nq *scq_nq = NULL;
895 struct bnxt_qplib_nq *rcq_nq = NULL;
896 unsigned int flags;
897 int rc;
898
899 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
900
901 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
902 if (rc) {
903 ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
904 return rc;
905 }
906
907 if (rdma_is_kernel_res(&qp->ib_qp.res)) {
908 flags = bnxt_re_lock_cqs(qp);
909 bnxt_qplib_clean_qp(&qp->qplib_qp);
910 bnxt_re_unlock_cqs(qp, flags);
911 }
912
913 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
914
915 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
916 rc = bnxt_re_destroy_gsi_sqp(qp);
917 if (rc)
918 return rc;
919 }
920
921 mutex_lock(&rdev->qp_lock);
922 list_del(&qp->list);
923 mutex_unlock(&rdev->qp_lock);
924 atomic_dec(&rdev->stats.res.qp_count);
925 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
926 atomic_dec(&rdev->stats.res.rc_qp_count);
927 else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
928 atomic_dec(&rdev->stats.res.ud_qp_count);
929
930 ib_umem_release(qp->rumem);
931 ib_umem_release(qp->sumem);
932
933 /* Flush all the entries of the notification queues associated
934 * with the given QP.
935 */
936 scq_nq = qplib_qp->scq->nq;
937 rcq_nq = qplib_qp->rcq->nq;
938 bnxt_re_synchronize_nq(scq_nq);
939 if (scq_nq != rcq_nq)
940 bnxt_re_synchronize_nq(rcq_nq);
941
942 return 0;
943 }
944
945 static u8 __from_ib_qp_type(enum ib_qp_type type)
946 {
947 switch (type) {
948 case IB_QPT_GSI:
949 return CMDQ_CREATE_QP1_TYPE_GSI;
950 case IB_QPT_RC:
951 return CMDQ_CREATE_QP_TYPE_RC;
952 case IB_QPT_UD:
953 return CMDQ_CREATE_QP_TYPE_UD;
954 default:
955 return IB_QPT_MAX;
956 }
957 }
958
959 static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
960 int rsge, int max)
961 {
962 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
963 rsge = max;
964 return bnxt_re_get_rwqe_size(rsge);
965 }
966
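/* Compute the send WQE size: at least room for nsge SGEs, grown and
 * aligned to the send header size when inline data is requested.
 */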
967 static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
968 {
969 u16 wqe_size, calc_ils;
970
971 wqe_size = bnxt_re_get_swqe_size(nsge);
972 if (ilsize) {
973 calc_ils = sizeof(struct sq_send_hdr) + ilsize;
974 wqe_size = max_t(u16, calc_ils, wqe_size);
975 wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
976 }
977 return wqe_size;
978 }
979
980 static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
981 struct ib_qp_init_attr *init_attr)
982 {
983 struct bnxt_qplib_dev_attr *dev_attr;
984 struct bnxt_qplib_qp *qplqp;
985 struct bnxt_re_dev *rdev;
986 struct bnxt_qplib_q *sq;
987 int align, ilsize;
988
989 rdev = qp->rdev;
990 qplqp = &qp->qplib_qp;
991 sq = &qplqp->sq;
992 dev_attr = &rdev->dev_attr;
993
994 align = sizeof(struct sq_send_hdr);
995 ilsize = ALIGN(init_attr->cap.max_inline_data, align);
996
997 sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
998 if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
999 return -EINVAL;
1000 /* For gen P4 and gen P5 backward compatibility mode,
1001 * the wqe size is fixed to 128 bytes.
1002 */
1003 if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
1004 qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1005 sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
1006
1007 if (init_attr->cap.max_inline_data) {
1008 qplqp->max_inline_data = sq->wqe_size -
1009 sizeof(struct sq_send_hdr);
1010 init_attr->cap.max_inline_data = qplqp->max_inline_data;
1011 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1012 sq->max_sge = qplqp->max_inline_data /
1013 sizeof(struct sq_sge);
1014 }
1015
1016 return 0;
1017 }
1018
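/* Pin and map the user-space SQ/RQ buffers of a user QP. For RC QPs the
 * SQ umem also carries the PSN search area appended after the WQEs.
 */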
1019 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
1020 struct bnxt_re_qp *qp, struct ib_udata *udata)
1021 {
1022 struct bnxt_qplib_qp *qplib_qp;
1023 struct bnxt_re_ucontext *cntx;
1024 struct bnxt_re_qp_req ureq;
1025 int bytes = 0, psn_sz;
1026 struct ib_umem *umem;
1027 int psn_nume;
1028
1029 qplib_qp = &qp->qplib_qp;
1030 cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
1031 ib_uctx);
1032 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1033 return -EFAULT;
1034
1035 bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
1036 /* Consider mapping PSN search memory only for RC QPs. */
1037 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1038 psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
1039 sizeof(struct sq_psn_search_ext) :
1040 sizeof(struct sq_psn_search);
1041 psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1042 qplib_qp->sq.max_wqe :
1043 ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
1044 sizeof(struct bnxt_qplib_sge));
1045 bytes += (psn_nume * psn_sz);
1046 }
1047
1048 bytes = PAGE_ALIGN(bytes);
1049 umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
1050 IB_ACCESS_LOCAL_WRITE);
1051 if (IS_ERR(umem))
1052 return PTR_ERR(umem);
1053
1054 qp->sumem = umem;
1055 qplib_qp->sq.sg_info.umem = umem;
1056 qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
1057 qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
1058 qplib_qp->qp_handle = ureq.qp_handle;
1059
1060 if (!qp->qplib_qp.srq) {
1061 bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
1062 bytes = PAGE_ALIGN(bytes);
1063 umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
1064 IB_ACCESS_LOCAL_WRITE);
1065 if (IS_ERR(umem))
1066 goto rqfail;
1067 qp->rumem = umem;
1068 qplib_qp->rq.sg_info.umem = umem;
1069 qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
1070 qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
1071 }
1072
1073 qplib_qp->dpi = &cntx->dpi;
1074 return 0;
1075 rqfail:
1076 ib_umem_release(qp->sumem);
1077 qp->sumem = NULL;
1078 memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
1079
1080 return PTR_ERR(umem);
1081 }
1082
1083 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
1084 (struct bnxt_re_pd *pd,
1085 struct bnxt_qplib_res *qp1_res,
1086 struct bnxt_qplib_qp *qp1_qp)
1087 {
1088 struct bnxt_re_dev *rdev = pd->rdev;
1089 struct bnxt_re_ah *ah;
1090 union ib_gid sgid;
1091 int rc;
1092
1093 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
1094 if (!ah)
1095 return NULL;
1096
1097 ah->rdev = rdev;
1098 ah->qplib_ah.pd = &pd->qplib_pd;
1099
1100 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
1101 if (rc)
1102 goto fail;
1103
1104 /* supply the dgid data same as sgid */
1105 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
1106 sizeof(union ib_gid));
1107 ah->qplib_ah.sgid_index = 0;
1108
1109 ah->qplib_ah.traffic_class = 0;
1110 ah->qplib_ah.flow_label = 0;
1111 ah->qplib_ah.hop_limit = 1;
1112 ah->qplib_ah.sl = 0;
1113 /* Have DMAC same as SMAC */
1114 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
1115
1116 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
1117 if (rc) {
1118 ibdev_err(&rdev->ibdev,
1119 "Failed to allocate HW AH for Shadow QP");
1120 goto fail;
1121 }
1122 atomic_inc(&rdev->stats.res.ah_count);
1123
1124 return ah;
1125
1126 fail:
1127 kfree(ah);
1128 return NULL;
1129 }
1130
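/* Create the shadow UD QP used to relay QP1 packets on pre-P5 chips; it
 * shares QP1's CQs and mirrors its RQ depth.
 */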
1131 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
1132 (struct bnxt_re_pd *pd,
1133 struct bnxt_qplib_res *qp1_res,
1134 struct bnxt_qplib_qp *qp1_qp)
1135 {
1136 struct bnxt_re_dev *rdev = pd->rdev;
1137 struct bnxt_re_qp *qp;
1138 int rc;
1139
1140 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1141 if (!qp)
1142 return NULL;
1143
1144 qp->rdev = rdev;
1145
1146 /* Initialize the shadow QP structure from the QP1 values */
1147 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1148
1149 qp->qplib_qp.pd = &pd->qplib_pd;
1150 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1151 qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_UD;
1152
1153 qp->qplib_qp.max_inline_data = 0;
1154 qp->qplib_qp.sig_type = true;
1155
1156 /* Shadow QP SQ depth should be same as QP1 RQ depth */
1157 qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
1158 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1159 qp->qplib_qp.sq.max_sge = 2;
1160 /* Q full delta can be 1 since it is internal QP */
1161 qp->qplib_qp.sq.q_full_delta = 1;
1162 qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
1163 qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;
1164
1165 qp->qplib_qp.scq = qp1_qp->scq;
1166 qp->qplib_qp.rcq = qp1_qp->rcq;
1167
1168 qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
1169 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1170 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1171 /* Q full delta can be 1 since it is internal QP */
1172 qp->qplib_qp.rq.q_full_delta = 1;
1173 qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
1174 qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
1175
1176 qp->qplib_qp.mtu = qp1_qp->mtu;
1177
1178 qp->qplib_qp.sq_hdr_buf_size = 0;
1179 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1180 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1181
1182 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1183 if (rc)
1184 goto fail;
1185
1186 spin_lock_init(&qp->sq_lock);
1187 INIT_LIST_HEAD(&qp->list);
1188 mutex_lock(&rdev->qp_lock);
1189 list_add_tail(&qp->list, &rdev->qp_list);
1190 atomic_inc(&rdev->stats.res.qp_count);
1191 mutex_unlock(&rdev->qp_lock);
1192 return qp;
1193 fail:
1194 kfree(qp);
1195 return NULL;
1196 }
1197
1198 static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
1199 struct ib_qp_init_attr *init_attr,
1200 struct bnxt_re_ucontext *uctx)
1201 {
1202 struct bnxt_qplib_dev_attr *dev_attr;
1203 struct bnxt_qplib_qp *qplqp;
1204 struct bnxt_re_dev *rdev;
1205 struct bnxt_qplib_q *rq;
1206 int entries;
1207
1208 rdev = qp->rdev;
1209 qplqp = &qp->qplib_qp;
1210 rq = &qplqp->rq;
1211 dev_attr = &rdev->dev_attr;
1212
1213 if (init_attr->srq) {
1214 struct bnxt_re_srq *srq;
1215
1216 srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
1217 qplqp->srq = &srq->qplib_srq;
1218 rq->max_wqe = 0;
1219 } else {
1220 rq->max_sge = init_attr->cap.max_recv_sge;
1221 if (rq->max_sge > dev_attr->max_qp_sges)
1222 rq->max_sge = dev_attr->max_qp_sges;
1223 init_attr->cap.max_recv_sge = rq->max_sge;
1224 rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
1225 dev_attr->max_qp_sges);
1226 /* Allocate 1 more than what's provided so posting max doesn't
1227 * mean empty.
1228 */
1229 entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
1230 rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1231 rq->q_full_delta = 0;
1232 rq->sg_info.pgsize = PAGE_SIZE;
1233 rq->sg_info.pgshft = PAGE_SHIFT;
1234 }
1235
1236 return 0;
1237 }
1238
1239 static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
1240 {
1241 struct bnxt_qplib_dev_attr *dev_attr;
1242 struct bnxt_qplib_qp *qplqp;
1243 struct bnxt_re_dev *rdev;
1244
1245 rdev = qp->rdev;
1246 qplqp = &qp->qplib_qp;
1247 dev_attr = &rdev->dev_attr;
1248
1249 if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
1250 qplqp->rq.max_sge = dev_attr->max_qp_sges;
1251 if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
1252 qplqp->rq.max_sge = dev_attr->max_qp_sges;
1253 qplqp->rq.max_sge = 6;
1254 }
1255 }
1256
1257 static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
1258 struct ib_qp_init_attr *init_attr,
1259 struct bnxt_re_ucontext *uctx)
1260 {
1261 struct bnxt_qplib_dev_attr *dev_attr;
1262 struct bnxt_qplib_qp *qplqp;
1263 struct bnxt_re_dev *rdev;
1264 struct bnxt_qplib_q *sq;
1265 int entries;
1266 int diff;
1267 int rc;
1268
1269 rdev = qp->rdev;
1270 qplqp = &qp->qplib_qp;
1271 sq = &qplqp->sq;
1272 dev_attr = &rdev->dev_attr;
1273
1274 sq->max_sge = init_attr->cap.max_send_sge;
1275 if (sq->max_sge > dev_attr->max_qp_sges) {
1276 sq->max_sge = dev_attr->max_qp_sges;
1277 init_attr->cap.max_send_sge = sq->max_sge;
1278 }
1279
1280 rc = bnxt_re_setup_swqe_size(qp, init_attr);
1281 if (rc)
1282 return rc;
1283
1284 entries = init_attr->cap.max_send_wr;
1285 /* Allocate 128 + 1 more than what's provided */
1286 diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
1287 0 : BNXT_QPLIB_RESERVED_QP_WRS;
1288 entries = bnxt_re_init_depth(entries + diff + 1, uctx);
1289 sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
1290 sq->q_full_delta = diff + 1;
1291 /*
1292 * Reserve one slot for the Phantom WQE. The application can
1293 * post one extra entry in this case, but allowing it avoids an
1294 * unexpected queue-full condition.
1295 */
1296 qplqp->sq.q_full_delta -= 1;
1297 qplqp->sq.sg_info.pgsize = PAGE_SIZE;
1298 qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
1299
1300 return 0;
1301 }
1302
1303 static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
1304 struct ib_qp_init_attr *init_attr,
1305 struct bnxt_re_ucontext *uctx)
1306 {
1307 struct bnxt_qplib_dev_attr *dev_attr;
1308 struct bnxt_qplib_qp *qplqp;
1309 struct bnxt_re_dev *rdev;
1310 int entries;
1311
1312 rdev = qp->rdev;
1313 qplqp = &qp->qplib_qp;
1314 dev_attr = &rdev->dev_attr;
1315
1316 if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
1317 entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
1318 qplqp->sq.max_wqe = min_t(u32, entries,
1319 dev_attr->max_qp_wqes + 1);
1320 qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
1321 init_attr->cap.max_send_wr;
1322 qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
1323 if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
1324 qplqp->sq.max_sge = dev_attr->max_qp_sges;
1325 }
1326 }
1327
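/* Map the IB QP type to the firmware QP type; on Gen P5/P7 chips a GSI
 * QP is created with the regular CREATE_QP command (type GSI) rather
 * than CREATE_QP1.
 */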
1328 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
1329 struct ib_qp_init_attr *init_attr)
1330 {
1331 struct bnxt_qplib_chip_ctx *chip_ctx;
1332 int qptype;
1333
1334 chip_ctx = rdev->chip_ctx;
1335
1336 qptype = __from_ib_qp_type(init_attr->qp_type);
1337 if (qptype == IB_QPT_MAX) {
1338 ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
1339 qptype = -EOPNOTSUPP;
1340 goto out;
1341 }
1342
1343 if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
1344 init_attr->qp_type == IB_QPT_GSI)
1345 qptype = CMDQ_CREATE_QP_TYPE_GSI;
1346 out:
1347 return qptype;
1348 }
1349
1350 static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1351 struct ib_qp_init_attr *init_attr,
1352 struct ib_udata *udata)
1353 {
1354 struct bnxt_qplib_dev_attr *dev_attr;
1355 struct bnxt_re_ucontext *uctx;
1356 struct bnxt_qplib_qp *qplqp;
1357 struct bnxt_re_dev *rdev;
1358 struct bnxt_re_cq *cq;
1359 int rc = 0, qptype;
1360
1361 rdev = qp->rdev;
1362 qplqp = &qp->qplib_qp;
1363 dev_attr = &rdev->dev_attr;
1364
1365 uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
1366 /* Setup misc params */
1367 ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
1368 qplqp->pd = &pd->qplib_pd;
1369 qplqp->qp_handle = (u64)qplqp;
1370 qplqp->max_inline_data = init_attr->cap.max_inline_data;
1371 qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1372 qptype = bnxt_re_init_qp_type(rdev, init_attr);
1373 if (qptype < 0) {
1374 rc = qptype;
1375 goto out;
1376 }
1377 qplqp->type = (u8)qptype;
1378 qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;
1379
1380 if (init_attr->qp_type == IB_QPT_RC) {
1381 qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
1382 qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1383 }
1384 qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1385 qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
1386 if (init_attr->create_flags) {
1387 ibdev_dbg(&rdev->ibdev,
1388 "QP create flags 0x%x not supported",
1389 init_attr->create_flags);
1390 return -EOPNOTSUPP;
1391 }
1392
1393 /* Setup CQs */
1394 if (init_attr->send_cq) {
1395 cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
1396 qplqp->scq = &cq->qplib_cq;
1397 qp->scq = cq;
1398 }
1399
1400 if (init_attr->recv_cq) {
1401 cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
1402 qplqp->rcq = &cq->qplib_cq;
1403 qp->rcq = cq;
1404 }
1405
1406 /* Setup RQ/SRQ */
1407 rc = bnxt_re_init_rq_attr(qp, init_attr, uctx);
1408 if (rc)
1409 goto out;
1410 if (init_attr->qp_type == IB_QPT_GSI)
1411 bnxt_re_adjust_gsi_rq_attr(qp);
1412
1413 /* Setup SQ */
1414 rc = bnxt_re_init_sq_attr(qp, init_attr, uctx);
1415 if (rc)
1416 goto out;
1417 if (init_attr->qp_type == IB_QPT_GSI)
1418 bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
1419
1420 if (udata) /* This will update DPI and qp_handle */
1421 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1422 out:
1423 return rc;
1424 }
1425
1426 static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
1427 struct bnxt_re_pd *pd)
1428 {
1429 struct bnxt_re_sqp_entries *sqp_tbl;
1430 struct bnxt_re_dev *rdev;
1431 struct bnxt_re_qp *sqp;
1432 struct bnxt_re_ah *sah;
1433 int rc = 0;
1434
1435 rdev = qp->rdev;
1436 /* Create a shadow QP to handle the QP1 traffic */
1437 sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
1438 GFP_KERNEL);
1439 if (!sqp_tbl)
1440 return -ENOMEM;
1441 rdev->gsi_ctx.sqp_tbl = sqp_tbl;
1442
1443 sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
1444 if (!sqp) {
1445 rc = -ENODEV;
1446 ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
1447 goto out;
1448 }
1449 rdev->gsi_ctx.gsi_sqp = sqp;
1450
1451 sqp->rcq = qp->rcq;
1452 sqp->scq = qp->scq;
1453 sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1454 &qp->qplib_qp);
1455 if (!sah) {
1456 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1457 &sqp->qplib_qp);
1458 rc = -ENODEV;
1459 ibdev_err(&rdev->ibdev,
1460 "Failed to create AH entry for ShadowQP");
1461 goto out;
1462 }
1463 rdev->gsi_ctx.gsi_sah = sah;
1464
1465 return 0;
1466 out:
1467 kfree(sqp_tbl);
1468 return rc;
1469 }
1470
1471 static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
1472 struct ib_qp_init_attr *init_attr)
1473 {
1474 struct bnxt_re_dev *rdev;
1475 struct bnxt_qplib_qp *qplqp;
1476 int rc;
1477
1478 rdev = qp->rdev;
1479 qplqp = &qp->qplib_qp;
1480
1481 qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1482 qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1483
1484 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
1485 if (rc) {
1486 ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
1487 goto out;
1488 }
1489
1490 rc = bnxt_re_create_shadow_gsi(qp, pd);
1491 out:
1492 return rc;
1493 }
1494
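/* Validate the requested QP capabilities against the device limits and
 * log the offending values when any limit is exceeded.
 */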
1495 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
1496 struct ib_qp_init_attr *init_attr,
1497 struct bnxt_qplib_dev_attr *dev_attr)
1498 {
1499 bool rc = true;
1500
1501 if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
1502 init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
1503 init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
1504 init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
1505 init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
1506 ibdev_err(&rdev->ibdev,
1507 "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
1508 init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
1509 init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
1510 init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
1511 init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
1512 init_attr->cap.max_inline_data,
1513 dev_attr->max_inline_data);
1514 rc = false;
1515 }
1516 return rc;
1517 }
1518
1519 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
1520 struct ib_udata *udata)
1521 {
1522 struct ib_pd *ib_pd = ib_qp->pd;
1523 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1524 struct bnxt_re_dev *rdev = pd->rdev;
1525 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1526 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1527 u32 active_qps;
1528 int rc;
1529
1530 rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
1531 if (!rc) {
1532 rc = -EINVAL;
1533 goto fail;
1534 }
1535
1536 qp->rdev = rdev;
1537 rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
1538 if (rc)
1539 goto fail;
1540
1541 if (qp_init_attr->qp_type == IB_QPT_GSI &&
1542 !(bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))) {
1543 rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
1544 if (rc == -ENODEV)
1545 goto qp_destroy;
1546 if (rc)
1547 goto fail;
1548 } else {
1549 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1550 if (rc) {
1551 ibdev_err(&rdev->ibdev, "Failed to create HW QP");
1552 goto free_umem;
1553 }
1554 if (udata) {
1555 struct bnxt_re_qp_resp resp;
1556
1557 resp.qpid = qp->qplib_qp.id;
1558 resp.rsvd = 0;
1559 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1560 if (rc) {
1561 ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
1562 goto qp_destroy;
1563 }
1564 }
1565 }
1566
1567 qp->ib_qp.qp_num = qp->qplib_qp.id;
1568 if (qp_init_attr->qp_type == IB_QPT_GSI)
1569 rdev->gsi_ctx.gsi_qp = qp;
1570 spin_lock_init(&qp->sq_lock);
1571 spin_lock_init(&qp->rq_lock);
1572 INIT_LIST_HEAD(&qp->list);
1573 mutex_lock(&rdev->qp_lock);
1574 list_add_tail(&qp->list, &rdev->qp_list);
1575 mutex_unlock(&rdev->qp_lock);
1576 active_qps = atomic_inc_return(&rdev->stats.res.qp_count);
1577 if (active_qps > rdev->stats.res.qp_watermark)
1578 rdev->stats.res.qp_watermark = active_qps;
1579 if (qp_init_attr->qp_type == IB_QPT_RC) {
1580 active_qps = atomic_inc_return(&rdev->stats.res.rc_qp_count);
1581 if (active_qps > rdev->stats.res.rc_qp_watermark)
1582 rdev->stats.res.rc_qp_watermark = active_qps;
1583 } else if (qp_init_attr->qp_type == IB_QPT_UD) {
1584 active_qps = atomic_inc_return(&rdev->stats.res.ud_qp_count);
1585 if (active_qps > rdev->stats.res.ud_qp_watermark)
1586 rdev->stats.res.ud_qp_watermark = active_qps;
1587 }
1588
1589 return 0;
1590 qp_destroy:
1591 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1592 free_umem:
1593 ib_umem_release(qp->rumem);
1594 ib_umem_release(qp->sumem);
1595 fail:
1596 return rc;
1597 }
1598
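/* Translation helpers between the IB verbs QP state/MTU enums and the
 * firmware CMDQ/CREQ encodings.
 */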
1599 static u8 __from_ib_qp_state(enum ib_qp_state state)
1600 {
1601 switch (state) {
1602 case IB_QPS_RESET:
1603 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1604 case IB_QPS_INIT:
1605 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1606 case IB_QPS_RTR:
1607 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1608 case IB_QPS_RTS:
1609 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1610 case IB_QPS_SQD:
1611 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1612 case IB_QPS_SQE:
1613 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1614 case IB_QPS_ERR:
1615 default:
1616 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1617 }
1618 }
1619
1620 static enum ib_qp_state __to_ib_qp_state(u8 state)
1621 {
1622 switch (state) {
1623 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1624 return IB_QPS_RESET;
1625 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1626 return IB_QPS_INIT;
1627 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1628 return IB_QPS_RTR;
1629 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1630 return IB_QPS_RTS;
1631 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1632 return IB_QPS_SQD;
1633 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1634 return IB_QPS_SQE;
1635 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1636 default:
1637 return IB_QPS_ERR;
1638 }
1639 }
1640
1641 static u32 __from_ib_mtu(enum ib_mtu mtu)
1642 {
1643 switch (mtu) {
1644 case IB_MTU_256:
1645 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1646 case IB_MTU_512:
1647 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1648 case IB_MTU_1024:
1649 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1650 case IB_MTU_2048:
1651 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1652 case IB_MTU_4096:
1653 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1654 default:
1655 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1656 }
1657 }
1658
1659 static enum ib_mtu __to_ib_mtu(u32 mtu)
1660 {
1661 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1662 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1663 return IB_MTU_256;
1664 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1665 return IB_MTU_512;
1666 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1667 return IB_MTU_1024;
1668 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1669 return IB_MTU_2048;
1670 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1671 return IB_MTU_4096;
1672 default:
1673 return IB_MTU_2048;
1674 }
1675 }
1676
1677 /* Shared Receive Queues */
1678 int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
1679 {
1680 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1681 ib_srq);
1682 struct bnxt_re_dev *rdev = srq->rdev;
1683 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1684 struct bnxt_qplib_nq *nq = NULL;
1685
1686 if (qplib_srq->cq)
1687 nq = qplib_srq->cq->nq;
1688 bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1689 ib_umem_release(srq->umem);
1690 atomic_dec(&rdev->stats.res.srq_count);
1691 if (nq)
1692 nq->budget--;
1693 return 0;
1694 }
1695
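/* Pin and map the user-space SRQ buffer and record the DPI to be used
 * for doorbells on this SRQ.
 */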
1696 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1697 struct bnxt_re_pd *pd,
1698 struct bnxt_re_srq *srq,
1699 struct ib_udata *udata)
1700 {
1701 struct bnxt_re_srq_req ureq;
1702 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1703 struct ib_umem *umem;
1704 int bytes = 0;
1705 struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
1706 udata, struct bnxt_re_ucontext, ib_uctx);
1707
1708 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1709 return -EFAULT;
1710
1711 bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1712 bytes = PAGE_ALIGN(bytes);
1713 umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
1714 IB_ACCESS_LOCAL_WRITE);
1715 if (IS_ERR(umem))
1716 return PTR_ERR(umem);
1717
1718 srq->umem = umem;
1719 qplib_srq->sg_info.umem = umem;
1720 qplib_srq->sg_info.pgsize = PAGE_SIZE;
1721 qplib_srq->sg_info.pgshft = PAGE_SHIFT;
1722 qplib_srq->srq_handle = ureq.srq_handle;
1723 qplib_srq->dpi = &cntx->dpi;
1724
1725 return 0;
1726 }
1727
1728 int bnxt_re_create_srq(struct ib_srq *ib_srq,
1729 struct ib_srq_init_attr *srq_init_attr,
1730 struct ib_udata *udata)
1731 {
1732 struct bnxt_qplib_dev_attr *dev_attr;
1733 struct bnxt_qplib_nq *nq = NULL;
1734 struct bnxt_re_ucontext *uctx;
1735 struct bnxt_re_dev *rdev;
1736 struct bnxt_re_srq *srq;
1737 struct bnxt_re_pd *pd;
1738 struct ib_pd *ib_pd;
1739 u32 active_srqs;
1740 int rc, entries;
1741
1742 ib_pd = ib_srq->pd;
1743 pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1744 rdev = pd->rdev;
1745 dev_attr = &rdev->dev_attr;
1746 srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
1747
1748 if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1749 ibdev_err(&rdev->ibdev, "Create CQ failed - max exceeded");
1750 rc = -EINVAL;
1751 goto exit;
1752 }
1753
1754 if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1755 rc = -EOPNOTSUPP;
1756 goto exit;
1757 }
1758
1759 uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
1760 srq->rdev = rdev;
1761 srq->qplib_srq.pd = &pd->qplib_pd;
1762 srq->qplib_srq.dpi = &rdev->dpi_privileged;
1763 /* Allocate 1 more than what's provided so posting max doesn't
1764 * mean empty
1765 */
1766 entries = bnxt_re_init_depth(srq_init_attr->attr.max_wr + 1, uctx);
1767 if (entries > dev_attr->max_srq_wqes + 1)
1768 entries = dev_attr->max_srq_wqes + 1;
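	/* Illustrative (assuming bnxt_re_init_depth() rounds the depth up):
	 * a request of max_wr = 1023 becomes at least 1024 entries, capped
	 * at max_srq_wqes + 1.
	 */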
1769 srq->qplib_srq.max_wqe = entries;
1770
1771 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1772 	/* 128-byte WQE size for SRQ, so use the max SGEs */
1773 srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
1774 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1775 srq->srq_limit = srq_init_attr->attr.srq_limit;
1776 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1777 nq = &rdev->nq[0];
1778
1779 if (udata) {
1780 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1781 if (rc)
1782 goto fail;
1783 }
1784
1785 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1786 if (rc) {
1787 ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
1788 goto fail;
1789 }
1790
1791 if (udata) {
1792 struct bnxt_re_srq_resp resp;
1793
1794 resp.srqid = srq->qplib_srq.id;
1795 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1796 if (rc) {
1797 ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
1798 bnxt_qplib_destroy_srq(&rdev->qplib_res,
1799 &srq->qplib_srq);
1800 goto fail;
1801 }
1802 }
1803 if (nq)
1804 nq->budget++;
1805 active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
1806 if (active_srqs > rdev->stats.res.srq_watermark)
1807 rdev->stats.res.srq_watermark = active_srqs;
1808 spin_lock_init(&srq->lock);
1809
1810 return 0;
1811
1812 fail:
1813 ib_umem_release(srq->umem);
1814 exit:
1815 return rc;
1816 }
1817
1818 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1819 enum ib_srq_attr_mask srq_attr_mask,
1820 struct ib_udata *udata)
1821 {
1822 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1823 ib_srq);
1824 struct bnxt_re_dev *rdev = srq->rdev;
1825 int rc;
1826
1827 switch (srq_attr_mask) {
1828 case IB_SRQ_MAX_WR:
1829 /* SRQ resize is not supported */
1830 return -EINVAL;
1831 case IB_SRQ_LIMIT:
1832 /* Change the SRQ threshold */
1833 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1834 return -EINVAL;
1835
1836 srq->qplib_srq.threshold = srq_attr->srq_limit;
1837 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1838 if (rc) {
1839 ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
1840 return rc;
1841 }
1842 /* On success, update the shadow */
1843 srq->srq_limit = srq_attr->srq_limit;
1844 		/* No need to build and send a response back to udata */
1845 return 0;
1846 default:
1847 ibdev_err(&rdev->ibdev,
1848 "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1849 return -EINVAL;
1850 }
1851 }
1852
1853 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1854 {
1855 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1856 ib_srq);
1857 struct bnxt_re_srq tsrq;
1858 struct bnxt_re_dev *rdev = srq->rdev;
1859 int rc;
1860
1861 /* Get live SRQ attr */
1862 tsrq.qplib_srq.id = srq->qplib_srq.id;
1863 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1864 if (rc) {
1865 ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
1866 return rc;
1867 }
1868 srq_attr->max_wr = srq->qplib_srq.max_wqe;
1869 srq_attr->max_sge = srq->qplib_srq.max_sge;
1870 srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1871
1872 return 0;
1873 }
1874
1875 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1876 const struct ib_recv_wr **bad_wr)
1877 {
1878 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1879 ib_srq);
1880 struct bnxt_qplib_swqe wqe;
1881 unsigned long flags;
1882 int rc = 0;
1883
1884 spin_lock_irqsave(&srq->lock, flags);
1885 while (wr) {
1886 /* Transcribe each ib_recv_wr to qplib_swqe */
1887 wqe.num_sge = wr->num_sge;
1888 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1889 wqe.wr_id = wr->wr_id;
1890 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1891
1892 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1893 if (rc) {
1894 *bad_wr = wr;
1895 break;
1896 }
1897 wr = wr->next;
1898 }
1899 spin_unlock_irqrestore(&srq->lock, flags);
1900
1901 return rc;
1902 }
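
/* Propagate selected QP1 attributes (state, pkey index, qkey, SQ PSN) to the
 * GSI shadow QP so that it stays in step with the real QP1; only the masks
 * handled below are mirrored.
 */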
1903 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1904 struct bnxt_re_qp *qp1_qp,
1905 int qp_attr_mask)
1906 {
1907 struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
1908 int rc;
1909
1910 if (qp_attr_mask & IB_QP_STATE) {
1911 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1912 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1913 }
1914 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1915 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1916 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1917 }
1918
1919 if (qp_attr_mask & IB_QP_QKEY) {
1920 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1921 		/* Use a fixed, driver-chosen QKEY for the shadow QP */
1922 qp->qplib_qp.qkey = 0x81818181;
1923 }
1924 if (qp_attr_mask & IB_QP_SQ_PSN) {
1925 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1926 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1927 }
1928
1929 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1930 if (rc)
1931 ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
1932 return rc;
1933 }
1934
1935 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1936 int qp_attr_mask, struct ib_udata *udata)
1937 {
1938 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1939 struct bnxt_re_dev *rdev = qp->rdev;
1940 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1941 enum ib_qp_state curr_qp_state, new_qp_state;
1942 int rc, entries;
1943 unsigned int flags;
1944 u8 nw_type;
1945
1946 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1947 return -EOPNOTSUPP;
1948
1949 qp->qplib_qp.modify_flags = 0;
1950 if (qp_attr_mask & IB_QP_STATE) {
1951 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1952 new_qp_state = qp_attr->qp_state;
1953 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1954 ib_qp->qp_type, qp_attr_mask)) {
1955 ibdev_err(&rdev->ibdev,
1956 "Invalid attribute mask: %#x specified ",
1957 qp_attr_mask);
1958 ibdev_err(&rdev->ibdev,
1959 "for qpn: %#x type: %#x",
1960 ib_qp->qp_num, ib_qp->qp_type);
1961 ibdev_err(&rdev->ibdev,
1962 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1963 curr_qp_state, new_qp_state);
1964 return -EINVAL;
1965 }
1966 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1967 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1968
1969 if (!qp->sumem &&
1970 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1971 ibdev_dbg(&rdev->ibdev,
1972 "Move QP = %p to flush list\n", qp);
1973 flags = bnxt_re_lock_cqs(qp);
1974 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1975 bnxt_re_unlock_cqs(qp, flags);
1976 }
1977 if (!qp->sumem &&
1978 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1979 ibdev_dbg(&rdev->ibdev,
1980 "Move QP = %p out of flush list\n", qp);
1981 flags = bnxt_re_lock_cqs(qp);
1982 bnxt_qplib_clean_qp(&qp->qplib_qp);
1983 bnxt_re_unlock_cqs(qp, flags);
1984 }
1985 }
1986 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1987 qp->qplib_qp.modify_flags |=
1988 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1989 qp->qplib_qp.en_sqd_async_notify = true;
1990 }
1991 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1992 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1993 qp->qplib_qp.access =
1994 __from_ib_access_flags(qp_attr->qp_access_flags);
1995 /* LOCAL_WRITE access must be set to allow RC receive */
1996 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1997 /* Temp: Set all params on QP as of now */
1998 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1999 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
2000 }
2001 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
2002 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2003 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
2004 }
2005 if (qp_attr_mask & IB_QP_QKEY) {
2006 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2007 qp->qplib_qp.qkey = qp_attr->qkey;
2008 }
2009 if (qp_attr_mask & IB_QP_AV) {
2010 const struct ib_global_route *grh =
2011 rdma_ah_read_grh(&qp_attr->ah_attr);
2012 const struct ib_gid_attr *sgid_attr;
2013 struct bnxt_re_gid_ctx *ctx;
2014
2015 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
2016 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
2017 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
2018 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
2019 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
2020 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
2021 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
2022 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
2023 sizeof(qp->qplib_qp.ah.dgid.data));
2024 qp->qplib_qp.ah.flow_label = grh->flow_label;
2025 sgid_attr = grh->sgid_attr;
2026 /* Get the HW context of the GID. The reference
2027 * of GID table entry is already taken by the caller.
2028 */
2029 ctx = rdma_read_gid_hw_context(sgid_attr);
2030 qp->qplib_qp.ah.sgid_index = ctx->idx;
2031 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
2032 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
2033 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
2034 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
2035 ether_addr_copy(qp->qplib_qp.ah.dmac,
2036 qp_attr->ah_attr.roce.dmac);
2037
2038 rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
2039 &qp->qplib_qp.smac[0]);
2040 if (rc)
2041 return rc;
2042
2043 nw_type = rdma_gid_attr_network_type(sgid_attr);
2044 switch (nw_type) {
2045 case RDMA_NETWORK_IPV4:
2046 qp->qplib_qp.nw_type =
2047 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
2048 break;
2049 case RDMA_NETWORK_IPV6:
2050 qp->qplib_qp.nw_type =
2051 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
2052 break;
2053 default:
2054 qp->qplib_qp.nw_type =
2055 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
2056 break;
2057 }
2058 }
2059
2060 if (qp_attr_mask & IB_QP_PATH_MTU) {
2061 qp->qplib_qp.modify_flags |=
2062 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2063 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
2064 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
2065 } else if (qp_attr->qp_state == IB_QPS_RTR) {
2066 qp->qplib_qp.modify_flags |=
2067 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2068 qp->qplib_qp.path_mtu =
2069 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
2070 qp->qplib_qp.mtu =
2071 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
2072 }
2073
2074 if (qp_attr_mask & IB_QP_TIMEOUT) {
2075 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
2076 qp->qplib_qp.timeout = qp_attr->timeout;
2077 }
2078 if (qp_attr_mask & IB_QP_RETRY_CNT) {
2079 qp->qplib_qp.modify_flags |=
2080 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
2081 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
2082 }
2083 if (qp_attr_mask & IB_QP_RNR_RETRY) {
2084 qp->qplib_qp.modify_flags |=
2085 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
2086 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
2087 }
2088 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
2089 qp->qplib_qp.modify_flags |=
2090 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
2091 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
2092 }
2093 if (qp_attr_mask & IB_QP_RQ_PSN) {
2094 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
2095 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
2096 }
2097 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2098 qp->qplib_qp.modify_flags |=
2099 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
2100 /* Cap the max_rd_atomic to device max */
2101 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
2102 dev_attr->max_qp_rd_atom);
2103 }
2104 if (qp_attr_mask & IB_QP_SQ_PSN) {
2105 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2106 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
2107 }
2108 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2109 if (qp_attr->max_dest_rd_atomic >
2110 dev_attr->max_qp_init_rd_atom) {
2111 ibdev_err(&rdev->ibdev,
2112 "max_dest_rd_atomic requested%d is > dev_max%d",
2113 qp_attr->max_dest_rd_atomic,
2114 dev_attr->max_qp_init_rd_atom);
2115 return -EINVAL;
2116 }
2117
2118 qp->qplib_qp.modify_flags |=
2119 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
2120 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
2121 }
2122 if (qp_attr_mask & IB_QP_CAP) {
2123 struct bnxt_re_ucontext *uctx =
2124 rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
2125
2126 qp->qplib_qp.modify_flags |=
2127 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
2128 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
2129 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
2130 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2131 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2132 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2133 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2134 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2135 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2136 (qp_attr->cap.max_inline_data >=
2137 dev_attr->max_inline_data)) {
2138 ibdev_err(&rdev->ibdev,
2139 "Create QP failed - max exceeded");
2140 return -EINVAL;
2141 }
2142 entries = bnxt_re_init_depth(qp_attr->cap.max_send_wr, uctx);
2143 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2144 dev_attr->max_qp_wqes + 1);
2145 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2146 qp_attr->cap.max_send_wr;
2147 		/*
2148 		 * Reserve one slot for the phantom WQE. Some applications can
2149 		 * post one extra entry in this case, so allow for it to avoid
2150 		 * an unexpected queue-full condition.
2151 		 */
2152 qp->qplib_qp.sq.q_full_delta -= 1;
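		/* Illustrative arithmetic (hypothetical numbers): with
		 * max_send_wr = 256 and sq.max_wqe = 260, q_full_delta becomes
		 * 260 - 256 - 1 = 3 reserved slots.
		 */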
2153 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2154 if (qp->qplib_qp.rq.max_wqe) {
2155 entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
2156 qp->qplib_qp.rq.max_wqe =
2157 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2158 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2159 qp_attr->cap.max_recv_wr;
2160 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2161 } else {
2162 /* SRQ was used prior, just ignore the RQ caps */
2163 }
2164 }
2165 if (qp_attr_mask & IB_QP_DEST_QPN) {
2166 qp->qplib_qp.modify_flags |=
2167 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2168 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2169 }
2170 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2171 if (rc) {
2172 ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2173 return rc;
2174 }
2175 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2176 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2177 return rc;
2178 }
2179
2180 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2181 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2182 {
2183 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2184 struct bnxt_re_dev *rdev = qp->rdev;
2185 struct bnxt_qplib_qp *qplib_qp;
2186 int rc;
2187
2188 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2189 if (!qplib_qp)
2190 return -ENOMEM;
2191
2192 qplib_qp->id = qp->qplib_qp.id;
2193 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2194
2195 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2196 if (rc) {
2197 ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2198 goto out;
2199 }
2200 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2201 qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2202 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2203 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2204 qp_attr->pkey_index = qplib_qp->pkey_index;
2205 qp_attr->qkey = qplib_qp->qkey;
2206 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2207 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
2208 qplib_qp->ah.host_sgid_index,
2209 qplib_qp->ah.hop_limit,
2210 qplib_qp->ah.traffic_class);
2211 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2212 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2213 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2214 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2215 qp_attr->timeout = qplib_qp->timeout;
2216 qp_attr->retry_cnt = qplib_qp->retry_cnt;
2217 qp_attr->rnr_retry = qplib_qp->rnr_retry;
2218 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2219 qp_attr->rq_psn = qplib_qp->rq.psn;
2220 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2221 qp_attr->sq_psn = qplib_qp->sq.psn;
2222 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2223 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2224 IB_SIGNAL_REQ_WR;
2225 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2226
2227 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2228 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2229 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2230 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2231 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2232 qp_init_attr->cap = qp_attr->cap;
2233
2234 out:
2235 kfree(qplib_qp);
2236 return rc;
2237 }
2238
2239 /* Routine for sending QP1 packets for RoCE V1 and V2
2240  */
2241 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2242 const struct ib_send_wr *wr,
2243 struct bnxt_qplib_swqe *wqe,
2244 int payload_size)
2245 {
2246 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2247 ib_ah);
2248 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2249 const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2250 struct bnxt_qplib_sge sge;
2251 u8 nw_type;
2252 u16 ether_type;
2253 union ib_gid dgid;
2254 bool is_eth = false;
2255 bool is_vlan = false;
2256 bool is_grh = false;
2257 bool is_udp = false;
2258 u8 ip_version = 0;
2259 u16 vlan_id = 0xFFFF;
2260 void *buf;
2261 int i, rc;
2262
2263 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2264
2265 rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2266 if (rc)
2267 return rc;
2268
2269 /* Get network header type for this GID */
2270 nw_type = rdma_gid_attr_network_type(sgid_attr);
2271 switch (nw_type) {
2272 case RDMA_NETWORK_IPV4:
2273 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2274 break;
2275 case RDMA_NETWORK_IPV6:
2276 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2277 break;
2278 default:
2279 nw_type = BNXT_RE_ROCE_V1_PACKET;
2280 break;
2281 }
2282 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
2283 is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2284 if (is_udp) {
2285 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2286 ip_version = 4;
2287 ether_type = ETH_P_IP;
2288 } else {
2289 ip_version = 6;
2290 ether_type = ETH_P_IPV6;
2291 }
2292 is_grh = false;
2293 } else {
2294 ether_type = ETH_P_IBOE;
2295 is_grh = true;
2296 }
2297
2298 is_eth = true;
2299 is_vlan = vlan_id && (vlan_id < 0x1000);
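	/* vlan_id keeps its 0xFFFF initial value when no VLAN is attached to
	 * the GID; valid VLAN IDs are below 0x1000.
	 */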
2300
2301 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2302 ip_version, is_udp, 0, &qp->qp1_hdr);
2303
2304 /* ETH */
2305 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2306 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2307
2308 /* For vlan, check the sgid for vlan existence */
2309
2310 if (!is_vlan) {
2311 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2312 } else {
2313 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2314 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2315 }
2316
2317 if (is_grh || (ip_version == 6)) {
2318 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2319 sizeof(sgid_attr->gid));
2320 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2321 sizeof(sgid_attr->gid));
2322 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
2323 }
2324
2325 if (ip_version == 4) {
2326 qp->qp1_hdr.ip4.tos = 0;
2327 qp->qp1_hdr.ip4.id = 0;
2328 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2329 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2330
2331 memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2332 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2333 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2334 }
2335
2336 if (is_udp) {
2337 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2338 qp->qp1_hdr.udp.sport = htons(0x8CD1);
2339 qp->qp1_hdr.udp.csum = 0;
2340 }
2341
2342 /* BTH */
2343 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2344 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2345 qp->qp1_hdr.immediate_present = 1;
2346 } else {
2347 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2348 }
2349 if (wr->send_flags & IB_SEND_SOLICITED)
2350 qp->qp1_hdr.bth.solicited_event = 1;
2351 /* pad_count */
2352 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2353
2354 /* P_key for QP1 is for all members */
2355 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2356 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2357 qp->qp1_hdr.bth.ack_req = 0;
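	/* The BTH PSN is a 24-bit field, hence the BTH_PSN_MASK wrap below */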
2358 qp->send_psn++;
2359 qp->send_psn &= BTH_PSN_MASK;
2360 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2361 /* DETH */
2362 	/* Use the privileged Q_Key for QP1 */
2363 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2364 qp->qp1_hdr.deth.source_qpn = IB_QP1;
2365
2366 /* Pack the QP1 to the transmit buffer */
2367 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2368 if (buf) {
2369 ib_ud_header_pack(&qp->qp1_hdr, buf);
2370 for (i = wqe->num_sge; i; i--) {
2371 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2372 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2373 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2374 }
2375
2376 /*
2377 		 * The max header buf size for IPv6 RoCE V2 is 86 bytes,
2378 		 * which is the same as the QP1 SQ header buffer.
2379 		 * The header buf size for IPv4 RoCE V2 can be 66 bytes:
2380 		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
2381 		 * Subtract 20 bytes from the QP1 SQ header buf size.
2382 */
2383 if (is_udp && ip_version == 4)
2384 sge.size -= 20;
2385 /*
2386 		 * The max header buf size for RoCE V1 is 78 bytes:
2387 		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2388 		 * Subtract 8 bytes from the QP1 SQ header buf size.
2389 */
2390 if (!is_udp)
2391 sge.size -= 8;
2392
2393 /* Subtract 4 bytes for non vlan packets */
2394 if (!is_vlan)
2395 sge.size -= 4;
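		/* Worked example from the notes above: a RoCE V2 IPv4 packet
		 * without a VLAN tag ends up with 86 - 20 - 4 = 62 header
		 * bytes (ETH 14 + IP 20 + UDP 8 + BTH 20).
		 */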
2396
2397 wqe->sg_list[0].addr = sge.addr;
2398 wqe->sg_list[0].lkey = sge.lkey;
2399 wqe->sg_list[0].size = sge.size;
2400 wqe->num_sge++;
2401
2402 } else {
2403 ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2404 rc = -ENOMEM;
2405 }
2406 return rc;
2407 }
2408
2409 /* The MAD layer only provides a recv SGE sized for ib_grh plus the MAD
2410  * datagram: no Ethernet headers, Ethertype, BTH, DETH, nor RoCE iCRC. The
2411  * Cu+ solution must provide a buffer for the entire receive packet
2412  * (334 bytes) with no VLAN, and then copy the GRH and the MAD datagram out
2413  * to the provided SGE.
2414 */
2415 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2416 const struct ib_recv_wr *wr,
2417 struct bnxt_qplib_swqe *wqe,
2418 int payload_size)
2419 {
2420 struct bnxt_re_sqp_entries *sqp_entry;
2421 struct bnxt_qplib_sge ref, sge;
2422 struct bnxt_re_dev *rdev;
2423 u32 rq_prod_index;
2424
2425 rdev = qp->rdev;
2426
2427 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2428
2429 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2430 return -ENOMEM;
2431
2432 /* Create 1 SGE to receive the entire
2433 * ethernet packet
2434 */
2435 /* Save the reference from ULP */
2436 ref.addr = wqe->sg_list[0].addr;
2437 ref.lkey = wqe->sg_list[0].lkey;
2438 ref.size = wqe->sg_list[0].size;
2439
2440 sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2441
2442 /* SGE 1 */
2443 wqe->sg_list[0].addr = sge.addr;
2444 wqe->sg_list[0].lkey = sge.lkey;
2445 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2446 sge.size -= wqe->sg_list[0].size;
2447
2448 sqp_entry->sge.addr = ref.addr;
2449 sqp_entry->sge.lkey = ref.lkey;
2450 sqp_entry->sge.size = ref.size;
2451 /* Store the wrid for reporting completion */
2452 sqp_entry->wrid = wqe->wr_id;
2453 	/* Change wqe->wr_id to the table index */
2454 wqe->wr_id = rq_prod_index;
2455 return 0;
2456 }
2457
2458 static int is_ud_qp(struct bnxt_re_qp *qp)
2459 {
2460 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2461 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2462 }
2463
2464 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2465 const struct ib_send_wr *wr,
2466 struct bnxt_qplib_swqe *wqe)
2467 {
2468 struct bnxt_re_ah *ah = NULL;
2469
2470 if (is_ud_qp(qp)) {
2471 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2472 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2473 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2474 wqe->send.avid = ah->qplib_ah.id;
2475 }
2476 switch (wr->opcode) {
2477 case IB_WR_SEND:
2478 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2479 break;
2480 case IB_WR_SEND_WITH_IMM:
2481 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2482 wqe->send.imm_data = be32_to_cpu(wr->ex.imm_data);
2483 break;
2484 case IB_WR_SEND_WITH_INV:
2485 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2486 wqe->send.inv_key = wr->ex.invalidate_rkey;
2487 break;
2488 default:
2489 return -EINVAL;
2490 }
2491 if (wr->send_flags & IB_SEND_SIGNALED)
2492 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2493 if (wr->send_flags & IB_SEND_FENCE)
2494 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2495 if (wr->send_flags & IB_SEND_SOLICITED)
2496 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2497 if (wr->send_flags & IB_SEND_INLINE)
2498 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2499
2500 return 0;
2501 }
2502
2503 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2504 struct bnxt_qplib_swqe *wqe)
2505 {
2506 switch (wr->opcode) {
2507 case IB_WR_RDMA_WRITE:
2508 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2509 break;
2510 case IB_WR_RDMA_WRITE_WITH_IMM:
2511 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2512 wqe->rdma.imm_data = be32_to_cpu(wr->ex.imm_data);
2513 break;
2514 case IB_WR_RDMA_READ:
2515 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2516 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2517 break;
2518 default:
2519 return -EINVAL;
2520 }
2521 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2522 wqe->rdma.r_key = rdma_wr(wr)->rkey;
2523 if (wr->send_flags & IB_SEND_SIGNALED)
2524 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2525 if (wr->send_flags & IB_SEND_FENCE)
2526 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2527 if (wr->send_flags & IB_SEND_SOLICITED)
2528 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2529 if (wr->send_flags & IB_SEND_INLINE)
2530 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2531
2532 return 0;
2533 }
2534
2535 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2536 struct bnxt_qplib_swqe *wqe)
2537 {
2538 switch (wr->opcode) {
2539 case IB_WR_ATOMIC_CMP_AND_SWP:
2540 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2541 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2542 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2543 break;
2544 case IB_WR_ATOMIC_FETCH_AND_ADD:
2545 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2546 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2547 break;
2548 default:
2549 return -EINVAL;
2550 }
2551 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2552 wqe->atomic.r_key = atomic_wr(wr)->rkey;
2553 if (wr->send_flags & IB_SEND_SIGNALED)
2554 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2555 if (wr->send_flags & IB_SEND_FENCE)
2556 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2557 if (wr->send_flags & IB_SEND_SOLICITED)
2558 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2559 return 0;
2560 }
2561
2562 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2563 struct bnxt_qplib_swqe *wqe)
2564 {
2565 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2566 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2567
2568 if (wr->send_flags & IB_SEND_SIGNALED)
2569 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2570 if (wr->send_flags & IB_SEND_SOLICITED)
2571 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2572
2573 return 0;
2574 }
2575
2576 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2577 struct bnxt_qplib_swqe *wqe)
2578 {
2579 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2580 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2581 int access = wr->access;
2582
2583 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2584 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2585 wqe->frmr.page_list = mr->pages;
2586 wqe->frmr.page_list_len = mr->npages;
2587 wqe->frmr.levels = qplib_frpl->hwq.level;
2588 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2589
2590 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2591 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2592
2593 if (access & IB_ACCESS_LOCAL_WRITE)
2594 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2595 if (access & IB_ACCESS_REMOTE_READ)
2596 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2597 if (access & IB_ACCESS_REMOTE_WRITE)
2598 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2599 if (access & IB_ACCESS_REMOTE_ATOMIC)
2600 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2601 if (access & IB_ACCESS_MW_BIND)
2602 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2603
2604 wqe->frmr.l_key = wr->key;
2605 wqe->frmr.length = wr->mr->length;
2606 wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
2607 wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
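	/* Page sizes are encoded as a log2 multiple of 4K, e.g. a 64K MR page
	 * size gives ilog2(64K >> 12) = 4 (illustrative arithmetic).
	 */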
2608 wqe->frmr.va = wr->mr->iova;
2609 return 0;
2610 }
2611
2612 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2613 const struct ib_send_wr *wr,
2614 struct bnxt_qplib_swqe *wqe)
2615 {
2616 /* Copy the inline data to the data field */
2617 u8 *in_data;
2618 u32 i, sge_len;
2619 void *sge_addr;
2620
2621 in_data = wqe->inline_data;
2622 for (i = 0; i < wr->num_sge; i++) {
2623 sge_addr = (void *)(unsigned long)
2624 wr->sg_list[i].addr;
2625 sge_len = wr->sg_list[i].length;
2626
2627 if ((sge_len + wqe->inline_len) >
2628 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2629 ibdev_err(&rdev->ibdev,
2630 "Inline data size requested > supported value");
2631 return -EINVAL;
2632 }
2633 sge_len = wr->sg_list[i].length;
2634
2635 memcpy(in_data, sge_addr, sge_len);
2636 in_data += wr->sg_list[i].length;
2637 wqe->inline_len += wr->sg_list[i].length;
2638 }
2639 return wqe->inline_len;
2640 }
2641
2642 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2643 const struct ib_send_wr *wr,
2644 struct bnxt_qplib_swqe *wqe)
2645 {
2646 int payload_sz = 0;
2647
2648 if (wr->send_flags & IB_SEND_INLINE)
2649 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2650 else
2651 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2652 wqe->num_sge);
2653
2654 return payload_sz;
2655 }
2656
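/* Workaround note (inference from the constant's name): UD, GSI and raw
 * Ethertype QPs can stall on some hardware after BNXT_RE_UD_QP_HW_STALL
 * posted WQEs, so the QP is nudged back to RTS to keep the SQ moving.
 */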
2657 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2658 {
2659 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2660 qp->ib_qp.qp_type == IB_QPT_GSI ||
2661 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2662 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2663 int qp_attr_mask;
2664 struct ib_qp_attr qp_attr;
2665
2666 qp_attr_mask = IB_QP_STATE;
2667 qp_attr.qp_state = IB_QPS_RTS;
2668 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2669 qp->qplib_qp.wqe_cnt = 0;
2670 }
2671 }
2672
2673 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2674 struct bnxt_re_qp *qp,
2675 const struct ib_send_wr *wr)
2676 {
2677 int rc = 0, payload_sz = 0;
2678 unsigned long flags;
2679
2680 spin_lock_irqsave(&qp->sq_lock, flags);
2681 while (wr) {
2682 struct bnxt_qplib_swqe wqe = {};
2683
2684 /* Common */
2685 wqe.num_sge = wr->num_sge;
2686 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2687 ibdev_err(&rdev->ibdev,
2688 "Limit exceeded for Send SGEs");
2689 rc = -EINVAL;
2690 goto bad;
2691 }
2692
2693 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2694 if (payload_sz < 0) {
2695 rc = -EINVAL;
2696 goto bad;
2697 }
2698 wqe.wr_id = wr->wr_id;
2699
2700 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2701
2702 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2703 if (!rc)
2704 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2705 bad:
2706 if (rc) {
2707 ibdev_err(&rdev->ibdev,
2708 "Post send failed opcode = %#x rc = %d",
2709 wr->opcode, rc);
2710 break;
2711 }
2712 wr = wr->next;
2713 }
2714 bnxt_qplib_post_send_db(&qp->qplib_qp);
2715 bnxt_ud_qp_hw_stall_workaround(qp);
2716 spin_unlock_irqrestore(&qp->sq_lock, flags);
2717 return rc;
2718 }
2719
2720 static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
2721 {
2722 /* Need unconditional fence for non-wire memory opcode
2723 * to work as expected.
2724 */
2725 if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
2726 wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
2727 wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
2728 wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
2729 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2730 }
2731
2732 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2733 const struct ib_send_wr **bad_wr)
2734 {
2735 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2736 struct bnxt_qplib_swqe wqe;
2737 int rc = 0, payload_sz = 0;
2738 unsigned long flags;
2739
2740 spin_lock_irqsave(&qp->sq_lock, flags);
2741 while (wr) {
2742 /* House keeping */
2743 memset(&wqe, 0, sizeof(wqe));
2744
2745 /* Common */
2746 wqe.num_sge = wr->num_sge;
2747 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2748 ibdev_err(&qp->rdev->ibdev,
2749 "Limit exceeded for Send SGEs");
2750 rc = -EINVAL;
2751 goto bad;
2752 }
2753
2754 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2755 if (payload_sz < 0) {
2756 rc = -EINVAL;
2757 goto bad;
2758 }
2759 wqe.wr_id = wr->wr_id;
2760
2761 switch (wr->opcode) {
2762 case IB_WR_SEND:
2763 case IB_WR_SEND_WITH_IMM:
2764 if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2765 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2766 payload_sz);
2767 if (rc)
2768 goto bad;
2769 wqe.rawqp1.lflags |=
2770 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2771 }
2772 switch (wr->send_flags) {
2773 case IB_SEND_IP_CSUM:
2774 wqe.rawqp1.lflags |=
2775 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2776 break;
2777 default:
2778 break;
2779 }
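			/* GSI and regular SEND/SEND_WITH_IMM still need the
			 * common UD send fields; fall through to
			 * bnxt_re_build_send_wqe().
			 */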
2780 fallthrough;
2781 case IB_WR_SEND_WITH_INV:
2782 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2783 break;
2784 case IB_WR_RDMA_WRITE:
2785 case IB_WR_RDMA_WRITE_WITH_IMM:
2786 case IB_WR_RDMA_READ:
2787 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2788 break;
2789 case IB_WR_ATOMIC_CMP_AND_SWP:
2790 case IB_WR_ATOMIC_FETCH_AND_ADD:
2791 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2792 break;
2793 case IB_WR_RDMA_READ_WITH_INV:
2794 ibdev_err(&qp->rdev->ibdev,
2795 "RDMA Read with Invalidate is not supported");
2796 rc = -EINVAL;
2797 goto bad;
2798 case IB_WR_LOCAL_INV:
2799 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2800 break;
2801 case IB_WR_REG_MR:
2802 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2803 break;
2804 default:
2805 /* Unsupported WRs */
2806 ibdev_err(&qp->rdev->ibdev,
2807 "WR (%#x) is not supported", wr->opcode);
2808 rc = -EINVAL;
2809 goto bad;
2810 }
2811 if (!rc) {
2812 if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2813 bnxt_re_legacy_set_uc_fence(&wqe);
2814 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2815 }
2816 bad:
2817 if (rc) {
2818 ibdev_err(&qp->rdev->ibdev,
2819 "post_send failed op:%#x qps = %#x rc = %d\n",
2820 wr->opcode, qp->qplib_qp.state, rc);
2821 *bad_wr = wr;
2822 break;
2823 }
2824 wr = wr->next;
2825 }
2826 bnxt_qplib_post_send_db(&qp->qplib_qp);
2827 bnxt_ud_qp_hw_stall_workaround(qp);
2828 spin_unlock_irqrestore(&qp->sq_lock, flags);
2829
2830 return rc;
2831 }
2832
2833 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2834 struct bnxt_re_qp *qp,
2835 const struct ib_recv_wr *wr)
2836 {
2837 struct bnxt_qplib_swqe wqe;
2838 int rc = 0;
2839
2840 while (wr) {
2841 /* House keeping */
2842 memset(&wqe, 0, sizeof(wqe));
2843
2844 /* Common */
2845 wqe.num_sge = wr->num_sge;
2846 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2847 ibdev_err(&rdev->ibdev,
2848 "Limit exceeded for Receive SGEs");
2849 rc = -EINVAL;
2850 break;
2851 }
2852 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2853 wqe.wr_id = wr->wr_id;
2854 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2855
2856 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2857 if (rc)
2858 break;
2859
2860 wr = wr->next;
2861 }
2862 if (!rc)
2863 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2864 return rc;
2865 }
2866
2867 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2868 const struct ib_recv_wr **bad_wr)
2869 {
2870 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2871 struct bnxt_qplib_swqe wqe;
2872 int rc = 0, payload_sz = 0;
2873 unsigned long flags;
2874 u32 count = 0;
2875
2876 spin_lock_irqsave(&qp->rq_lock, flags);
2877 while (wr) {
2878 /* House keeping */
2879 memset(&wqe, 0, sizeof(wqe));
2880
2881 /* Common */
2882 wqe.num_sge = wr->num_sge;
2883 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2884 ibdev_err(&qp->rdev->ibdev,
2885 "Limit exceeded for Receive SGEs");
2886 rc = -EINVAL;
2887 *bad_wr = wr;
2888 break;
2889 }
2890
2891 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2892 wr->num_sge);
2893 wqe.wr_id = wr->wr_id;
2894 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2895
2896 if (ib_qp->qp_type == IB_QPT_GSI &&
2897 qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2898 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2899 payload_sz);
2900 if (!rc)
2901 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2902 if (rc) {
2903 *bad_wr = wr;
2904 break;
2905 }
2906
2907 		/* Ring the DB when the posted RQEs reach a threshold value */
2908 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2909 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2910 count = 0;
2911 }
2912
2913 wr = wr->next;
2914 }
2915
2916 if (count)
2917 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2918
2919 spin_unlock_irqrestore(&qp->rq_lock, flags);
2920
2921 return rc;
2922 }
2923
2924 /* Completion Queues */
2925 int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2926 {
2927 struct bnxt_qplib_chip_ctx *cctx;
2928 struct bnxt_qplib_nq *nq;
2929 struct bnxt_re_dev *rdev;
2930 struct bnxt_re_cq *cq;
2931
2932 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2933 rdev = cq->rdev;
2934 nq = cq->qplib_cq.nq;
2935 cctx = rdev->chip_ctx;
2936
2937 if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
2938 free_page((unsigned long)cq->uctx_cq_page);
2939 hash_del(&cq->hash_entry);
2940 }
2941 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2942 ib_umem_release(cq->umem);
2943
2944 atomic_dec(&rdev->stats.res.cq_count);
2945 nq->budget--;
2946 kfree(cq->cql);
2947 return 0;
2948 }
2949
2950 int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
2951 struct uverbs_attr_bundle *attrs)
2952 {
2953 struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
2954 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
2955 struct ib_udata *udata = &attrs->driver_udata;
2956 struct bnxt_re_ucontext *uctx =
2957 rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
2958 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2959 struct bnxt_qplib_chip_ctx *cctx;
2960 struct bnxt_qplib_nq *nq = NULL;
2961 unsigned int nq_alloc_cnt;
2962 int cqe = attr->cqe;
2963 int rc, entries;
2964 u32 active_cqs;
2965
2966 if (attr->flags)
2967 return -EOPNOTSUPP;
2968
2969 /* Validate CQ fields */
2970 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2971 ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
2972 return -EINVAL;
2973 }
2974
2975 cq->rdev = rdev;
2976 cctx = rdev->chip_ctx;
2977 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2978
2979 entries = bnxt_re_init_depth(cqe + 1, uctx);
2980 if (entries > dev_attr->max_cq_wqes + 1)
2981 entries = dev_attr->max_cq_wqes + 1;
2982
2983 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
2984 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
2985 if (udata) {
2986 struct bnxt_re_cq_req req;
2987 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2988 rc = -EFAULT;
2989 goto fail;
2990 }
2991
2992 cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
2993 entries * sizeof(struct cq_base),
2994 IB_ACCESS_LOCAL_WRITE);
2995 if (IS_ERR(cq->umem)) {
2996 rc = PTR_ERR(cq->umem);
2997 goto fail;
2998 }
2999 cq->qplib_cq.sg_info.umem = cq->umem;
3000 cq->qplib_cq.dpi = &uctx->dpi;
3001 } else {
3002 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
3003 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
3004 GFP_KERNEL);
3005 if (!cq->cql) {
3006 rc = -ENOMEM;
3007 goto fail;
3008 }
3009
3010 cq->qplib_cq.dpi = &rdev->dpi_privileged;
3011 }
3012 /*
3013 	 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
3014 	 * used to derive the NQ index.
3015 */
3016 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
3017 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
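	/* Illustrative: with num_msix = 9 there are 8 NQs, so successive CQs
	 * rotate through NQ indices 0..7 (hypothetical MSI-X count).
	 */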
3018 cq->qplib_cq.max_wqe = entries;
3019 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
3020 cq->qplib_cq.nq = nq;
3021
3022 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
3023 if (rc) {
3024 ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
3025 goto fail;
3026 }
3027
3028 cq->ib_cq.cqe = entries;
3029 cq->cq_period = cq->qplib_cq.period;
3030 nq->budget++;
3031
3032 active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
3033 if (active_cqs > rdev->stats.res.cq_watermark)
3034 rdev->stats.res.cq_watermark = active_cqs;
3035 spin_lock_init(&cq->cq_lock);
3036
3037 if (udata) {
3038 struct bnxt_re_cq_resp resp = {};
3039
3040 if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
3041 hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
3042 /* Allocate a page */
3043 cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
3044 if (!cq->uctx_cq_page) {
3045 rc = -ENOMEM;
3046 goto c2fail;
3047 }
3048 resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
3049 }
3050 resp.cqid = cq->qplib_cq.id;
3051 resp.tail = cq->qplib_cq.hwq.cons;
3052 resp.phase = cq->qplib_cq.period;
3053 resp.rsvd = 0;
3054 rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
3055 if (rc) {
3056 ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
3057 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3058 goto free_mem;
3059 }
3060 }
3061
3062 return 0;
3063
3064 free_mem:
3065 free_page((unsigned long)cq->uctx_cq_page);
3066 c2fail:
3067 ib_umem_release(cq->umem);
3068 fail:
3069 kfree(cq->cql);
3070 return rc;
3071 }
3072
3073 static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
3074 {
3075 struct bnxt_re_dev *rdev = cq->rdev;
3076
3077 bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
3078
3079 cq->qplib_cq.max_wqe = cq->resize_cqe;
3080 if (cq->resize_umem) {
3081 ib_umem_release(cq->umem);
3082 cq->umem = cq->resize_umem;
3083 cq->resize_umem = NULL;
3084 cq->resize_cqe = 0;
3085 }
3086 }
3087
3088 int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
3089 {
3090 struct bnxt_qplib_sg_info sg_info = {};
3091 struct bnxt_qplib_dpi *orig_dpi = NULL;
3092 struct bnxt_qplib_dev_attr *dev_attr;
3093 struct bnxt_re_ucontext *uctx = NULL;
3094 struct bnxt_re_resize_cq_req req;
3095 struct bnxt_re_dev *rdev;
3096 struct bnxt_re_cq *cq;
3097 int rc, entries;
3098
3099 cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
3100 rdev = cq->rdev;
3101 dev_attr = &rdev->dev_attr;
3102 if (!ibcq->uobject) {
3103 ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
3104 return -EOPNOTSUPP;
3105 }
3106
3107 if (cq->resize_umem) {
3108 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
3109 cq->qplib_cq.id);
3110 return -EBUSY;
3111 }
3112
3113 	/* Check that the requested CQ depth is within the supported range */
3114 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3115 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
3116 cq->qplib_cq.id, cqe);
3117 return -EINVAL;
3118 }
3119
3120 uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
3121 entries = bnxt_re_init_depth(cqe + 1, uctx);
3122 if (entries > dev_attr->max_cq_wqes + 1)
3123 entries = dev_attr->max_cq_wqes + 1;
3124
3125 /* uverbs consumer */
3126 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3127 rc = -EFAULT;
3128 goto fail;
3129 }
3130
3131 cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
3132 entries * sizeof(struct cq_base),
3133 IB_ACCESS_LOCAL_WRITE);
3134 if (IS_ERR(cq->resize_umem)) {
3135 rc = PTR_ERR(cq->resize_umem);
3136 cq->resize_umem = NULL;
3137 ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
3138 __func__, rc);
3139 goto fail;
3140 }
3141 cq->resize_cqe = entries;
3142 memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
3143 orig_dpi = cq->qplib_cq.dpi;
3144
3145 cq->qplib_cq.sg_info.umem = cq->resize_umem;
3146 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
3147 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
3148 cq->qplib_cq.dpi = &uctx->dpi;
3149
3150 rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
3151 if (rc) {
3152 ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
3153 cq->qplib_cq.id);
3154 goto fail;
3155 }
3156
3157 cq->ib_cq.cqe = cq->resize_cqe;
3158 atomic_inc(&rdev->stats.res.resize_count);
3159
3160 return 0;
3161
3162 fail:
3163 if (cq->resize_umem) {
3164 ib_umem_release(cq->resize_umem);
3165 cq->resize_umem = NULL;
3166 cq->resize_cqe = 0;
3167 memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
3168 cq->qplib_cq.dpi = orig_dpi;
3169 }
3170 return rc;
3171 }
3172
3173 static u8 __req_to_ib_wc_status(u8 qstatus)
3174 {
3175 switch (qstatus) {
3176 case CQ_REQ_STATUS_OK:
3177 return IB_WC_SUCCESS;
3178 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
3179 return IB_WC_BAD_RESP_ERR;
3180 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
3181 return IB_WC_LOC_LEN_ERR;
3182 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
3183 return IB_WC_LOC_QP_OP_ERR;
3184 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
3185 return IB_WC_LOC_PROT_ERR;
3186 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
3187 return IB_WC_GENERAL_ERR;
3188 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
3189 return IB_WC_REM_INV_REQ_ERR;
3190 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
3191 return IB_WC_REM_ACCESS_ERR;
3192 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
3193 return IB_WC_REM_OP_ERR;
3194 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
3195 return IB_WC_RNR_RETRY_EXC_ERR;
3196 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
3197 return IB_WC_RETRY_EXC_ERR;
3198 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
3199 return IB_WC_WR_FLUSH_ERR;
3200 default:
3201 return IB_WC_GENERAL_ERR;
3202 }
3203 return 0;
3204 }
3205
3206 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
3207 {
3208 switch (qstatus) {
3209 case CQ_RES_RAWETH_QP1_STATUS_OK:
3210 return IB_WC_SUCCESS;
3211 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
3212 return IB_WC_LOC_ACCESS_ERR;
3213 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
3214 return IB_WC_LOC_LEN_ERR;
3215 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
3216 return IB_WC_LOC_PROT_ERR;
3217 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
3218 return IB_WC_LOC_QP_OP_ERR;
3219 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
3220 return IB_WC_GENERAL_ERR;
3221 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
3222 return IB_WC_WR_FLUSH_ERR;
3223 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
3224 return IB_WC_WR_FLUSH_ERR;
3225 default:
3226 return IB_WC_GENERAL_ERR;
3227 }
3228 }
3229
3230 static u8 __rc_to_ib_wc_status(u8 qstatus)
3231 {
3232 switch (qstatus) {
3233 case CQ_RES_RC_STATUS_OK:
3234 return IB_WC_SUCCESS;
3235 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
3236 return IB_WC_LOC_ACCESS_ERR;
3237 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
3238 return IB_WC_LOC_LEN_ERR;
3239 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
3240 return IB_WC_LOC_PROT_ERR;
3241 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
3242 return IB_WC_LOC_QP_OP_ERR;
3243 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
3244 return IB_WC_GENERAL_ERR;
3245 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
3246 return IB_WC_REM_INV_REQ_ERR;
3247 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
3248 return IB_WC_WR_FLUSH_ERR;
3249 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
3250 return IB_WC_WR_FLUSH_ERR;
3251 default:
3252 return IB_WC_GENERAL_ERR;
3253 }
3254 }
3255
3256 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
3257 {
3258 switch (cqe->type) {
3259 case BNXT_QPLIB_SWQE_TYPE_SEND:
3260 wc->opcode = IB_WC_SEND;
3261 break;
3262 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3263 wc->opcode = IB_WC_SEND;
3264 wc->wc_flags |= IB_WC_WITH_IMM;
3265 break;
3266 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3267 wc->opcode = IB_WC_SEND;
3268 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3269 break;
3270 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3271 wc->opcode = IB_WC_RDMA_WRITE;
3272 break;
3273 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3274 wc->opcode = IB_WC_RDMA_WRITE;
3275 wc->wc_flags |= IB_WC_WITH_IMM;
3276 break;
3277 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3278 wc->opcode = IB_WC_RDMA_READ;
3279 break;
3280 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3281 wc->opcode = IB_WC_COMP_SWAP;
3282 break;
3283 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3284 wc->opcode = IB_WC_FETCH_ADD;
3285 break;
3286 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3287 wc->opcode = IB_WC_LOCAL_INV;
3288 break;
3289 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3290 wc->opcode = IB_WC_REG_MR;
3291 break;
3292 default:
3293 wc->opcode = IB_WC_SEND;
3294 break;
3295 }
3296
3297 wc->status = __req_to_ib_wc_status(cqe->status);
3298 }
3299
3300 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3301 u16 raweth_qp1_flags2)
3302 {
3303 bool is_ipv6 = false, is_ipv4 = false;
3304
3305 	/* Bits 9:6 of raweth_qp1_flags indicate the itype */
3306 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3307 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3308 return -1;
3309
3310 if (raweth_qp1_flags2 &
3311 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
3312 raweth_qp1_flags2 &
3313 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
3314 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
3315 (raweth_qp1_flags2 &
3316 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
3317 (is_ipv6 = true) : (is_ipv4 = true);
3318 return ((is_ipv6) ?
3319 BNXT_RE_ROCEV2_IPV6_PACKET :
3320 BNXT_RE_ROCEV2_IPV4_PACKET);
3321 } else {
3322 return BNXT_RE_ROCE_V1_PACKET;
3323 }
3324 }
3325
3326 static int bnxt_re_to_ib_nw_type(int nw_type)
3327 {
3328 u8 nw_hdr_type = 0xFF;
3329
3330 switch (nw_type) {
3331 case BNXT_RE_ROCE_V1_PACKET:
3332 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3333 break;
3334 case BNXT_RE_ROCEV2_IPV4_PACKET:
3335 nw_hdr_type = RDMA_NETWORK_IPV4;
3336 break;
3337 case BNXT_RE_ROCEV2_IPV6_PACKET:
3338 nw_hdr_type = RDMA_NETWORK_IPV6;
3339 break;
3340 }
3341 return nw_hdr_type;
3342 }
3343
3344 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3345 void *rq_hdr_buf)
3346 {
3347 u8 *tmp_buf = NULL;
3348 struct ethhdr *eth_hdr;
3349 u16 eth_type;
3350 bool rc = false;
3351
3352 tmp_buf = (u8 *)rq_hdr_buf;
3353 /*
3354 	 * If the destination MAC is not the same as the interface MAC, this
3355 	 * could be a loopback or a multicast address; check whether it is a
3356 	 * loopback packet.
3357 */
3358 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3359 tmp_buf += 4;
3360 /* Check the ether type */
3361 eth_hdr = (struct ethhdr *)tmp_buf;
3362 eth_type = ntohs(eth_hdr->h_proto);
3363 switch (eth_type) {
3364 case ETH_P_IBOE:
3365 rc = true;
3366 break;
3367 case ETH_P_IP:
3368 case ETH_P_IPV6: {
3369 u32 len;
3370 struct udphdr *udp_hdr;
3371
3372 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3373 sizeof(struct ipv6hdr));
3374 tmp_buf += sizeof(struct ethhdr) + len;
3375 udp_hdr = (struct udphdr *)tmp_buf;
3376 if (ntohs(udp_hdr->dest) ==
3377 ROCE_V2_UDP_DPORT)
3378 rc = true;
3379 break;
3380 }
3381 default:
3382 break;
3383 }
3384 }
3385
3386 return rc;
3387 }
3388
3389 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3390 struct bnxt_qplib_cqe *cqe)
3391 {
3392 struct bnxt_re_dev *rdev = gsi_qp->rdev;
3393 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3394 struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3395 dma_addr_t shrq_hdr_buf_map;
3396 struct ib_sge s_sge[2] = {};
3397 struct ib_sge r_sge[2] = {};
3398 struct bnxt_re_ah *gsi_sah;
3399 struct ib_recv_wr rwr = {};
3400 dma_addr_t rq_hdr_buf_map;
3401 struct ib_ud_wr udwr = {};
3402 struct ib_send_wr *swr;
3403 u32 skip_bytes = 0;
3404 int pkt_type = 0;
3405 void *rq_hdr_buf;
3406 u32 offset = 0;
3407 u32 tbl_idx;
3408 int rc;
3409
3410 swr = &udwr.wr;
3411 tbl_idx = cqe->wr_id;
3412
3413 rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3414 (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3415 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3416 tbl_idx);
3417
3418 /* Shadow QP header buffer */
3419 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3420 tbl_idx);
3421 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3422
3423 /* Store this cqe */
3424 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3425 sqp_entry->qp1_qp = gsi_qp;
3426
3427 /* Find packet type from the cqe */
3428
3429 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3430 cqe->raweth_qp1_flags2);
3431 if (pkt_type < 0) {
3432 ibdev_err(&rdev->ibdev, "Invalid packet\n");
3433 return -EINVAL;
3434 }
3435
3436 /* Adjust the offset for the user buffer and post in the rq */
3437
3438 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3439 offset = 20;
3440
3441 /*
3442 * QP1 loopback packet has 4 bytes of internal header before
3443 * ether header. Skip these four bytes.
3444 */
3445 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3446 skip_bytes = 4;
3447
3448 	/* First send SGE. Skip the ether header */
3449 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3450 + skip_bytes;
3451 s_sge[0].lkey = 0xFFFFFFFF;
3452 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3453 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3454
3455 /* Second Send SGE */
3456 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3457 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3458 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3459 s_sge[1].addr += 8;
3460 s_sge[1].lkey = 0xFFFFFFFF;
3461 s_sge[1].length = 256;
3462
3463 /* First recv SGE */
3464
3465 r_sge[0].addr = shrq_hdr_buf_map;
3466 r_sge[0].lkey = 0xFFFFFFFF;
3467 r_sge[0].length = 40;
3468
3469 r_sge[1].addr = sqp_entry->sge.addr + offset;
3470 r_sge[1].lkey = sqp_entry->sge.lkey;
3471 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3472
3473 /* Create receive work request */
3474 rwr.num_sge = 2;
3475 rwr.sg_list = r_sge;
3476 rwr.wr_id = tbl_idx;
3477 rwr.next = NULL;
3478
3479 rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3480 if (rc) {
3481 ibdev_err(&rdev->ibdev,
3482 "Failed to post Rx buffers to shadow QP");
3483 return -ENOMEM;
3484 }
3485
3486 swr->num_sge = 2;
3487 swr->sg_list = s_sge;
3488 swr->wr_id = tbl_idx;
3489 swr->opcode = IB_WR_SEND;
3490 swr->next = NULL;
3491 gsi_sah = rdev->gsi_ctx.gsi_sah;
3492 udwr.ah = &gsi_sah->ib_ah;
3493 udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3494 udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3495
3496 /* post data received in the send queue */
3497 return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3498 }
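/*
 * Summary of the flow above: a raw completion on the GSI QP1 is stashed
 * in rdev->gsi_ctx.sqp_tbl[wr_id], and the received data is re-posted as
 * a UD send to the shadow GSI QP (gsi_sqp), with a matching receive
 * posted to that QP first.  When the send completes on the shadow QP,
 * bnxt_re_process_res_shadow_qp_wc() merges the stored CQE back into the
 * work completion that is reported to the consumer.
 */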
3499
3500 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3501 struct bnxt_qplib_cqe *cqe)
3502 {
3503 wc->opcode = IB_WC_RECV;
3504 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3505 wc->wc_flags |= IB_WC_GRH;
3506 }
3507
3508 static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3509 u16 vlan_id)
3510 {
3511 /*
3512 	 * Check if the VLAN is configured in the host. If it is not, this
3513 	 * may be a transparent VLAN, so do not report the VLAN ID.
3514 */
3515 if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3516 htons(ETH_P_8021Q), vlan_id))
3517 return false;
3518 return true;
3519 }
3520
3521 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3522 u16 *vid, u8 *sl)
3523 {
3524 bool ret = false;
3525 u32 metadata;
3526 u16 tpid;
3527
3528 metadata = orig_cqe->raweth_qp1_metadata;
3529 if (orig_cqe->raweth_qp1_flags2 &
3530 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3531 tpid = ((metadata &
3532 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3533 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3534 if (tpid == ETH_P_8021Q) {
3535 *vid = metadata &
3536 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3537 *sl = (metadata &
3538 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3539 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3540 ret = true;
3541 }
3542 }
3543
3544 return ret;
3545 }
3546
3547 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3548 struct bnxt_qplib_cqe *cqe)
3549 {
3550 wc->opcode = IB_WC_RECV;
3551 wc->status = __rc_to_ib_wc_status(cqe->status);
3552
3553 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3554 wc->wc_flags |= IB_WC_WITH_IMM;
3555 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3556 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3557 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3558 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3559 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3560 }
3561
3562 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3563 struct ib_wc *wc,
3564 struct bnxt_qplib_cqe *cqe)
3565 {
3566 struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3567 struct bnxt_re_qp *gsi_qp = NULL;
3568 struct bnxt_qplib_cqe *orig_cqe = NULL;
3569 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3570 int nw_type;
3571 u32 tbl_idx;
3572 u16 vlan_id;
3573 u8 sl;
3574
3575 tbl_idx = cqe->wr_id;
3576
3577 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3578 gsi_qp = sqp_entry->qp1_qp;
3579 orig_cqe = &sqp_entry->cqe;
3580
3581 wc->wr_id = sqp_entry->wrid;
3582 wc->byte_len = orig_cqe->length;
3583 wc->qp = &gsi_qp->ib_qp;
3584
3585 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(orig_cqe->immdata));
3586 wc->src_qp = orig_cqe->src_qp;
3587 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3588 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3589 if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3590 wc->vlan_id = vlan_id;
3591 wc->sl = sl;
3592 wc->wc_flags |= IB_WC_WITH_VLAN;
3593 }
3594 }
3595 wc->port_num = 1;
3596 wc->vendor_err = orig_cqe->status;
3597
3598 wc->opcode = IB_WC_RECV;
3599 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3600 wc->wc_flags |= IB_WC_GRH;
3601
3602 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3603 orig_cqe->raweth_qp1_flags2);
3604 if (nw_type >= 0) {
3605 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3606 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3607 }
3608 }
3609
3610 static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3611 struct ib_wc *wc,
3612 struct bnxt_qplib_cqe *cqe)
3613 {
3614 struct bnxt_re_dev *rdev;
3615 u16 vlan_id = 0;
3616 u8 nw_type;
3617
3618 rdev = qp->rdev;
3619 wc->opcode = IB_WC_RECV;
3620 wc->status = __rc_to_ib_wc_status(cqe->status);
3621
3622 if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3623 wc->wc_flags |= IB_WC_WITH_IMM;
3624 /* report only on GSI QP for Thor */
3625 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3626 wc->wc_flags |= IB_WC_GRH;
3627 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3628 wc->wc_flags |= IB_WC_WITH_SMAC;
3629 if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3630 vlan_id = (cqe->cfa_meta & 0xFFF);
3631 }
3632 		/* Mark only if vlan_id is non-zero */
3633 if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3634 wc->vlan_id = vlan_id;
3635 wc->wc_flags |= IB_WC_WITH_VLAN;
3636 }
3637 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3638 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3639 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3640 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3641 }
3642
3643 }
3644
3645 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3646 {
3647 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3648 unsigned long flags;
3649 int rc;
3650
3651 spin_lock_irqsave(&qp->sq_lock, flags);
3652
3653 rc = bnxt_re_bind_fence_mw(lib_qp);
3654 if (!rc) {
3655 lib_qp->sq.phantom_wqe_cnt++;
3656 ibdev_dbg(&qp->rdev->ibdev,
3657 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3658 lib_qp->id, lib_qp->sq.hwq.prod,
3659 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3660 lib_qp->sq.phantom_wqe_cnt);
3661 }
3662
3663 spin_unlock_irqrestore(&qp->sq_lock, flags);
3664 return rc;
3665 }
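/*
 * Note on the helper above: when the qplib poll code sets
 * sq->send_phantom, bnxt_re_poll_cq() below calls send_phantom_wqe() to
 * post a bind of the fence memory window under the SQ lock.  The bind
 * consumes an SQ slot; the intent, judging from the qplib layer, is to
 * produce an extra ("phantom") completion so that CQ polling can make
 * progress.  The exact hardware condition that requires this nudge is
 * handled in the qplib code.
 */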
3666
3667 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3668 {
3669 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3670 struct bnxt_re_qp *qp, *sh_qp;
3671 struct bnxt_qplib_cqe *cqe;
3672 int i, ncqe, budget;
3673 struct bnxt_qplib_q *sq;
3674 struct bnxt_qplib_qp *lib_qp;
3675 u32 tbl_idx;
3676 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3677 unsigned long flags;
3678
3679 /* User CQ; the only processing we do is to
3680 * complete any pending CQ resize operation.
3681 */
3682 if (cq->umem) {
3683 if (cq->resize_umem)
3684 bnxt_re_resize_cq_complete(cq);
3685 return 0;
3686 }
3687
3688 spin_lock_irqsave(&cq->cq_lock, flags);
3689 budget = min_t(u32, num_entries, cq->max_cql);
3690 num_entries = budget;
3691 if (!cq->cql) {
3692 ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3693 goto exit;
3694 }
3695 cqe = &cq->cql[0];
3696 while (budget) {
3697 lib_qp = NULL;
3698 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3699 if (lib_qp) {
3700 sq = &lib_qp->sq;
3701 if (sq->send_phantom) {
3702 qp = container_of(lib_qp,
3703 struct bnxt_re_qp, qplib_qp);
3704 if (send_phantom_wqe(qp) == -ENOMEM)
3705 ibdev_err(&cq->rdev->ibdev,
3706 "Phantom failed! Scheduled to send again\n");
3707 else
3708 sq->send_phantom = false;
3709 }
3710 }
3711 if (ncqe < budget)
3712 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3713 cqe + ncqe,
3714 budget - ncqe);
3715
3716 if (!ncqe)
3717 break;
3718
3719 for (i = 0; i < ncqe; i++, cqe++) {
3720 /* Transcribe each qplib_wqe back to ib_wc */
3721 memset(wc, 0, sizeof(*wc));
3722
3723 wc->wr_id = cqe->wr_id;
3724 wc->byte_len = cqe->length;
3725 qp = container_of
3726 ((struct bnxt_qplib_qp *)
3727 (unsigned long)(cqe->qp_handle),
3728 struct bnxt_re_qp, qplib_qp);
3729 wc->qp = &qp->ib_qp;
3730 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immdata));
3731 wc->src_qp = cqe->src_qp;
3732 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3733 wc->port_num = 1;
3734 wc->vendor_err = cqe->status;
3735
3736 switch (cqe->opcode) {
3737 case CQ_BASE_CQE_TYPE_REQ:
3738 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3739 if (sh_qp &&
3740 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3741 /* Handle this completion with
3742 * the stored completion
3743 */
3744 memset(wc, 0, sizeof(*wc));
3745 continue;
3746 }
3747 bnxt_re_process_req_wc(wc, cqe);
3748 break;
3749 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3750 if (!cqe->status) {
3751 int rc = 0;
3752
3753 rc = bnxt_re_process_raw_qp_pkt_rx
3754 (qp, cqe);
3755 if (!rc) {
3756 memset(wc, 0, sizeof(*wc));
3757 continue;
3758 }
3759 cqe->status = -1;
3760 }
3761 /* Errors need not be looped back.
3762 * But change the wr_id to the one
3763 * stored in the table
3764 */
3765 tbl_idx = cqe->wr_id;
3766 sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
3767 wc->wr_id = sqp_entry->wrid;
3768 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3769 break;
3770 case CQ_BASE_CQE_TYPE_RES_RC:
3771 bnxt_re_process_res_rc_wc(wc, cqe);
3772 break;
3773 case CQ_BASE_CQE_TYPE_RES_UD:
3774 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3775 if (sh_qp &&
3776 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3777 /* Handle this completion with
3778 * the stored completion
3779 */
3780 if (cqe->status) {
3781 continue;
3782 } else {
3783 bnxt_re_process_res_shadow_qp_wc
3784 (qp, wc, cqe);
3785 break;
3786 }
3787 }
3788 bnxt_re_process_res_ud_wc(qp, wc, cqe);
3789 break;
3790 default:
3791 ibdev_err(&cq->rdev->ibdev,
3792 "POLL CQ : type 0x%x not handled",
3793 cqe->opcode);
3794 continue;
3795 }
3796 wc++;
3797 budget--;
3798 }
3799 }
3800 exit:
3801 spin_unlock_irqrestore(&cq->cq_lock, flags);
3802 return num_entries - budget;
3803 }
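/*
 * For illustration only: a minimal sketch of a kernel consumer draining
 * this CQ through the core API (ib_poll_cq() dispatches to
 * bnxt_re_poll_cq() above; the 16-entry batch size is arbitrary):
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	do {
 *		n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
 *		for (i = 0; i < n; i++)
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				pr_err("wr_id %llu failed: %d\n",
 *				       wc[i].wr_id, wc[i].status);
 *	} while (n > 0);
 *
 * User-space CQs never reach the polling loop above: the early cq->umem
 * check only completes a pending resize, since user space polls the CQ
 * memory directly through the provider library.
 */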
3804
3805 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3806 enum ib_cq_notify_flags ib_cqn_flags)
3807 {
3808 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3809 int type = 0, rc = 0;
3810 unsigned long flags;
3811
3812 spin_lock_irqsave(&cq->cq_lock, flags);
3813 /* Trigger on the very next completion */
3814 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3815 type = DBC_DBC_TYPE_CQ_ARMALL;
3816 /* Trigger on the next solicited completion */
3817 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3818 type = DBC_DBC_TYPE_CQ_ARMSE;
3819
3820 /* Poll to see if there are missed events */
3821 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3822 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3823 rc = 1;
3824 goto exit;
3825 }
3826 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3827
3828 exit:
3829 spin_unlock_irqrestore(&cq->cq_lock, flags);
3830 return rc;
3831 }
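/*
 * For illustration only: the usual kernel re-arm pattern that relies on
 * the IB_CQ_REPORT_MISSED_EVENTS behaviour implemented above (cq and the
 * wc array are assumed to exist in the caller):
 *
 *	repoll:
 *		while (ib_poll_cq(cq, ARRAY_SIZE(wc), wc) > 0)
 *			;	// consume completions
 *		if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				     IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *			goto repoll;	// a CQE slipped in before re-arming
 *
 * A return of 0 means the CQ is armed and the next completion will invoke
 * the CQ's completion handler.
 */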
3832
3833 /* Memory Regions */
3834 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3835 {
3836 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3837 struct bnxt_re_dev *rdev = pd->rdev;
3838 struct bnxt_re_mr *mr;
3839 u32 active_mrs;
3840 int rc;
3841
3842 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3843 if (!mr)
3844 return ERR_PTR(-ENOMEM);
3845
3846 mr->rdev = rdev;
3847 mr->qplib_mr.pd = &pd->qplib_pd;
3848 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3849 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3850
3851 /* Allocate and register 0 as the address */
3852 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3853 if (rc)
3854 goto fail;
3855
3856 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3857 	mr->qplib_mr.total_size = -1; /* Infinite length */
3858 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
3859 PAGE_SIZE);
3860 if (rc)
3861 goto fail_mr;
3862
3863 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3864 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3865 IB_ACCESS_REMOTE_ATOMIC))
3866 mr->ib_mr.rkey = mr->ib_mr.lkey;
3867 active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
3868 if (active_mrs > rdev->stats.res.mr_watermark)
3869 rdev->stats.res.mr_watermark = active_mrs;
3870
3871 return &mr->ib_mr;
3872
3873 fail_mr:
3874 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3875 fail:
3876 kfree(mr);
3877 return ERR_PTR(rc);
3878 }
3879
3880 int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3881 {
3882 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3883 struct bnxt_re_dev *rdev = mr->rdev;
3884 int rc;
3885
3886 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3887 if (rc) {
3888 ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
3889 return rc;
3890 }
3891
3892 if (mr->pages) {
3893 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3894 &mr->qplib_frpl);
3895 kfree(mr->pages);
3896 mr->npages = 0;
3897 mr->pages = NULL;
3898 }
3899 ib_umem_release(mr->ib_umem);
3900
3901 kfree(mr);
3902 atomic_dec(&rdev->stats.res.mr_count);
3903 return rc;
3904 }
3905
3906 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3907 {
3908 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3909
3910 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3911 return -ENOMEM;
3912
3913 mr->pages[mr->npages++] = addr;
3914 return 0;
3915 }
3916
3917 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3918 unsigned int *sg_offset)
3919 {
3920 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3921
3922 mr->npages = 0;
3923 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3924 }
3925
3926 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3927 u32 max_num_sg)
3928 {
3929 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3930 struct bnxt_re_dev *rdev = pd->rdev;
3931 struct bnxt_re_mr *mr = NULL;
3932 u32 active_mrs;
3933 int rc;
3934
3935 if (type != IB_MR_TYPE_MEM_REG) {
3936 ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
3937 return ERR_PTR(-EINVAL);
3938 }
3939 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3940 return ERR_PTR(-EINVAL);
3941
3942 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3943 if (!mr)
3944 return ERR_PTR(-ENOMEM);
3945
3946 mr->rdev = rdev;
3947 mr->qplib_mr.pd = &pd->qplib_pd;
3948 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3949 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3950
3951 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3952 if (rc)
3953 goto bail;
3954
3955 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3956 mr->ib_mr.rkey = mr->ib_mr.lkey;
3957
3958 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3959 if (!mr->pages) {
3960 rc = -ENOMEM;
3961 goto fail;
3962 }
3963 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3964 &mr->qplib_frpl, max_num_sg);
3965 if (rc) {
3966 ibdev_err(&rdev->ibdev,
3967 "Failed to allocate HW FR page list");
3968 goto fail_mr;
3969 }
3970
3971 active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
3972 if (active_mrs > rdev->stats.res.mr_watermark)
3973 rdev->stats.res.mr_watermark = active_mrs;
3974 return &mr->ib_mr;
3975
3976 fail_mr:
3977 kfree(mr->pages);
3978 fail:
3979 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3980 bail:
3981 kfree(mr);
3982 return ERR_PTR(rc);
3983 }
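/*
 * For illustration only: a minimal sketch of how an in-kernel ULP would
 * exercise the MR callbacks above (generic ib_core API; pd, qp, sgl and
 * nents are assumed to exist in the caller):
 *
 *	struct ib_reg_wr reg_wr = {};
 *	struct ib_mr *mr;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);   // -> bnxt_re_alloc_mr()
 *	ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);     // -> bnxt_re_map_mr_sg()
 *
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	ib_post_send(qp, &reg_wr.wr, NULL);
 *
 * ib_map_mr_sg() walks the scatterlist and calls bnxt_re_set_page() once
 * per DMA block, filling mr->pages[] for the register work request.
 */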
3984
3985 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3986 struct ib_udata *udata)
3987 {
3988 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3989 struct bnxt_re_dev *rdev = pd->rdev;
3990 struct bnxt_re_mw *mw;
3991 u32 active_mws;
3992 int rc;
3993
3994 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3995 if (!mw)
3996 return ERR_PTR(-ENOMEM);
3997 mw->rdev = rdev;
3998 mw->qplib_mw.pd = &pd->qplib_pd;
3999
4000 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
4001 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
4002 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
4003 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
4004 if (rc) {
4005 ibdev_err(&rdev->ibdev, "Allocate MW failed!");
4006 goto fail;
4007 }
4008 mw->ib_mw.rkey = mw->qplib_mw.rkey;
4009
4010 active_mws = atomic_inc_return(&rdev->stats.res.mw_count);
4011 if (active_mws > rdev->stats.res.mw_watermark)
4012 rdev->stats.res.mw_watermark = active_mws;
4013 return &mw->ib_mw;
4014
4015 fail:
4016 kfree(mw);
4017 return ERR_PTR(rc);
4018 }
4019
4020 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
4021 {
4022 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
4023 struct bnxt_re_dev *rdev = mw->rdev;
4024 int rc;
4025
4026 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
4027 if (rc) {
4028 ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
4029 return rc;
4030 }
4031
4032 kfree(mw);
4033 atomic_dec(&rdev->stats.res.mw_count);
4034 return rc;
4035 }
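/*
 * For illustration only: user space reaches the two MW verbs above via
 * standard rdma-core calls (not driver-specific API):
 *
 *	struct ibv_mw *mw = ibv_alloc_mw(pd, IBV_MW_TYPE_1); // -> bnxt_re_alloc_mw()
 *	...
 *	ibv_dealloc_mw(mw);                                  // -> bnxt_re_dealloc_mw()
 *
 * Type 1 windows are bound to an MR with ibv_bind_mw() on a QP; type 2
 * windows are bound by posting an IBV_WR_BIND_MW work request.
 */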
4036
4037 static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
4038 int mr_access_flags, struct ib_umem *umem)
4039 {
4040 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4041 struct bnxt_re_dev *rdev = pd->rdev;
4042 unsigned long page_size;
4043 struct bnxt_re_mr *mr;
4044 int umem_pgs, rc;
4045 u32 active_mrs;
4046
4047 if (length > BNXT_RE_MAX_MR_SIZE) {
4048 ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
4049 length, BNXT_RE_MAX_MR_SIZE);
4050 return ERR_PTR(-ENOMEM);
4051 }
4052
4053 page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
4054 if (!page_size) {
4055 ibdev_err(&rdev->ibdev, "umem page size unsupported!");
4056 return ERR_PTR(-EINVAL);
4057 }
4058
4059 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
4060 if (!mr)
4061 return ERR_PTR(-ENOMEM);
4062
4063 mr->rdev = rdev;
4064 mr->qplib_mr.pd = &pd->qplib_pd;
4065 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
4066 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
4067
4068 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4069 if (rc) {
4070 ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
4071 rc = -EIO;
4072 goto free_mr;
4073 }
4074 /* The fixed portion of the rkey is the same as the lkey */
4075 mr->ib_mr.rkey = mr->qplib_mr.rkey;
4076 mr->ib_umem = umem;
4077 mr->qplib_mr.va = virt_addr;
4078 mr->qplib_mr.total_size = length;
4079
4080 umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
4081 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
4082 umem_pgs, page_size);
4083 if (rc) {
4084 ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
4085 rc = -EIO;
4086 goto free_mrw;
4087 }
4088
4089 mr->ib_mr.lkey = mr->qplib_mr.lkey;
4090 mr->ib_mr.rkey = mr->qplib_mr.lkey;
4091 active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
4092 if (active_mrs > rdev->stats.res.mr_watermark)
4093 rdev->stats.res.mr_watermark = active_mrs;
4094
4095 return &mr->ib_mr;
4096
4097 free_mrw:
4098 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4099 free_mr:
4100 kfree(mr);
4101 return ERR_PTR(rc);
4102 }
4103
4104 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
4105 u64 virt_addr, int mr_access_flags,
4106 struct ib_udata *udata)
4107 {
4108 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4109 struct bnxt_re_dev *rdev = pd->rdev;
4110 struct ib_umem *umem;
4111 struct ib_mr *ib_mr;
4112
4113 umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
4114 if (IS_ERR(umem))
4115 return ERR_CAST(umem);
4116
4117 ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4118 if (IS_ERR(ib_mr))
4119 ib_umem_release(umem);
4120 return ib_mr;
4121 }
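/*
 * For illustration only: the user-space entry point for the registration
 * path above is the standard rdma-core call (buf and len are application
 * values):
 *
 *	void *buf = aligned_alloc(sysconf(_SC_PAGESIZE), len);
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_READ |
 *				       IBV_ACCESS_REMOTE_WRITE);
 *	// mr->lkey is used in local SGEs, mr->rkey is handed to the peer
 *	ibv_dereg_mr(mr);                            // -> bnxt_re_dereg_mr()
 *
 * ib_umem_get() above pins the user pages and ib_umem_find_best_pgsz()
 * picks the largest HW-supported page size for building the PBL.
 */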
4122
4123 struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
4124 u64 length, u64 virt_addr, int fd,
4125 int mr_access_flags, struct ib_udata *udata)
4126 {
4127 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
4128 struct bnxt_re_dev *rdev = pd->rdev;
4129 struct ib_umem_dmabuf *umem_dmabuf;
4130 struct ib_umem *umem;
4131 struct ib_mr *ib_mr;
4132
4133 umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
4134 fd, mr_access_flags);
4135 if (IS_ERR(umem_dmabuf))
4136 return ERR_CAST(umem_dmabuf);
4137
4138 umem = &umem_dmabuf->umem;
4139
4140 ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
4141 if (IS_ERR(ib_mr))
4142 ib_umem_release(umem);
4143 return ib_mr;
4144 }
4145
4146 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
4147 {
4148 struct ib_device *ibdev = ctx->device;
4149 struct bnxt_re_ucontext *uctx =
4150 container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
4151 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
4152 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
4153 struct bnxt_re_user_mmap_entry *entry;
4154 struct bnxt_re_uctx_resp resp = {};
4155 struct bnxt_re_uctx_req ureq = {};
4156 u32 chip_met_rev_num = 0;
4157 int rc;
4158
4159 ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
4160
4161 if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
4162 ibdev_dbg(ibdev, " is different from the device %d ",
4163 BNXT_RE_ABI_VERSION);
4164 return -EPERM;
4165 }
4166
4167 uctx->rdev = rdev;
4168
4169 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
4170 if (!uctx->shpg) {
4171 rc = -ENOMEM;
4172 goto fail;
4173 }
4174 spin_lock_init(&uctx->sh_lock);
4175
4176 resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
4177 chip_met_rev_num = rdev->chip_ctx->chip_num;
4178 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
4179 BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
4180 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
4181 BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
4182 resp.chip_id0 = chip_met_rev_num;
4183 	/* Temp, use xa_alloc instead */
4184 resp.dev_id = rdev->en_dev->pdev->devfn;
4185 resp.max_qp = rdev->qplib_ctx.qpc_count;
4186 resp.pg_size = PAGE_SIZE;
4187 resp.cqe_sz = sizeof(struct cq_base);
4188 resp.max_cqd = dev_attr->max_cq_wqes;
4189
4190 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
4191 resp.mode = rdev->chip_ctx->modes.wqe_mode;
4192
4193 if (rdev->chip_ctx->modes.db_push)
4194 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
4195
4196 entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
4197 if (!entry) {
4198 rc = -ENOMEM;
4199 goto cfail;
4200 }
4201 uctx->shpage_mmap = &entry->rdma_entry;
4202 if (rdev->pacing.dbr_pacing)
4203 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
4204
4205 if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
4206 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_MSN_TABLE_ENABLED;
4207
4208 if (udata->inlen >= sizeof(ureq)) {
4209 rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
4210 if (rc)
4211 goto cfail;
4212 if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
4213 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
4214 uctx->cmask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
4215 }
4216 }
4217
4218 rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
4219 if (rc) {
4220 ibdev_err(ibdev, "Failed to copy user context");
4221 rc = -EFAULT;
4222 goto cfail;
4223 }
4224
4225 return 0;
4226 cfail:
4227 free_page((unsigned long)uctx->shpg);
4228 uctx->shpg = NULL;
4229 fail:
4230 return rc;
4231 }
4232
4233 void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
4234 {
4235 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4236 struct bnxt_re_ucontext,
4237 ib_uctx);
4238
4239 struct bnxt_re_dev *rdev = uctx->rdev;
4240
4241 rdma_user_mmap_entry_remove(uctx->shpage_mmap);
4242 uctx->shpage_mmap = NULL;
4243 if (uctx->shpg)
4244 free_page((unsigned long)uctx->shpg);
4245
4246 if (uctx->dpi.dbr) {
4247 /* Free DPI only if this is the first PD allocated by the
4248 * application and mark the context dpi as NULL
4249 */
4250 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
4251 uctx->dpi.dbr = NULL;
4252 }
4253 }
4254
4255 static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
4256 {
4257 struct bnxt_re_cq *cq = NULL, *tmp_cq;
4258
4259 hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) {
4260 if (tmp_cq->qplib_cq.id == cq_id) {
4261 cq = tmp_cq;
4262 break;
4263 }
4264 }
4265 return cq;
4266 }
4267
4268 /* Helper function to mmap the virtual memory from user app */
4269 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
4270 {
4271 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
4272 struct bnxt_re_ucontext,
4273 ib_uctx);
4274 struct bnxt_re_user_mmap_entry *bnxt_entry;
4275 struct rdma_user_mmap_entry *rdma_entry;
4276 int ret = 0;
4277 u64 pfn;
4278
4279 rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
4280 if (!rdma_entry)
4281 return -EINVAL;
4282
4283 bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4284 rdma_entry);
4285
4286 switch (bnxt_entry->mmap_flag) {
4287 case BNXT_RE_MMAP_WC_DB:
4288 pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4289 ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4290 pgprot_writecombine(vma->vm_page_prot),
4291 rdma_entry);
4292 break;
4293 case BNXT_RE_MMAP_UC_DB:
4294 pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4295 ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4296 pgprot_noncached(vma->vm_page_prot),
4297 rdma_entry);
4298 break;
4299 case BNXT_RE_MMAP_SH_PAGE:
4300 ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
4301 break;
4302 case BNXT_RE_MMAP_DBR_BAR:
4303 pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
4304 ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
4305 pgprot_noncached(vma->vm_page_prot),
4306 rdma_entry);
4307 break;
4308 case BNXT_RE_MMAP_DBR_PAGE:
4309 case BNXT_RE_MMAP_TOGGLE_PAGE:
4310 /* Driver doesn't expect write access for user space */
4311 if (vma->vm_flags & VM_WRITE)
4312 return -EFAULT;
4313 ret = vm_insert_page(vma, vma->vm_start,
4314 virt_to_page((void *)bnxt_entry->mem_offset));
4315 break;
4316 default:
4317 ret = -EINVAL;
4318 break;
4319 }
4320
4321 rdma_user_mmap_entry_put(rdma_entry);
4322 return ret;
4323 }
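/*
 * For illustration only: the offsets matched above are the ones returned
 * to user space by rdma_user_mmap_entry_insert() (for example through the
 * BNXT_RE_ALLOC_PAGE_MMAP_OFFSET attribute below).  The provider library
 * then maps them through the uverbs char device, roughly:
 *
 *	db = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		  cmd_fd, mmap_offset);
 *
 * where cmd_fd is the open /dev/infiniband/uverbsN file descriptor and
 * mmap_offset/length are the values the kernel handed back.
 */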
4324
4325 void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
4326 {
4327 struct bnxt_re_user_mmap_entry *bnxt_entry;
4328
4329 bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
4330 rdma_entry);
4331
4332 kfree(bnxt_entry);
4333 }
4334
4335 static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
4336 {
4337 struct bnxt_re_ucontext *uctx;
4338
4339 uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4340 bnxt_re_pacing_alert(uctx->rdev);
4341 return 0;
4342 }
4343
4344 static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
4345 {
4346 struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4347 enum bnxt_re_alloc_page_type alloc_type;
4348 struct bnxt_re_user_mmap_entry *entry;
4349 enum bnxt_re_mmap_flag mmap_flag;
4350 struct bnxt_qplib_chip_ctx *cctx;
4351 struct bnxt_re_ucontext *uctx;
4352 struct bnxt_re_dev *rdev;
4353 u64 mmap_offset;
4354 u32 length;
4355 u32 dpi;
4356 u64 addr;
4357 int err;
4358
4359 uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
4360 if (IS_ERR(uctx))
4361 return PTR_ERR(uctx);
4362
4363 err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
4364 if (err)
4365 return err;
4366
4367 rdev = uctx->rdev;
4368 cctx = rdev->chip_ctx;
4369
4370 switch (alloc_type) {
4371 case BNXT_RE_ALLOC_WC_PAGE:
4372 if (cctx->modes.db_push) {
4373 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
4374 uctx, BNXT_QPLIB_DPI_TYPE_WC))
4375 return -ENOMEM;
4376 length = PAGE_SIZE;
4377 dpi = uctx->wcdpi.dpi;
4378 addr = (u64)uctx->wcdpi.umdbr;
4379 mmap_flag = BNXT_RE_MMAP_WC_DB;
4380 } else {
4381 return -EINVAL;
4382 }
4383
4384 break;
4385 case BNXT_RE_ALLOC_DBR_BAR_PAGE:
4386 length = PAGE_SIZE;
4387 addr = (u64)rdev->pacing.dbr_bar_addr;
4388 mmap_flag = BNXT_RE_MMAP_DBR_BAR;
4389 break;
4390
4391 case BNXT_RE_ALLOC_DBR_PAGE:
4392 length = PAGE_SIZE;
4393 addr = (u64)rdev->pacing.dbr_page;
4394 mmap_flag = BNXT_RE_MMAP_DBR_PAGE;
4395 break;
4396
4397 default:
4398 return -EOPNOTSUPP;
4399 }
4400
4401 entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
4402 if (!entry)
4403 return -ENOMEM;
4404
4405 uobj->object = entry;
4406 uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
4407 err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
4408 &mmap_offset, sizeof(mmap_offset));
4409 if (err)
4410 return err;
4411
4412 err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
4413 &length, sizeof(length));
4414 if (err)
4415 return err;
4416
4417 err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
4418 &dpi, sizeof(length));
4419 if (err)
4420 return err;
4421
4422 return 0;
4423 }
4424
4425 static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
4426 enum rdma_remove_reason why,
4427 struct uverbs_attr_bundle *attrs)
4428 {
4429 struct bnxt_re_user_mmap_entry *entry = uobject->object;
4430 struct bnxt_re_ucontext *uctx = entry->uctx;
4431
4432 switch (entry->mmap_flag) {
4433 case BNXT_RE_MMAP_WC_DB:
4434 if (uctx && uctx->wcdpi.dbr) {
4435 struct bnxt_re_dev *rdev = uctx->rdev;
4436
4437 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
4438 uctx->wcdpi.dbr = NULL;
4439 }
4440 break;
4441 case BNXT_RE_MMAP_DBR_BAR:
4442 case BNXT_RE_MMAP_DBR_PAGE:
4443 break;
4444 default:
4445 goto exit;
4446 }
4447 rdma_user_mmap_entry_remove(&entry->rdma_entry);
4448 exit:
4449 return 0;
4450 }
4451
4452 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
4453 UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
4454 BNXT_RE_OBJECT_ALLOC_PAGE,
4455 UVERBS_ACCESS_NEW,
4456 UA_MANDATORY),
4457 UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
4458 enum bnxt_re_alloc_page_type,
4459 UA_MANDATORY),
4460 UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
4461 UVERBS_ATTR_TYPE(u64),
4462 UA_MANDATORY),
4463 UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
4464 UVERBS_ATTR_TYPE(u32),
4465 UA_MANDATORY),
4466 UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
4467 UVERBS_ATTR_TYPE(u32),
4468 UA_MANDATORY));
4469
4470 DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
4471 UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
4472 BNXT_RE_OBJECT_ALLOC_PAGE,
4473 UVERBS_ACCESS_DESTROY,
4474 UA_MANDATORY));
4475
4476 DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
4477 UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
4478 &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
4479 &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
4480
4481 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);
4482
4483 DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
4484 &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));
4485
4486 /* Toggle MEM */
4487 static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs)
4488 {
4489 struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
4490 enum bnxt_re_mmap_flag mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
4491 enum bnxt_re_get_toggle_mem_type res_type;
4492 struct bnxt_re_user_mmap_entry *entry;
4493 struct bnxt_re_ucontext *uctx;
4494 struct ib_ucontext *ib_uctx;
4495 struct bnxt_re_dev *rdev;
4496 struct bnxt_re_cq *cq;
4497 u64 mem_offset;
4498 u64 addr = 0;
4499 u32 length;
4500 u32 offset;
4501 u32 cq_id;
4502 int err;
4503
4504 ib_uctx = ib_uverbs_get_ucontext(attrs);
4505 if (IS_ERR(ib_uctx))
4506 return PTR_ERR(ib_uctx);
4507
4508 err = uverbs_get_const(&res_type, attrs, BNXT_RE_TOGGLE_MEM_TYPE);
4509 if (err)
4510 return err;
4511
4512 uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
4513 rdev = uctx->rdev;
4514
4515 switch (res_type) {
4516 case BNXT_RE_CQ_TOGGLE_MEM:
4517 err = uverbs_copy_from(&cq_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
4518 if (err)
4519 return err;
4520
4521 cq = bnxt_re_search_for_cq(rdev, cq_id);
4522 if (!cq)
4523 return -EINVAL;
4524
4525 length = PAGE_SIZE;
4526 addr = (u64)cq->uctx_cq_page;
4527 mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
4528 offset = 0;
4529 break;
4530 case BNXT_RE_SRQ_TOGGLE_MEM:
4531 break;
4532
4533 default:
4534 return -EOPNOTSUPP;
4535 }
4536
4537 entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mem_offset);
4538 if (!entry)
4539 return -ENOMEM;
4540
4541 uobj->object = entry;
4542 uverbs_finalize_uobj_create(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
4543 err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
4544 &mem_offset, sizeof(mem_offset));
4545 if (err)
4546 return err;
4547
4548 err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
4549 &length, sizeof(length));
4550 if (err)
4551 return err;
4552
4553 err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
4554 &offset, sizeof(length));
4555 if (err)
4556 return err;
4557
4558 return 0;
4559 }
4560
4561 static int get_toggle_mem_obj_cleanup(struct ib_uobject *uobject,
4562 enum rdma_remove_reason why,
4563 struct uverbs_attr_bundle *attrs)
4564 {
4565 struct bnxt_re_user_mmap_entry *entry = uobject->object;
4566
4567 rdma_user_mmap_entry_remove(&entry->rdma_entry);
4568 return 0;
4569 }
4570
4571 DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM,
4572 UVERBS_ATTR_IDR(BNXT_RE_TOGGLE_MEM_HANDLE,
4573 BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4574 UVERBS_ACCESS_NEW,
4575 UA_MANDATORY),
4576 UVERBS_ATTR_CONST_IN(BNXT_RE_TOGGLE_MEM_TYPE,
4577 enum bnxt_re_get_toggle_mem_type,
4578 UA_MANDATORY),
4579 UVERBS_ATTR_PTR_IN(BNXT_RE_TOGGLE_MEM_RES_ID,
4580 UVERBS_ATTR_TYPE(u32),
4581 UA_MANDATORY),
4582 UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
4583 UVERBS_ATTR_TYPE(u64),
4584 UA_MANDATORY),
4585 UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
4586 UVERBS_ATTR_TYPE(u32),
4587 UA_MANDATORY),
4588 UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
4589 UVERBS_ATTR_TYPE(u32),
4590 UA_MANDATORY));
4591
4592 DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
4593 UVERBS_ATTR_IDR(BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE,
4594 BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4595 UVERBS_ACCESS_DESTROY,
4596 UA_MANDATORY));
4597
4598 DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_GET_TOGGLE_MEM,
4599 UVERBS_TYPE_ALLOC_IDR(get_toggle_mem_obj_cleanup),
4600 &UVERBS_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM),
4601 &UVERBS_METHOD(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM));
4602
4603 const struct uapi_definition bnxt_re_uapi_defs[] = {
4604 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
4605 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
4606 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_GET_TOGGLE_MEM),
4607 {}
4608 };
4609