xref: /linux/drivers/infiniband/hw/qedr/verbs.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 /* QLogic qedr NIC Driver
2  * Copyright (c) 2015-2016  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and /or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
34 #include <net/ip.h>
35 #include <net/ipv6.h>
36 #include <net/udp.h>
37 #include <linux/iommu.h>
38 
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
45 #include <rdma/uverbs_ioctl.h>
46 
47 #include <linux/qed/common_hsi.h>
48 #include "qedr_hsi_rdma.h"
49 #include <linux/qed/qed_if.h>
50 #include "qedr.h"
51 #include "verbs.h"
52 #include <rdma/qedr-abi.h>
53 #include "qedr_roce_cm.h"
54 #include "qedr_iw_cm.h"
55 
56 #define QEDR_SRQ_WQE_ELEM_SIZE	sizeof(union rdma_srq_elm)
57 #define	RDMA_MAX_SGE_PER_SRQ	(4)
58 #define RDMA_MAX_SRQ_WQE_SIZE	(RDMA_MAX_SGE_PER_SRQ + 1)
59 
60 #define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
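/*
 * DB_ADDR_SHIFT() turns a PWM doorbell offset into a byte offset within a
 * DPI. As noted in qedr_create_cq() below, the result is relative: user
 * space adds its mapped DPI base while the kernel adds dev->db_addr.
 */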
61 
62 enum {
63 	QEDR_USER_MMAP_IO_WC = 0,
64 	QEDR_USER_MMAP_PHYS_PAGE,
65 };
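/*
 * These two mmap entry types cover the mappings handed out to user space:
 * QEDR_USER_MMAP_IO_WC is the doorbell (DPI) window on the device BAR,
 * mapped write-combined in qedr_mmap(), while QEDR_USER_MMAP_PHYS_PAGE is a
 * regular kernel page used for doorbell recovery data (see
 * qedr_init_user_db_rec() below).
 */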
66 
67 static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
68 					size_t len)
69 {
70 	size_t min_len = min_t(size_t, len, udata->outlen);
71 
72 	return ib_copy_to_udata(udata, src, min_len);
73 }
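/*
 * Clamping to udata->outlen above keeps the copy within the response buffer
 * the user library actually provided, which appears to be for compatibility
 * with older libraries whose response structs are smaller than the kernel's.
 */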
74 
75 int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
76 {
77 	if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
78 		return -EINVAL;
79 
80 	*pkey = QEDR_ROCE_PKEY_DEFAULT;
81 	return 0;
82 }
83 
84 int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
85 		      int index, union ib_gid *sgid)
86 {
87 	struct qedr_dev *dev = get_qedr_dev(ibdev);
88 
89 	memset(sgid->raw, 0, sizeof(sgid->raw));
90 	ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
91 
92 	DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
93 		 sgid->global.interface_id, sgid->global.subnet_prefix);
94 
95 	return 0;
96 }
97 
98 int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
99 {
100 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
101 	struct qedr_device_attr *qattr = &dev->attr;
102 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
103 
104 	srq_attr->srq_limit = srq->srq_limit;
105 	srq_attr->max_wr = qattr->max_srq_wr;
106 	srq_attr->max_sge = qattr->max_sge;
107 
108 	return 0;
109 }
110 
111 int qedr_query_device(struct ib_device *ibdev,
112 		      struct ib_device_attr *attr, struct ib_udata *udata)
113 {
114 	struct qedr_dev *dev = get_qedr_dev(ibdev);
115 	struct qedr_device_attr *qattr = &dev->attr;
116 
117 	if (!dev->rdma_ctx) {
118 		DP_ERR(dev,
119 		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
120 		       dev->rdma_ctx);
121 		return -EINVAL;
122 	}
123 
124 	memset(attr, 0, sizeof(*attr));
125 
126 	attr->fw_ver = qattr->fw_ver;
127 	attr->sys_image_guid = qattr->sys_image_guid;
128 	attr->max_mr_size = qattr->max_mr_size;
129 	attr->page_size_cap = qattr->page_size_caps;
130 	attr->vendor_id = qattr->vendor_id;
131 	attr->vendor_part_id = qattr->vendor_part_id;
132 	attr->hw_ver = qattr->hw_ver;
133 	attr->max_qp = qattr->max_qp;
134 	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
135 	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
136 	    IB_DEVICE_RC_RNR_NAK_GEN |
137 	    IB_DEVICE_MEM_MGT_EXTENSIONS;
138 	attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
139 
140 	if (!rdma_protocol_iwarp(&dev->ibdev, 1))
141 		attr->device_cap_flags |= IB_DEVICE_XRC;
142 	attr->max_send_sge = qattr->max_sge;
143 	attr->max_recv_sge = qattr->max_sge;
144 	attr->max_sge_rd = qattr->max_sge;
145 	attr->max_cq = qattr->max_cq;
146 	attr->max_cqe = qattr->max_cqe;
147 	attr->max_mr = qattr->max_mr;
148 	attr->max_mw = qattr->max_mw;
149 	attr->max_pd = qattr->max_pd;
150 	attr->atomic_cap = dev->atomic_cap;
151 	attr->max_qp_init_rd_atom =
152 	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
153 	attr->max_qp_rd_atom =
154 	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
155 		attr->max_qp_init_rd_atom);
156 
157 	attr->max_srq = qattr->max_srq;
158 	attr->max_srq_sge = qattr->max_srq_sge;
159 	attr->max_srq_wr = qattr->max_srq_wr;
160 
161 	attr->local_ca_ack_delay = qattr->dev_ack_delay;
162 	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
163 	attr->max_pkeys = qattr->max_pkey;
164 	attr->max_ah = qattr->max_ah;
165 
166 	return 0;
167 }
168 
169 static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
170 					    u8 *ib_width)
171 {
172 	switch (speed) {
173 	case 1000:
174 		*ib_speed = IB_SPEED_SDR;
175 		*ib_width = IB_WIDTH_1X;
176 		break;
177 	case 10000:
178 		*ib_speed = IB_SPEED_QDR;
179 		*ib_width = IB_WIDTH_1X;
180 		break;
181 
182 	case 20000:
183 		*ib_speed = IB_SPEED_DDR;
184 		*ib_width = IB_WIDTH_4X;
185 		break;
186 
187 	case 25000:
188 		*ib_speed = IB_SPEED_EDR;
189 		*ib_width = IB_WIDTH_1X;
190 		break;
191 
192 	case 40000:
193 		*ib_speed = IB_SPEED_QDR;
194 		*ib_width = IB_WIDTH_4X;
195 		break;
196 
197 	case 50000:
198 		*ib_speed = IB_SPEED_HDR;
199 		*ib_width = IB_WIDTH_1X;
200 		break;
201 
202 	case 100000:
203 		*ib_speed = IB_SPEED_EDR;
204 		*ib_width = IB_WIDTH_4X;
205 		break;
206 
207 	default:
208 		/* Unsupported */
209 		*ib_speed = IB_SPEED_SDR;
210 		*ib_width = IB_WIDTH_1X;
211 	}
212 }
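/*
 * The mapping above approximates Ethernet link speeds with IB speed/width
 * pairs of the same aggregate rate, e.g. 100G is reported as EDR (25G) x4
 * and 40G as QDR (10G) x4; anything unrecognized falls back to SDR x1.
 */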
213 
214 int qedr_query_port(struct ib_device *ibdev, u32 port,
215 		    struct ib_port_attr *attr)
216 {
217 	struct qedr_dev *dev;
218 	struct qed_rdma_port *rdma_port;
219 
220 	dev = get_qedr_dev(ibdev);
221 
222 	if (!dev->rdma_ctx) {
223 		DP_ERR(dev, "rdma_ctx is NULL\n");
224 		return -EINVAL;
225 	}
226 
227 	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
228 
229 	/* *attr is zeroed by the caller; avoid zeroing it again here */
230 	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
231 		attr->state = IB_PORT_ACTIVE;
232 		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
233 	} else {
234 		attr->state = IB_PORT_DOWN;
235 		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
236 	}
237 	attr->max_mtu = IB_MTU_4096;
238 	attr->lid = 0;
239 	attr->lmc = 0;
240 	attr->sm_lid = 0;
241 	attr->sm_sl = 0;
242 	attr->ip_gids = true;
243 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
244 		attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
245 		attr->gid_tbl_len = 1;
246 	} else {
247 		attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
248 		attr->gid_tbl_len = QEDR_MAX_SGID;
249 		attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
250 	}
251 	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
252 	attr->qkey_viol_cntr = 0;
253 	get_link_speed_and_width(rdma_port->link_speed,
254 				 &attr->active_speed, &attr->active_width);
255 	attr->max_msg_sz = rdma_port->max_msg_size;
256 	attr->max_vl_num = 4;
257 
258 	return 0;
259 }
260 
261 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
262 {
263 	struct ib_device *ibdev = uctx->device;
264 	int rc;
265 	struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
266 	struct qedr_alloc_ucontext_resp uresp = {};
267 	struct qedr_alloc_ucontext_req ureq = {};
268 	struct qedr_dev *dev = get_qedr_dev(ibdev);
269 	struct qed_rdma_add_user_out_params oparams;
270 	struct qedr_user_mmap_entry *entry;
271 
272 	if (!udata)
273 		return -EFAULT;
274 
275 	if (udata->inlen) {
276 		rc = ib_copy_from_udata(&ureq, udata,
277 					min(sizeof(ureq), udata->inlen));
278 		if (rc) {
279 			DP_ERR(dev, "Problem copying data from user space\n");
280 			return -EFAULT;
281 		}
282 		ctx->edpm_mode = !!(ureq.context_flags &
283 				    QEDR_ALLOC_UCTX_EDPM_MODE);
284 		ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
285 	}
286 
287 	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
288 	if (rc) {
289 		DP_ERR(dev,
290 		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
291 		       rc);
292 		return rc;
293 	}
294 
295 	ctx->dpi = oparams.dpi;
296 	ctx->dpi_addr = oparams.dpi_addr;
297 	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
298 	ctx->dpi_size = oparams.dpi_size;
299 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
300 	if (!entry) {
301 		rc = -ENOMEM;
302 		goto err;
303 	}
304 
305 	entry->io_address = ctx->dpi_phys_addr;
306 	entry->length = ctx->dpi_size;
307 	entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
308 	entry->dpi = ctx->dpi;
309 	entry->dev = dev;
310 	rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
311 					 ctx->dpi_size);
312 	if (rc) {
313 		kfree(entry);
314 		goto err;
315 	}
316 	ctx->db_mmap_entry = &entry->rdma_entry;
317 
318 	if (!dev->user_dpm_enabled)
319 		uresp.dpm_flags = 0;
320 	else if (rdma_protocol_iwarp(&dev->ibdev, 1))
321 		uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
322 	else
323 		uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
324 				  QEDR_DPM_TYPE_ROCE_LEGACY |
325 				  QEDR_DPM_TYPE_ROCE_EDPM_MODE;
326 
327 	if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
328 		uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
329 		uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
330 		uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
331 		uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
332 	}
333 
334 	uresp.wids_enabled = 1;
335 	uresp.wid_count = oparams.wid_count;
336 	uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
337 	uresp.db_size = ctx->dpi_size;
338 	uresp.max_send_wr = dev->attr.max_sqe;
339 	uresp.max_recv_wr = dev->attr.max_rqe;
340 	uresp.max_srq_wr = dev->attr.max_srq_wr;
341 	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
342 	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
343 	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
344 	uresp.max_cqes = QEDR_MAX_CQES;
345 
346 	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
347 	if (rc)
348 		goto err;
349 
350 	ctx->dev = dev;
351 
352 	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
353 		 &ctx->ibucontext);
354 	return 0;
355 
356 err:
357 	if (!ctx->db_mmap_entry)
358 		dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
359 	else
360 		rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
361 
362 	return rc;
363 }
364 
365 void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
366 {
367 	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
368 
369 	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
370 		 uctx);
371 
372 	rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
373 }
374 
375 void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
376 {
377 	struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
378 	struct qedr_dev *dev = entry->dev;
379 
380 	if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
381 		free_page((unsigned long)entry->address);
382 	else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
383 		dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
384 
385 	kfree(entry);
386 }
387 
388 int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
389 {
390 	struct ib_device *dev = ucontext->device;
391 	size_t length = vma->vm_end - vma->vm_start;
392 	struct rdma_user_mmap_entry *rdma_entry;
393 	struct qedr_user_mmap_entry *entry;
394 	int rc = 0;
395 	u64 pfn;
396 
397 	ibdev_dbg(dev,
398 		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
399 		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
400 
401 	rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
402 	if (!rdma_entry) {
403 		ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
404 			  vma->vm_pgoff);
405 		return -EINVAL;
406 	}
407 	entry = get_qedr_mmap_entry(rdma_entry);
408 	ibdev_dbg(dev,
409 		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
410 		  entry->io_address, length, entry->mmap_flag);
411 
412 	switch (entry->mmap_flag) {
413 	case QEDR_USER_MMAP_IO_WC:
414 		pfn = entry->io_address >> PAGE_SHIFT;
415 		rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
416 				       pgprot_writecombine(vma->vm_page_prot),
417 				       rdma_entry);
418 		break;
419 	case QEDR_USER_MMAP_PHYS_PAGE:
420 		rc = vm_insert_page(vma, vma->vm_start,
421 				    virt_to_page(entry->address));
422 		break;
423 	default:
424 		rc = -EINVAL;
425 	}
426 
427 	if (rc)
428 		ibdev_dbg(dev,
429 			  "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
430 			  entry->io_address, length, entry->mmap_flag, rc);
431 
432 	rdma_user_mmap_entry_put(rdma_entry);
433 	return rc;
434 }
435 
436 int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
437 {
438 	struct ib_device *ibdev = ibpd->device;
439 	struct qedr_dev *dev = get_qedr_dev(ibdev);
440 	struct qedr_pd *pd = get_qedr_pd(ibpd);
441 	u16 pd_id;
442 	int rc;
443 
444 	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
445 		 udata ? "User Lib" : "Kernel");
446 
447 	if (!dev->rdma_ctx) {
448 		DP_ERR(dev, "invalid RDMA context\n");
449 		return -EINVAL;
450 	}
451 
452 	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
453 	if (rc)
454 		return rc;
455 
456 	pd->pd_id = pd_id;
457 
458 	if (udata) {
459 		struct qedr_alloc_pd_uresp uresp = {
460 			.pd_id = pd_id,
461 		};
462 		struct qedr_ucontext *context = rdma_udata_to_drv_context(
463 			udata, struct qedr_ucontext, ibucontext);
464 
465 		rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
466 		if (rc) {
467 			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
468 			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
469 			return rc;
470 		}
471 
472 		pd->uctx = context;
473 		pd->uctx->pd = pd;
474 	}
475 
476 	return 0;
477 }
478 
479 int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
480 {
481 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
482 	struct qedr_pd *pd = get_qedr_pd(ibpd);
483 
484 	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
485 	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
486 	return 0;
487 }
488 
489 
490 int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
491 {
492 	struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
493 	struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);
494 
495 	return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
496 }
497 
498 int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
499 {
500 	struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
501 	u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;
502 
503 	dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
504 	return 0;
505 }
506 static void qedr_free_pbl(struct qedr_dev *dev,
507 			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
508 {
509 	struct pci_dev *pdev = dev->pdev;
510 	int i;
511 
512 	for (i = 0; i < pbl_info->num_pbls; i++) {
513 		if (!pbl[i].va)
514 			continue;
515 		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
516 				  pbl[i].va, pbl[i].pa);
517 	}
518 
519 	kfree(pbl);
520 }
521 
522 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
523 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
524 
525 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
526 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
527 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
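/*
 * Rough capacity math for the limits above: a 64 KB PBL page holds
 * 64K / sizeof(u64) = 8192 PBEs, so a two-layer table tops out at
 * 8192 * 8192 = 67,108,864 data pages -- about 256 GB of registered
 * memory when 4 KB pages are used.
 */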
528 
529 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
530 					   struct qedr_pbl_info *pbl_info,
531 					   gfp_t flags)
532 {
533 	struct pci_dev *pdev = dev->pdev;
534 	struct qedr_pbl *pbl_table;
535 	dma_addr_t *pbl_main_tbl;
536 	dma_addr_t pa;
537 	void *va;
538 	int i;
539 
540 	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
541 	if (!pbl_table)
542 		return ERR_PTR(-ENOMEM);
543 
544 	for (i = 0; i < pbl_info->num_pbls; i++) {
545 		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
546 					flags);
547 		if (!va)
548 			goto err;
549 
550 		pbl_table[i].va = va;
551 		pbl_table[i].pa = pa;
552 	}
553 
554 	/* Two-layer PBLs: if we have more than one PBL, we need to initialize
555 	 * the first one with physical pointers to all of the rest
556 	 */
557 	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
558 	for (i = 0; i < pbl_info->num_pbls - 1; i++)
559 		pbl_main_tbl[i] = pbl_table[i + 1].pa;
560 
561 	return pbl_table;
562 
563 err:
564 	for (i--; i >= 0; i--)
565 		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
566 				  pbl_table[i].va, pbl_table[i].pa);
567 
568 	qedr_free_pbl(dev, pbl_info, pbl_table);
569 
570 	return ERR_PTR(-ENOMEM);
571 }
572 
573 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
574 				struct qedr_pbl_info *pbl_info,
575 				u32 num_pbes, int two_layer_capable)
576 {
577 	u32 pbl_capacity;
578 	u32 pbl_size;
579 	u32 num_pbls;
580 
581 	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
582 		if (num_pbes > MAX_PBES_TWO_LAYER) {
583 			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
584 			       num_pbes);
585 			return -EINVAL;
586 		}
587 
588 		/* calculate required pbl page size */
589 		pbl_size = MIN_FW_PBL_PAGE_SIZE;
590 		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
591 			       NUM_PBES_ON_PAGE(pbl_size);
592 
593 		while (pbl_capacity < num_pbes) {
594 			pbl_size *= 2;
595 			pbl_capacity = pbl_size / sizeof(u64);
596 			pbl_capacity = pbl_capacity * pbl_capacity;
597 		}
598 
599 		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
600 		num_pbls++;	/* One for layer 0 (points to the pbls) */
601 		pbl_info->two_layered = true;
602 	} else {
603 		/* One layered PBL */
604 		num_pbls = 1;
605 		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
606 				 roundup_pow_of_two((num_pbes * sizeof(u64))));
607 		pbl_info->two_layered = false;
608 	}
609 
610 	pbl_info->num_pbls = num_pbls;
611 	pbl_info->pbl_size = pbl_size;
612 	pbl_info->num_pbes = num_pbes;
613 
614 	DP_DEBUG(dev, QEDR_MSG_MR,
615 		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
616 		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
617 
618 	return 0;
619 }
620 
621 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
622 			       struct qedr_pbl *pbl,
623 			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
624 {
625 	int pbe_cnt, total_num_pbes = 0;
626 	struct qedr_pbl *pbl_tbl;
627 	struct ib_block_iter biter;
628 	struct regpair *pbe;
629 
630 	if (!pbl_info->num_pbes)
631 		return;
632 
633 	/* If we have a two-layered PBL, the first PBL points to the rest
634 	 * of the PBLs and the first entry lies in the second PBL of the table
635 	 */
636 	if (pbl_info->two_layered)
637 		pbl_tbl = &pbl[1];
638 	else
639 		pbl_tbl = pbl;
640 
641 	pbe = (struct regpair *)pbl_tbl->va;
642 	if (!pbe) {
643 		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
644 		return;
645 	}
646 
647 	pbe_cnt = 0;
648 
649 	rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
650 		u64 pg_addr = rdma_block_iter_dma_address(&biter);
651 
652 		pbe->lo = cpu_to_le32(pg_addr);
653 		pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
654 
655 		pbe_cnt++;
656 		total_num_pbes++;
657 		pbe++;
658 
659 		if (total_num_pbes == pbl_info->num_pbes)
660 			return;
661 
662 		/* If the given pbl is full storing the pbes, move to next pbl.
663 		 */
664 		if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
665 			pbl_tbl++;
666 			pbe = (struct regpair *)pbl_tbl->va;
667 			pbe_cnt = 0;
668 		}
669 	}
670 }
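/*
 * In the walk above each PBE is a regpair (two 32-bit halves of a DMA
 * address), so one PBL page holds pbl_size / sizeof(u64) entries -- 512 for
 * the minimum 4 KB PBL page -- which is the point where pbe_cnt wraps and
 * the loop advances to the next PBL.
 */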
671 
672 static int qedr_db_recovery_add(struct qedr_dev *dev,
673 				void __iomem *db_addr,
674 				void *db_data,
675 				enum qed_db_rec_width db_width,
676 				enum qed_db_rec_space db_space)
677 {
678 	if (!db_data) {
679 		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
680 		return 0;
681 	}
682 
683 	return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
684 						 db_width, db_space);
685 }
686 
687 static void qedr_db_recovery_del(struct qedr_dev *dev,
688 				 void __iomem *db_addr,
689 				 void *db_data)
690 {
691 	if (!db_data) {
692 		DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
693 		return;
694 	}
695 
696 	/* Ignore return code as there is not much we can do about it. Error
697 	 * log will be printed inside.
698 	 */
699 	dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
700 }
701 
702 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
703 			      struct qedr_cq *cq, struct ib_udata *udata,
704 			      u32 db_offset)
705 {
706 	struct qedr_create_cq_uresp uresp;
707 	int rc;
708 
709 	memset(&uresp, 0, sizeof(uresp));
710 
711 	uresp.db_offset = db_offset;
712 	uresp.icid = cq->icid;
713 	if (cq->q.db_mmap_entry)
714 		uresp.db_rec_addr =
715 			rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
716 
717 	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
718 	if (rc)
719 		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
720 
721 	return rc;
722 }
723 
724 static void consume_cqe(struct qedr_cq *cq)
725 {
726 	if (cq->latest_cqe == cq->toggle_cqe)
727 		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
728 
729 	cq->latest_cqe = qed_chain_consume(&cq->pbl);
730 }
731 
732 static inline int qedr_align_cq_entries(int entries)
733 {
734 	u64 size, aligned_size;
735 
736 	/* We allocate an extra entry that we don't report to the FW. */
737 	size = (entries + 1) * QEDR_CQE_SIZE;
738 	aligned_size = ALIGN(size, PAGE_SIZE);
739 
740 	return aligned_size / QEDR_CQE_SIZE;
741 }
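/*
 * Worked example for qedr_align_cq_entries(), assuming a 32-byte CQE for
 * illustration: 256 requested entries become (256 + 1) * 32 = 8224 bytes,
 * which rounds up to 12288 bytes (three 4 KB pages), i.e. 384 usable
 * entries are reported back.
 */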
742 
743 static int qedr_init_user_db_rec(struct ib_udata *udata,
744 				 struct qedr_dev *dev, struct qedr_userq *q,
745 				 bool requires_db_rec)
746 {
747 	struct qedr_ucontext *uctx =
748 		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
749 					  ibucontext);
750 	struct qedr_user_mmap_entry *entry;
751 	int rc;
752 
753 	/* Abort for a non-doorbell user queue (SRQ) or a non-supporting lib */
754 	if (requires_db_rec == 0 || !uctx->db_rec)
755 		return 0;
756 
757 	/* Allocate a page for doorbell recovery, add to mmap */
758 	q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
759 	if (!q->db_rec_data) {
760 		DP_ERR(dev, "get_zeroed_page failed\n");
761 		return -ENOMEM;
762 	}
763 
764 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
765 	if (!entry)
766 		goto err_free_db_data;
767 
768 	entry->address = q->db_rec_data;
769 	entry->length = PAGE_SIZE;
770 	entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
771 	rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
772 					 &entry->rdma_entry,
773 					 PAGE_SIZE);
774 	if (rc)
775 		goto err_free_entry;
776 
777 	q->db_mmap_entry = &entry->rdma_entry;
778 
779 	return 0;
780 
781 err_free_entry:
782 	kfree(entry);
783 
784 err_free_db_data:
785 	free_page((unsigned long)q->db_rec_data);
786 	q->db_rec_data = NULL;
787 	return -ENOMEM;
788 }
789 
790 static inline int qedr_init_user_queue(struct ib_udata *udata,
791 				       struct qedr_dev *dev,
792 				       struct qedr_userq *q, u64 buf_addr,
793 				       size_t buf_len, bool requires_db_rec,
794 				       int access,
795 				       int alloc_and_init)
796 {
797 	u32 fw_pages;
798 	int rc;
799 
800 	q->buf_addr = buf_addr;
801 	q->buf_len = buf_len;
802 	q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
803 	if (IS_ERR(q->umem)) {
804 		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
805 		       PTR_ERR(q->umem));
806 		return PTR_ERR(q->umem);
807 	}
808 
809 	fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
810 	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
811 	if (rc)
812 		goto err0;
813 
814 	if (alloc_and_init) {
815 		q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
816 		if (IS_ERR(q->pbl_tbl)) {
817 			rc = PTR_ERR(q->pbl_tbl);
818 			goto err0;
819 		}
820 		qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
821 				   FW_PAGE_SHIFT);
822 	} else {
823 		q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
824 		if (!q->pbl_tbl) {
825 			rc = -ENOMEM;
826 			goto err0;
827 		}
828 	}
829 
830 	/* mmap the user address used to store doorbell data for recovery */
831 	return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
832 
833 err0:
834 	ib_umem_release(q->umem);
835 	q->umem = NULL;
836 
837 	return rc;
838 }
839 
840 static inline void qedr_init_cq_params(struct qedr_cq *cq,
841 				       struct qedr_ucontext *ctx,
842 				       struct qedr_dev *dev, int vector,
843 				       int chain_entries, int page_cnt,
844 				       u64 pbl_ptr,
845 				       struct qed_rdma_create_cq_in_params
846 				       *params)
847 {
848 	memset(params, 0, sizeof(*params));
849 	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
850 	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
851 	params->cnq_id = vector;
852 	params->cq_size = chain_entries - 1;
853 	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
854 	params->pbl_num_pages = page_cnt;
855 	params->pbl_ptr = pbl_ptr;
856 	params->pbl_two_level = 0;
857 }
858 
859 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
860 {
861 	cq->db.data.agg_flags = flags;
862 	cq->db.data.value = cpu_to_le32(cons);
863 	writeq(cq->db.raw, cq->db_addr);
864 }
865 
866 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
867 {
868 	struct qedr_cq *cq = get_qedr_cq(ibcq);
869 	unsigned long sflags;
870 	struct qedr_dev *dev;
871 
872 	dev = get_qedr_dev(ibcq->device);
873 
874 	if (cq->destroyed) {
875 		DP_ERR(dev,
876 		       "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
877 		       cq, cq->icid);
878 		return -EINVAL;
879 	}
880 
881 
882 	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
883 		return 0;
884 
885 	spin_lock_irqsave(&cq->cq_lock, sflags);
886 
887 	cq->arm_flags = 0;
888 
889 	if (flags & IB_CQ_SOLICITED)
890 		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
891 
892 	if (flags & IB_CQ_NEXT_COMP)
893 		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
894 
895 	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
896 
897 	spin_unlock_irqrestore(&cq->cq_lock, sflags);
898 
899 	return 0;
900 }
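/*
 * Arming a CQ above simply rings its doorbell with the last consumed index
 * (cq_cons - 1) plus the SE/CF arm flags, so the FW raises a completion
 * event the next time a (solicited, if IB_CQ_SOLICITED) CQE arrives. GSI
 * CQs are handled by the driver itself and therefore skip the doorbell.
 */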
901 
902 int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
903 		   struct uverbs_attr_bundle *attrs)
904 {
905 	struct ib_udata *udata = &attrs->driver_udata;
906 	struct ib_device *ibdev = ibcq->device;
907 	struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
908 		udata, struct qedr_ucontext, ibucontext);
909 	struct qed_rdma_destroy_cq_out_params destroy_oparams;
910 	struct qed_rdma_destroy_cq_in_params destroy_iparams;
911 	struct qed_chain_init_params chain_params = {
912 		.mode		= QED_CHAIN_MODE_PBL,
913 		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
914 		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
915 		.elem_size	= sizeof(union rdma_cqe),
916 	};
917 	struct qedr_dev *dev = get_qedr_dev(ibdev);
918 	struct qed_rdma_create_cq_in_params params;
919 	struct qedr_create_cq_ureq ureq = {};
920 	int vector = attr->comp_vector;
921 	int entries = attr->cqe;
922 	struct qedr_cq *cq = get_qedr_cq(ibcq);
923 	int chain_entries;
924 	u32 db_offset;
925 	int page_cnt;
926 	u64 pbl_ptr;
927 	u16 icid;
928 	int rc;
929 
930 	DP_DEBUG(dev, QEDR_MSG_INIT,
931 		 "create_cq: called from %s. entries=%d, vector=%d\n",
932 		 udata ? "User Lib" : "Kernel", entries, vector);
933 
934 	if (attr->flags)
935 		return -EOPNOTSUPP;
936 
937 	if (entries > QEDR_MAX_CQES) {
938 		DP_ERR(dev,
939 		       "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
940 		       entries, QEDR_MAX_CQES);
941 		return -EINVAL;
942 	}
943 
944 	chain_entries = qedr_align_cq_entries(entries);
945 	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
946 	chain_params.num_elems = chain_entries;
947 
948 	/* calc db offset. user will add DPI base, kernel will add db addr */
949 	db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
950 
951 	if (udata) {
952 		if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
953 							 udata->inlen))) {
954 			DP_ERR(dev,
955 			       "create cq: problem copying data from user space\n");
956 			goto err0;
957 		}
958 
959 		if (!ureq.len) {
960 			DP_ERR(dev,
961 			       "create cq: cannot create a cq with 0 entries\n");
962 			goto err0;
963 		}
964 
965 		cq->cq_type = QEDR_CQ_TYPE_USER;
966 
967 		rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
968 					  ureq.len, true, IB_ACCESS_LOCAL_WRITE,
969 					  1);
970 		if (rc)
971 			goto err0;
972 
973 		pbl_ptr = cq->q.pbl_tbl->pa;
974 		page_cnt = cq->q.pbl_info.num_pbes;
975 
976 		cq->ibcq.cqe = chain_entries;
977 		cq->q.db_addr = ctx->dpi_addr + db_offset;
978 	} else {
979 		cq->cq_type = QEDR_CQ_TYPE_KERNEL;
980 
981 		rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
982 						   &chain_params);
983 		if (rc)
984 			goto err0;
985 
986 		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
987 		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
988 		cq->ibcq.cqe = cq->pbl.capacity;
989 	}
990 
991 	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
992 			    pbl_ptr, &params);
993 
994 	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
995 	if (rc)
996 		goto err1;
997 
998 	cq->icid = icid;
999 	cq->sig = QEDR_CQ_MAGIC_NUMBER;
1000 	spin_lock_init(&cq->cq_lock);
1001 
1002 	if (udata) {
1003 		rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
1004 		if (rc)
1005 			goto err2;
1006 
1007 		rc = qedr_db_recovery_add(dev, cq->q.db_addr,
1008 					  &cq->q.db_rec_data->db_data,
1009 					  DB_REC_WIDTH_64B,
1010 					  DB_REC_USER);
1011 		if (rc)
1012 			goto err2;
1013 
1014 	} else {
1015 		/* Generate doorbell address. */
1016 		cq->db.data.icid = cq->icid;
1017 		cq->db_addr = dev->db_addr + db_offset;
1018 		cq->db.data.params = DB_AGG_CMD_MAX <<
1019 		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
1020 
1021 		/* Point to the very last element; once we pass it, we will toggle */
1022 		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
1023 		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
1024 		cq->latest_cqe = NULL;
1025 		consume_cqe(cq);
1026 		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
1027 
1028 		rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
1029 					  DB_REC_WIDTH_64B, DB_REC_KERNEL);
1030 		if (rc)
1031 			goto err2;
1032 	}
1033 
1034 	DP_DEBUG(dev, QEDR_MSG_CQ,
1035 		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
1036 		 cq->icid, cq, params.cq_size);
1037 
1038 	return 0;
1039 
1040 err2:
1041 	destroy_iparams.icid = cq->icid;
1042 	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
1043 				  &destroy_oparams);
1044 err1:
1045 	if (udata) {
1046 		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1047 		ib_umem_release(cq->q.umem);
1048 		if (cq->q.db_mmap_entry)
1049 			rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1050 	} else {
1051 		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1052 	}
1053 err0:
1054 	return -EINVAL;
1055 }
1056 
1057 #define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
1058 #define QEDR_DESTROY_CQ_ITER_DURATION		(10)
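/*
 * With the values above, qedr_destroy_cq() below busy-waits up to
 * 10 * 10 us = 100 us for outstanding CNQ notifications and then, if
 * needed, sleeps for up to another 10 * 10 ms = 100 ms before carrying on.
 */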
1059 
1060 int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1061 {
1062 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1063 	struct qed_rdma_destroy_cq_out_params oparams;
1064 	struct qed_rdma_destroy_cq_in_params iparams;
1065 	struct qedr_cq *cq = get_qedr_cq(ibcq);
1066 	int iter;
1067 
1068 	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
1069 
1070 	cq->destroyed = 1;
1071 
1072 	/* GSI CQs are handled by the driver, so they don't exist in the FW */
1073 	if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
1074 		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1075 		return 0;
1076 	}
1077 
1078 	iparams.icid = cq->icid;
1079 	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1080 	dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1081 
1082 	if (udata) {
1083 		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1084 		ib_umem_release(cq->q.umem);
1085 
1086 		if (cq->q.db_rec_data) {
1087 			qedr_db_recovery_del(dev, cq->q.db_addr,
1088 					     &cq->q.db_rec_data->db_data);
1089 			rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1090 		}
1091 	} else {
1092 		qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1093 	}
1094 
1095 	/* We don't want the IRQ handler to handle a non-existing CQ so we
1096 	 * wait until all CNQ interrupts, if any, are received. This will always
1097 	 * happen and will always happen very fast. If not, then a serious error
1098 	 * has occurred. That is why we can use a long delay.
1099 	 * We spin for a short time so we don't lose time on context switching
1100 	 * in case all the completions are handled in that span. Otherwise
1101 	 * we sleep for a while and check again. Since the CNQ may be
1102 	 * associated with (only) the current CPU we use msleep to allow the
1103 	 * current CPU to be freed.
1104 	 * The CNQ notification is increased in qedr_irq_handler().
1105 	 */
1106 	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1107 	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1108 		udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1109 		iter--;
1110 	}
1111 
1112 	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1113 	while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1114 		msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1115 		iter--;
1116 	}
1117 
1118 	/* Note that we don't need to have explicit code to wait for the
1119 	 * completion of the event handler because it is invoked from the EQ.
1120 	 * Since the destroy CQ ramrod has also been received on the EQ we can
1121 	 * be certain that there's no event handler in process.
1122 	 * be certain that there's no event handler in progress.
1123 	return 0;
1124 }
1125 
1126 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1127 					  struct ib_qp_attr *attr,
1128 					  int attr_mask,
1129 					  struct qed_rdma_modify_qp_in_params
1130 					  *qp_params)
1131 {
1132 	const struct ib_gid_attr *gid_attr;
1133 	enum rdma_network_type nw_type;
1134 	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1135 	u32 ipv4_addr;
1136 	int ret;
1137 	int i;
1138 
1139 	gid_attr = grh->sgid_attr;
1140 	ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
1141 	if (ret)
1142 		return ret;
1143 
1144 	nw_type = rdma_gid_attr_network_type(gid_attr);
1145 	switch (nw_type) {
1146 	case RDMA_NETWORK_IPV6:
1147 		memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1148 		       sizeof(qp_params->sgid));
1149 		memcpy(&qp_params->dgid.bytes[0],
1150 		       &grh->dgid,
1151 		       sizeof(qp_params->dgid));
1152 		qp_params->roce_mode = ROCE_V2_IPV6;
1153 		SET_FIELD(qp_params->modify_flags,
1154 			  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1155 		break;
1156 	case RDMA_NETWORK_ROCE_V1:
1157 		memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1158 		       sizeof(qp_params->sgid));
1159 		memcpy(&qp_params->dgid.bytes[0],
1160 		       &grh->dgid,
1161 		       sizeof(qp_params->dgid));
1162 		qp_params->roce_mode = ROCE_V1;
1163 		break;
1164 	case RDMA_NETWORK_IPV4:
1165 		memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1166 		memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1167 		ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1168 		qp_params->sgid.ipv4_addr = ipv4_addr;
1169 		ipv4_addr =
1170 		    qedr_get_ipv4_from_gid(grh->dgid.raw);
1171 		qp_params->dgid.ipv4_addr = ipv4_addr;
1172 		SET_FIELD(qp_params->modify_flags,
1173 			  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1174 		qp_params->roce_mode = ROCE_V2_IPV4;
1175 		break;
1176 	default:
1177 		return -EINVAL;
1178 	}
1179 
1180 	for (i = 0; i < 4; i++) {
1181 		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1182 		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1183 	}
1184 
1185 	if (qp_params->vlan_id >= VLAN_CFI_MASK)
1186 		qp_params->vlan_id = 0;
1187 
1188 	return 0;
1189 }
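/*
 * Note on the IPv4 case above: RoCE v2 IPv4 GIDs are IPv4-mapped IPv6
 * addresses (::ffff:a.b.c.d), so only the embedded IPv4 address is copied
 * into sgid/dgid; the subsequent per-dword ntohl() pass converts the GIDs
 * from network byte order to the order the qed/FW interface expects.
 */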
1190 
1191 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1192 			       struct ib_qp_init_attr *attrs,
1193 			       struct ib_udata *udata)
1194 {
1195 	struct qedr_device_attr *qattr = &dev->attr;
1196 
1197 	/* QP0... attrs->qp_type == IB_QPT_GSI */
1198 	if (attrs->qp_type != IB_QPT_RC &&
1199 	    attrs->qp_type != IB_QPT_GSI &&
1200 	    attrs->qp_type != IB_QPT_XRC_INI &&
1201 	    attrs->qp_type != IB_QPT_XRC_TGT) {
1202 		DP_DEBUG(dev, QEDR_MSG_QP,
1203 			 "create qp: unsupported qp type=0x%x requested\n",
1204 			 attrs->qp_type);
1205 		return -EOPNOTSUPP;
1206 	}
1207 
1208 	if (attrs->cap.max_send_wr > qattr->max_sqe) {
1209 		DP_ERR(dev,
1210 		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1211 		       attrs->cap.max_send_wr, qattr->max_sqe);
1212 		return -EINVAL;
1213 	}
1214 
1215 	if (attrs->cap.max_inline_data > qattr->max_inline) {
1216 		DP_ERR(dev,
1217 		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1218 		       attrs->cap.max_inline_data, qattr->max_inline);
1219 		return -EINVAL;
1220 	}
1221 
1222 	if (attrs->cap.max_send_sge > qattr->max_sge) {
1223 		DP_ERR(dev,
1224 		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1225 		       attrs->cap.max_send_sge, qattr->max_sge);
1226 		return -EINVAL;
1227 	}
1228 
1229 	if (attrs->cap.max_recv_sge > qattr->max_sge) {
1230 		DP_ERR(dev,
1231 		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1232 		       attrs->cap.max_recv_sge, qattr->max_sge);
1233 		return -EINVAL;
1234 	}
1235 
1236 	/* verify consumer QPs are not trying to use GSI QP's CQ.
1237 	 * TGT QP isn't associated with RQ/SQ
1238 	 */
1239 	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
1240 	    (attrs->qp_type != IB_QPT_XRC_TGT) &&
1241 	    (attrs->qp_type != IB_QPT_XRC_INI)) {
1242 		struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
1243 		struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
1244 
1245 		if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
1246 		    (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
1247 			DP_ERR(dev,
1248 			       "create qp: consumer QP cannot use GSI CQs.\n");
1249 			return -EINVAL;
1250 		}
1251 	}
1252 
1253 	return 0;
1254 }
1255 
1256 static int qedr_copy_srq_uresp(struct qedr_dev *dev,
1257 			       struct qedr_srq *srq, struct ib_udata *udata)
1258 {
1259 	struct qedr_create_srq_uresp uresp = {};
1260 	int rc;
1261 
1262 	uresp.srq_id = srq->srq_id;
1263 
1264 	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1265 	if (rc)
1266 		DP_ERR(dev, "create srq: problem copying data to user space\n");
1267 
1268 	return rc;
1269 }
1270 
1271 static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1272 			       struct qedr_create_qp_uresp *uresp,
1273 			       struct qedr_qp *qp)
1274 {
1275 	/* iWARP requires two doorbells per RQ. */
1276 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1277 		uresp->rq_db_offset =
1278 		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1279 		uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1280 	} else {
1281 		uresp->rq_db_offset =
1282 		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1283 	}
1284 
1285 	uresp->rq_icid = qp->icid;
1286 	if (qp->urq.db_mmap_entry)
1287 		uresp->rq_db_rec_addr =
1288 			rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
1289 }
1290 
1291 static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1292 			       struct qedr_create_qp_uresp *uresp,
1293 			       struct qedr_qp *qp)
1294 {
1295 	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1296 
1297 	/* iWARP uses the same cid for rq and sq */
1298 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
1299 		uresp->sq_icid = qp->icid;
1300 	else
1301 		uresp->sq_icid = qp->icid + 1;
1302 
1303 	if (qp->usq.db_mmap_entry)
1304 		uresp->sq_db_rec_addr =
1305 			rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
1306 }
1307 
1308 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1309 			      struct qedr_qp *qp, struct ib_udata *udata,
1310 			      struct qedr_create_qp_uresp *uresp)
1311 {
1312 	int rc;
1313 
1314 	memset(uresp, 0, sizeof(*uresp));
1315 
1316 	if (qedr_qp_has_sq(qp))
1317 		qedr_copy_sq_uresp(dev, uresp, qp);
1318 
1319 	if (qedr_qp_has_rq(qp))
1320 		qedr_copy_rq_uresp(dev, uresp, qp);
1321 
1322 	uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1323 	uresp->qp_id = qp->qp_id;
1324 
1325 	rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
1326 	if (rc)
1327 		DP_ERR(dev,
1328 		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
1329 		       qp->icid);
1330 
1331 	return rc;
1332 }
1333 
1334 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1335 {
1336 	qed_chain_reset(&qph->pbl);
1337 	qph->prod = 0;
1338 	qph->cons = 0;
1339 	qph->wqe_cons = 0;
1340 	qph->db_data.data.value = cpu_to_le16(0);
1341 }
1342 
1343 static void qedr_set_common_qp_params(struct qedr_dev *dev,
1344 				      struct qedr_qp *qp,
1345 				      struct qedr_pd *pd,
1346 				      struct ib_qp_init_attr *attrs)
1347 {
1348 	spin_lock_init(&qp->q_lock);
1349 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1350 		kref_init(&qp->refcnt);
1351 		init_completion(&qp->iwarp_cm_comp);
1352 		init_completion(&qp->qp_rel_comp);
1353 	}
1354 
1355 	qp->pd = pd;
1356 	qp->qp_type = attrs->qp_type;
1357 	qp->max_inline_data = attrs->cap.max_inline_data;
1358 	qp->state = QED_ROCE_QP_STATE_RESET;
1359 
1360 	qp->prev_wqe_size = 0;
1361 
1362 	qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
1363 	qp->dev = dev;
1364 	if (qedr_qp_has_sq(qp)) {
1365 		qedr_reset_qp_hwq_info(&qp->sq);
1366 		qp->sq.max_sges = attrs->cap.max_send_sge;
1367 		qp->sq_cq = get_qedr_cq(attrs->send_cq);
1368 		DP_DEBUG(dev, QEDR_MSG_QP,
1369 			 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1370 			 qp->sq.max_sges, qp->sq_cq->icid);
1371 	}
1372 
1373 	if (attrs->srq)
1374 		qp->srq = get_qedr_srq(attrs->srq);
1375 
1376 	if (qedr_qp_has_rq(qp)) {
1377 		qedr_reset_qp_hwq_info(&qp->rq);
1378 		qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1379 		qp->rq.max_sges = attrs->cap.max_recv_sge;
1380 		DP_DEBUG(dev, QEDR_MSG_QP,
1381 			 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1382 			 qp->rq.max_sges, qp->rq_cq->icid);
1383 	}
1384 
1385 	DP_DEBUG(dev, QEDR_MSG_QP,
1386 		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1387 		 pd->pd_id, qp->qp_type, qp->max_inline_data,
1388 		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1389 	DP_DEBUG(dev, QEDR_MSG_QP,
1390 		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1391 		 qp->sq.max_sges, qp->sq_cq->icid);
1392 }
1393 
1394 static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1395 {
1396 	int rc = 0;
1397 
1398 	if (qedr_qp_has_sq(qp)) {
1399 		qp->sq.db = dev->db_addr +
1400 			    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1401 		qp->sq.db_data.data.icid = qp->icid + 1;
1402 		rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
1403 					  DB_REC_WIDTH_32B, DB_REC_KERNEL);
1404 		if (rc)
1405 			return rc;
1406 	}
1407 
1408 	if (qedr_qp_has_rq(qp)) {
1409 		qp->rq.db = dev->db_addr +
1410 			    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1411 		qp->rq.db_data.data.icid = qp->icid;
1412 		rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
1413 					  DB_REC_WIDTH_32B, DB_REC_KERNEL);
1414 		if (rc && qedr_qp_has_sq(qp))
1415 			qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
1416 	}
1417 
1418 	return rc;
1419 }
1420 
1421 static int qedr_check_srq_params(struct qedr_dev *dev,
1422 				 struct ib_srq_init_attr *attrs,
1423 				 struct ib_udata *udata)
1424 {
1425 	struct qedr_device_attr *qattr = &dev->attr;
1426 
1427 	if (attrs->attr.max_wr > qattr->max_srq_wr) {
1428 		DP_ERR(dev,
1429 		       "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
1430 		       attrs->attr.max_wr, qattr->max_srq_wr);
1431 		return -EINVAL;
1432 	}
1433 
1434 	if (attrs->attr.max_sge > qattr->max_sge) {
1435 		DP_ERR(dev,
1436 		       "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
1437 		       attrs->attr.max_sge, qattr->max_sge);
1438 	}
1439 
1440 	if (!udata && attrs->srq_type == IB_SRQT_XRC) {
1441 		DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
1442 		return -EINVAL;
1443 	}
1444 
1445 	return 0;
1446 }
1447 
1448 static void qedr_free_srq_user_params(struct qedr_srq *srq)
1449 {
1450 	qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1451 	ib_umem_release(srq->usrq.umem);
1452 	ib_umem_release(srq->prod_umem);
1453 }
1454 
1455 static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
1456 {
1457 	struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1458 	struct qedr_dev *dev = srq->dev;
1459 
1460 	dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
1461 
1462 	dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1463 			  hw_srq->virt_prod_pair_addr,
1464 			  hw_srq->phy_prod_pair_addr);
1465 }
1466 
1467 static int qedr_init_srq_user_params(struct ib_udata *udata,
1468 				     struct qedr_srq *srq,
1469 				     struct qedr_create_srq_ureq *ureq,
1470 				     int access)
1471 {
1472 	struct scatterlist *sg;
1473 	int rc;
1474 
1475 	rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
1476 				  ureq->srq_len, false, access, 1);
1477 	if (rc)
1478 		return rc;
1479 
1480 	srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
1481 				     sizeof(struct rdma_srq_producers), access);
1482 	if (IS_ERR(srq->prod_umem)) {
1483 		qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1484 		ib_umem_release(srq->usrq.umem);
1485 		DP_ERR(srq->dev,
1486 		       "create srq: failed ib_umem_get for producer, got %ld\n",
1487 		       PTR_ERR(srq->prod_umem));
1488 		return PTR_ERR(srq->prod_umem);
1489 	}
1490 
1491 	sg = srq->prod_umem->sgt_append.sgt.sgl;
1492 	srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
1493 
1494 	return 0;
1495 }
1496 
1497 static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
1498 					struct qedr_dev *dev,
1499 					struct ib_srq_init_attr *init_attr)
1500 {
1501 	struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1502 	struct qed_chain_init_params params = {
1503 		.mode		= QED_CHAIN_MODE_PBL,
1504 		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1505 		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
1506 		.elem_size	= QEDR_SRQ_WQE_ELEM_SIZE,
1507 	};
1508 	dma_addr_t phy_prod_pair_addr;
1509 	u32 num_elems;
1510 	void *va;
1511 	int rc;
1512 
1513 	va = dma_alloc_coherent(&dev->pdev->dev,
1514 				sizeof(struct rdma_srq_producers),
1515 				&phy_prod_pair_addr, GFP_KERNEL);
1516 	if (!va) {
1517 		DP_ERR(dev,
1518 		       "create srq: failed to allocate dma memory for producer\n");
1519 		return -ENOMEM;
1520 	}
1521 
1522 	hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
1523 	hw_srq->virt_prod_pair_addr = va;
1524 
1525 	num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1526 	params.num_elems = num_elems;
1527 
1528 	rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
1529 	if (rc)
1530 		goto err0;
1531 
1532 	hw_srq->num_elems = num_elems;
1533 
1534 	return 0;
1535 
1536 err0:
1537 	dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1538 			  va, phy_prod_pair_addr);
1539 	return rc;
1540 }
1541 
1542 int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1543 		    struct ib_udata *udata)
1544 {
1545 	struct qed_rdma_destroy_srq_in_params destroy_in_params;
1546 	struct qed_rdma_create_srq_in_params in_params = {};
1547 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1548 	struct qed_rdma_create_srq_out_params out_params;
1549 	struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
1550 	struct qedr_create_srq_ureq ureq = {};
1551 	u64 pbl_base_addr, phy_prod_pair_addr;
1552 	struct qedr_srq_hwq_info *hw_srq;
1553 	u32 page_cnt, page_size;
1554 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
1555 	int rc = 0;
1556 
1557 	DP_DEBUG(dev, QEDR_MSG_QP,
1558 		 "create SRQ called from %s (pd %p)\n",
1559 		 (udata) ? "User lib" : "kernel", pd);
1560 
1561 	if (init_attr->srq_type != IB_SRQT_BASIC &&
1562 	    init_attr->srq_type != IB_SRQT_XRC)
1563 		return -EOPNOTSUPP;
1564 
1565 	rc = qedr_check_srq_params(dev, init_attr, udata);
1566 	if (rc)
1567 		return -EINVAL;
1568 
1569 	srq->dev = dev;
1570 	srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
1571 	hw_srq = &srq->hw_srq;
1572 	spin_lock_init(&srq->lock);
1573 
1574 	hw_srq->max_wr = init_attr->attr.max_wr;
1575 	hw_srq->max_sges = init_attr->attr.max_sge;
1576 
1577 	if (udata) {
1578 		if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1579 							 udata->inlen))) {
1580 			DP_ERR(dev,
1581 			       "create srq: problem copying data from user space\n");
1582 			goto err0;
1583 		}
1584 
1585 		rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
1586 		if (rc)
1587 			goto err0;
1588 
1589 		page_cnt = srq->usrq.pbl_info.num_pbes;
1590 		pbl_base_addr = srq->usrq.pbl_tbl->pa;
1591 		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1592 		page_size = PAGE_SIZE;
1593 	} else {
1594 		struct qed_chain *pbl;
1595 
1596 		rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
1597 		if (rc)
1598 			goto err0;
1599 
1600 		pbl = &hw_srq->pbl;
1601 		page_cnt = qed_chain_get_page_cnt(pbl);
1602 		pbl_base_addr = qed_chain_get_pbl_phys(pbl);
1603 		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1604 		page_size = QED_CHAIN_PAGE_SIZE;
1605 	}
1606 
1607 	in_params.pd_id = pd->pd_id;
1608 	in_params.pbl_base_addr = pbl_base_addr;
1609 	in_params.prod_pair_addr = phy_prod_pair_addr;
1610 	in_params.num_pages = page_cnt;
1611 	in_params.page_size = page_size;
1612 	if (srq->is_xrc) {
1613 		struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
1614 		struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
1615 
1616 		in_params.is_xrc = 1;
1617 		in_params.xrcd_id = xrcd->xrcd_id;
1618 		in_params.cq_cid = cq->icid;
1619 	}
1620 
1621 	rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
1622 	if (rc)
1623 		goto err1;
1624 
1625 	srq->srq_id = out_params.srq_id;
1626 
1627 	if (udata) {
1628 		rc = qedr_copy_srq_uresp(dev, srq, udata);
1629 		if (rc)
1630 			goto err2;
1631 	}
1632 
1633 	rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
1634 	if (rc)
1635 		goto err2;
1636 
1637 	DP_DEBUG(dev, QEDR_MSG_SRQ,
1638 		 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1639 	return 0;
1640 
1641 err2:
1642 	destroy_in_params.srq_id = srq->srq_id;
1643 
1644 	dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
1645 err1:
1646 	if (udata)
1647 		qedr_free_srq_user_params(srq);
1648 	else
1649 		qedr_free_srq_kernel_params(srq);
1650 err0:
1651 	return -EFAULT;
1652 }
1653 
1654 int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1655 {
1656 	struct qed_rdma_destroy_srq_in_params in_params = {};
1657 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1658 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
1659 
1660 	xa_erase_irq(&dev->srqs, srq->srq_id);
1661 	in_params.srq_id = srq->srq_id;
1662 	in_params.is_xrc = srq->is_xrc;
1663 	dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
1664 
1665 	if (ibsrq->uobject)
1666 		qedr_free_srq_user_params(srq);
1667 	else
1668 		qedr_free_srq_kernel_params(srq);
1669 
1670 	DP_DEBUG(dev, QEDR_MSG_SRQ,
1671 		 "destroy srq: destroyed srq with srq_id=0x%0x\n",
1672 		 srq->srq_id);
1673 	return 0;
1674 }
1675 
1676 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1677 		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1678 {
1679 	struct qed_rdma_modify_srq_in_params in_params = {};
1680 	struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1681 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
1682 	int rc;
1683 
1684 	if (attr_mask & IB_SRQ_MAX_WR) {
1685 		DP_ERR(dev,
1686 		       "modify srq: invalid attribute mask=0x%x specified for %p\n",
1687 		       attr_mask, srq);
1688 		return -EINVAL;
1689 	}
1690 
1691 	if (attr_mask & IB_SRQ_LIMIT) {
1692 		if (attr->srq_limit >= srq->hw_srq.max_wr) {
1693 			DP_ERR(dev,
1694 			       "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
1695 			       attr->srq_limit, srq->hw_srq.max_wr);
1696 			return -EINVAL;
1697 		}
1698 
1699 		in_params.srq_id = srq->srq_id;
1700 		in_params.wqe_limit = attr->srq_limit;
1701 		rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
1702 		if (rc)
1703 			return rc;
1704 	}
1705 
1706 	srq->srq_limit = attr->srq_limit;
1707 
1708 	DP_DEBUG(dev, QEDR_MSG_SRQ,
1709 		 "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
1710 
1711 	return 0;
1712 }
1713 
1714 static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
1715 {
1716 	switch (ib_qp_type) {
1717 	case IB_QPT_RC:
1718 		return QED_RDMA_QP_TYPE_RC;
1719 	case IB_QPT_XRC_INI:
1720 		return QED_RDMA_QP_TYPE_XRC_INI;
1721 	case IB_QPT_XRC_TGT:
1722 		return QED_RDMA_QP_TYPE_XRC_TGT;
1723 	default:
1724 		return QED_RDMA_QP_TYPE_INVAL;
1725 	}
1726 }
1727 
1728 static inline void
1729 qedr_init_common_qp_in_params(struct qedr_dev *dev,
1730 			      struct qedr_pd *pd,
1731 			      struct qedr_qp *qp,
1732 			      struct ib_qp_init_attr *attrs,
1733 			      bool fmr_and_reserved_lkey,
1734 			      struct qed_rdma_create_qp_in_params *params)
1735 {
1736 	/* QP handle to be written in an async event */
1737 	params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1738 	params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1739 
1740 	params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1741 	params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1742 	params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
1743 	params->stats_queue = 0;
1744 
1745 	if (pd) {
1746 		params->pd = pd->pd_id;
1747 		params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1748 	}
1749 
1750 	if (qedr_qp_has_sq(qp))
1751 		params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1752 
1753 	if (qedr_qp_has_rq(qp))
1754 		params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1755 
1756 	if (qedr_qp_has_srq(qp)) {
1757 		params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1758 		params->srq_id = qp->srq->srq_id;
1759 		params->use_srq = true;
1760 	} else {
1761 		params->srq_id = 0;
1762 		params->use_srq = false;
1763 	}
1764 }
1765 
1766 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1767 {
1768 	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1769 		 "qp=%p. "
1770 		 "sq_addr=0x%llx, "
1771 		 "sq_len=%zd, "
1772 		 "rq_addr=0x%llx, "
1773 		 "rq_len=%zd"
1774 		 "\n",
1775 		 qp,
1776 		 qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
1777 		 qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
1778 		 qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
1779 		 qedr_qp_has_rq(qp) ? qp->urq.buf_len : 0);
1780 }
1781 
1782 static inline void
1783 qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1784 			    struct qedr_qp *qp,
1785 			    struct qed_rdma_create_qp_out_params *out_params)
1786 {
1787 	qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1788 	qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1789 
1790 	qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1791 			   &qp->usq.pbl_info, FW_PAGE_SHIFT);
1792 	if (!qp->srq) {
1793 		qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1794 		qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1795 	}
1796 
1797 	qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1798 			   &qp->urq.pbl_info, FW_PAGE_SHIFT);
1799 }
1800 
1801 static void qedr_cleanup_user(struct qedr_dev *dev,
1802 			      struct qedr_ucontext *ctx,
1803 			      struct qedr_qp *qp)
1804 {
1805 	if (qedr_qp_has_sq(qp)) {
1806 		ib_umem_release(qp->usq.umem);
1807 		qp->usq.umem = NULL;
1808 	}
1809 
1810 	if (qedr_qp_has_rq(qp)) {
1811 		ib_umem_release(qp->urq.umem);
1812 		qp->urq.umem = NULL;
1813 	}
1814 
1815 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
1816 		qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1817 		qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1818 	} else {
1819 		kfree(qp->usq.pbl_tbl);
1820 		kfree(qp->urq.pbl_tbl);
1821 	}
1822 
1823 	if (qp->usq.db_rec_data) {
1824 		qedr_db_recovery_del(dev, qp->usq.db_addr,
1825 				     &qp->usq.db_rec_data->db_data);
1826 		rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
1827 	}
1828 
1829 	if (qp->urq.db_rec_data) {
1830 		qedr_db_recovery_del(dev, qp->urq.db_addr,
1831 				     &qp->urq.db_rec_data->db_data);
1832 		rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
1833 	}
1834 
1835 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
1836 		qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
1837 				     &qp->urq.db_rec_db2_data);
1838 }
1839 
1840 static int qedr_create_user_qp(struct qedr_dev *dev,
1841 			       struct qedr_qp *qp,
1842 			       struct ib_pd *ibpd,
1843 			       struct ib_udata *udata,
1844 			       struct ib_qp_init_attr *attrs)
1845 {
1846 	struct qed_rdma_create_qp_in_params in_params;
1847 	struct qed_rdma_create_qp_out_params out_params;
1848 	struct qedr_create_qp_uresp uresp = {};
1849 	struct qedr_create_qp_ureq ureq = {};
1850 	int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1851 	struct qedr_ucontext *ctx = NULL;
1852 	struct qedr_pd *pd = NULL;
1853 	int rc = 0;
1854 
1855 	qp->create_type = QEDR_QP_CREATE_USER;
1856 
1857 	if (ibpd) {
1858 		pd = get_qedr_pd(ibpd);
1859 		ctx = pd->uctx;
1860 	}
1861 
1862 	if (udata) {
1863 		rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1864 					udata->inlen));
1865 		if (rc) {
1866 			DP_ERR(dev, "Problem copying data from user space\n");
1867 			return rc;
1868 		}
1869 	}
1870 
1871 	if (qedr_qp_has_sq(qp)) {
1872 		/* SQ - read access only (0) */
1873 		rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
1874 					  ureq.sq_len, true, 0, alloc_and_init);
1875 		if (rc)
1876 			return rc;
1877 	}
1878 
1879 	if (qedr_qp_has_rq(qp)) {
1880 		/* RQ - read access only (0) */
1881 		rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
1882 					  ureq.rq_len, true, 0, alloc_and_init);
1883 		if (rc) {
1884 			ib_umem_release(qp->usq.umem);
1885 			qp->usq.umem = NULL;
1886 			if (rdma_protocol_roce(&dev->ibdev, 1)) {
1887 				qedr_free_pbl(dev, &qp->usq.pbl_info,
1888 					      qp->usq.pbl_tbl);
1889 			} else {
1890 				kfree(qp->usq.pbl_tbl);
1891 			}
1892 			return rc;
1893 		}
1894 	}
1895 
1896 	memset(&in_params, 0, sizeof(in_params));
1897 	qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1898 	in_params.qp_handle_lo = ureq.qp_handle_lo;
1899 	in_params.qp_handle_hi = ureq.qp_handle_hi;
1900 
1901 	if (qp->qp_type == IB_QPT_XRC_TGT) {
1902 		struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
1903 
1904 		in_params.xrcd_id = xrcd->xrcd_id;
1905 		in_params.qp_handle_lo = qp->qp_id;
1906 		in_params.use_srq = 1;
1907 	}
1908 
1909 	if (qedr_qp_has_sq(qp)) {
1910 		in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1911 		in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1912 	}
1913 
1914 	if (qedr_qp_has_rq(qp)) {
1915 		in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1916 		in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1917 	}
1918 
1919 	if (ctx)
1920 		SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
1921 
1922 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1923 					      &in_params, &out_params);
1924 
1925 	if (!qp->qed_qp) {
1926 		rc = -ENOMEM;
1927 		goto err1;
1928 	}
1929 
1930 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
1931 		qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1932 
1933 	qp->qp_id = out_params.qp_id;
1934 	qp->icid = out_params.icid;
1935 
1936 	if (udata) {
1937 		rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
1938 		if (rc)
1939 			goto err;
1940 	}
1941 
1942 	/* The db offset was calculated in qedr_copy_qp_uresp(); now set it in the user queues */
1943 	if (qedr_qp_has_sq(qp)) {
1944 		qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
1945 		qp->sq.max_wr = attrs->cap.max_send_wr;
1946 		rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
1947 					  &qp->usq.db_rec_data->db_data,
1948 					  DB_REC_WIDTH_32B,
1949 					  DB_REC_USER);
1950 		if (rc)
1951 			goto err;
1952 	}
1953 
1954 	if (qedr_qp_has_rq(qp)) {
1955 		qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
1956 		qp->rq.max_wr = attrs->cap.max_recv_wr;
1957 		rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
1958 					  &qp->urq.db_rec_data->db_data,
1959 					  DB_REC_WIDTH_32B,
1960 					  DB_REC_USER);
1961 		if (rc)
1962 			goto err;
1963 	}
1964 
1965 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1966 		qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
1967 
1968 		/* Calculate the db_rec_db2 data here, since it is constant and
1969 		 * therefore does not need to be reflected from user space.
1970 		 */
1971 		qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
1972 		qp->urq.db_rec_db2_data.data.value =
1973 			cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
1974 
1975 		rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
1976 					  &qp->urq.db_rec_db2_data,
1977 					  DB_REC_WIDTH_32B,
1978 					  DB_REC_USER);
1979 		if (rc)
1980 			goto err;
1981 	}
1982 	qedr_qp_user_print(dev, qp);
1983 	return rc;
1984 err:
1985 	rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1986 	if (rc)
1987 		DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
1988 
1989 err1:
1990 	qedr_cleanup_user(dev, ctx, qp);
1991 	return rc;
1992 }
1993 
1994 static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1995 {
1996 	int rc;
1997 
1998 	qp->sq.db = dev->db_addr +
1999 	    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
2000 	qp->sq.db_data.data.icid = qp->icid;
2001 
2002 	rc = qedr_db_recovery_add(dev, qp->sq.db,
2003 				  &qp->sq.db_data,
2004 				  DB_REC_WIDTH_32B,
2005 				  DB_REC_KERNEL);
2006 	if (rc)
2007 		return rc;
2008 
2009 	qp->rq.db = dev->db_addr +
2010 		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
2011 	qp->rq.db_data.data.icid = qp->icid;
2012 	qp->rq.iwarp_db2 = dev->db_addr +
2013 			   DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
2014 	qp->rq.iwarp_db2_data.data.icid = qp->icid;
2015 	qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
2016 
2017 	rc = qedr_db_recovery_add(dev, qp->rq.db,
2018 				  &qp->rq.db_data,
2019 				  DB_REC_WIDTH_32B,
2020 				  DB_REC_KERNEL);
2021 	if (rc)
2022 		return rc;
2023 
2024 	rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
2025 				  &qp->rq.iwarp_db2_data,
2026 				  DB_REC_WIDTH_32B,
2027 				  DB_REC_KERNEL);
2028 	return rc;
2029 }
2030 
2031 static int
2032 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
2033 			   struct qedr_qp *qp,
2034 			   struct qed_rdma_create_qp_in_params *in_params,
2035 			   u32 n_sq_elems, u32 n_rq_elems)
2036 {
2037 	struct qed_rdma_create_qp_out_params out_params;
2038 	struct qed_chain_init_params params = {
2039 		.mode		= QED_CHAIN_MODE_PBL,
2040 		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
2041 	};
2042 	int rc;
2043 
2044 	params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2045 	params.num_elems = n_sq_elems;
2046 	params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2047 
2048 	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2049 	if (rc)
2050 		return rc;
2051 
2052 	in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
2053 	in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
2054 
2055 	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2056 	params.num_elems = n_rq_elems;
2057 	params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2058 
2059 	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2060 	if (rc)
2061 		return rc;
2062 
2063 	in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
2064 	in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
2065 
2066 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2067 					      in_params, &out_params);
2068 
2069 	if (!qp->qed_qp)
2070 		return -EINVAL;
2071 
2072 	qp->qp_id = out_params.qp_id;
2073 	qp->icid = out_params.icid;
2074 
2075 	return qedr_set_roce_db_info(dev, qp);
2076 }
2077 
2078 static int
2079 qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
2080 			    struct qedr_qp *qp,
2081 			    struct qed_rdma_create_qp_in_params *in_params,
2082 			    u32 n_sq_elems, u32 n_rq_elems)
2083 {
2084 	struct qed_rdma_create_qp_out_params out_params;
2085 	struct qed_chain_init_params params = {
2086 		.mode		= QED_CHAIN_MODE_PBL,
2087 		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
2088 	};
2089 	int rc;
2090 
2091 	in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
2092 						     QEDR_SQE_ELEMENT_SIZE,
2093 						     QED_CHAIN_PAGE_SIZE,
2094 						     QED_CHAIN_MODE_PBL);
2095 	in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
2096 						     QEDR_RQE_ELEMENT_SIZE,
2097 						     QED_CHAIN_PAGE_SIZE,
2098 						     QED_CHAIN_MODE_PBL);
2099 
2100 	qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2101 					      in_params, &out_params);
2102 
2103 	if (!qp->qed_qp)
2104 		return -EINVAL;
2105 
2106 	/* Now we allocate the chain */
2107 
2108 	params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2109 	params.num_elems = n_sq_elems;
2110 	params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2111 	params.ext_pbl_virt = out_params.sq_pbl_virt;
2112 	params.ext_pbl_phys = out_params.sq_pbl_phys;
2113 
2114 	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2115 	if (rc)
2116 		goto err;
2117 
2118 	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2119 	params.num_elems = n_rq_elems;
2120 	params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2121 	params.ext_pbl_virt = out_params.rq_pbl_virt;
2122 	params.ext_pbl_phys = out_params.rq_pbl_phys;
2123 
2124 	rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2125 	if (rc)
2126 		goto err;
2127 
2128 	qp->qp_id = out_params.qp_id;
2129 	qp->icid = out_params.icid;
2130 
2131 	return qedr_set_iwarp_db_info(dev, qp);
2132 
2133 err:
2134 	dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2135 
2136 	return rc;
2137 }
2138 
2139 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
2140 {
2141 	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
2142 	kfree(qp->wqe_wr_id);
2143 
2144 	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
2145 	kfree(qp->rqe_wr_id);
2146 
2147 	/* The GSI QP is not registered with the db recovery mechanism, so there is nothing to delete */
2148 	if (qp->qp_type == IB_QPT_GSI)
2149 		return;
2150 
2151 	qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
2152 
2153 	if (!qp->srq) {
2154 		qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
2155 
2156 		if (rdma_protocol_iwarp(&dev->ibdev, 1))
2157 			qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
2158 					     &qp->rq.iwarp_db2_data);
2159 	}
2160 }
2161 
2162 static int qedr_create_kernel_qp(struct qedr_dev *dev,
2163 				 struct qedr_qp *qp,
2164 				 struct ib_pd *ibpd,
2165 				 struct ib_qp_init_attr *attrs)
2166 {
2167 	struct qed_rdma_create_qp_in_params in_params;
2168 	struct qedr_pd *pd = get_qedr_pd(ibpd);
2169 	int rc = -EINVAL;
2170 	u32 n_rq_elems;
2171 	u32 n_sq_elems;
2172 	u32 n_sq_entries;
2173 
2174 	memset(&in_params, 0, sizeof(in_params));
2175 	qp->create_type = QEDR_QP_CREATE_KERNEL;
2176 
2177 	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
2178 	 * the ring. The ring should allow at least a single WR, even if the
2179 	 * user requested none, due to allocation issues.
2180 	 * We should add an extra WR since the prod and cons indices of
2181 	 * wqe_wr_id are managed in such a way that the WQ is considered full
2182 	 * when (prod+1)%max_wr==cons. We currently don't do that because we
2183 	 * double the number of entries due to an iSER issue that pushes far more
2184 	 * WRs than indicated. If we decline its ib_post_send() then we get
2185 	 * error prints in the dmesg we'd like to avoid.
2186 	 */
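	/* Illustrative sizing only (the numbers below are assumptions, not read
	 * from this driver): with attrs->cap.max_send_wr = 128, a wq_multiplier
	 * of 2 and dev->attr.max_sqe = 0x8000, the line below would give
	 * qp->sq.max_wr = min(128 * 2, 0x8000) = 256 shadow entries.
	 */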
2187 	qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
2188 			      dev->attr.max_sqe);
2189 
2190 	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
2191 				GFP_KERNEL);
2192 	if (!qp->wqe_wr_id) {
2193 		DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
2194 		return -ENOMEM;
2195 	}
2196 
2197 	/* QP handle to be written in CQE */
2198 	in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
2199 	in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
2200 
2201 	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
2202 	 * the ring. The ring should allow at least a single WR, even if the
2203 	 * user requested none, due to allocation issues.
2204 	 */
2205 	qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
2206 
2207 	/* Allocate driver internal RQ array */
2208 	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
2209 				GFP_KERNEL);
2210 	if (!qp->rqe_wr_id) {
2211 		DP_ERR(dev,
2212 		       "create qp: failed RQ shadow memory allocation\n");
2213 		kfree(qp->wqe_wr_id);
2214 		return -ENOMEM;
2215 	}
2216 
2217 	qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
2218 
2219 	n_sq_entries = attrs->cap.max_send_wr;
2220 	n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
2221 	n_sq_entries = max_t(u32, n_sq_entries, 1);
2222 	n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2223 
2224 	n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
2225 
2226 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
2227 		rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
2228 						 n_sq_elems, n_rq_elems);
2229 	else
2230 		rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
2231 						n_sq_elems, n_rq_elems);
2232 	if (rc)
2233 		qedr_cleanup_kernel(dev, qp);
2234 
2235 	return rc;
2236 }
2237 
2238 static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
2239 				  struct ib_udata *udata)
2240 {
2241 	struct qedr_ucontext *ctx =
2242 		rdma_udata_to_drv_context(udata, struct qedr_ucontext,
2243 					  ibucontext);
2244 	int rc;
2245 
2246 	if (qp->qp_type != IB_QPT_GSI) {
2247 		rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2248 		if (rc)
2249 			return rc;
2250 	}
2251 
2252 	if (qp->create_type == QEDR_QP_CREATE_USER)
2253 		qedr_cleanup_user(dev, ctx, qp);
2254 	else
2255 		qedr_cleanup_kernel(dev, qp);
2256 
2257 	return 0;
2258 }
2259 
2260 int qedr_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
2261 		   struct ib_udata *udata)
2262 {
2263 	struct qedr_xrcd *xrcd = NULL;
2264 	struct ib_pd *ibpd = ibqp->pd;
2265 	struct qedr_pd *pd = get_qedr_pd(ibpd);
2266 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2267 	struct qedr_qp *qp = get_qedr_qp(ibqp);
2268 	int rc = 0;
2269 
2270 	if (attrs->create_flags)
2271 		return -EOPNOTSUPP;
2272 
2273 	if (attrs->qp_type == IB_QPT_XRC_TGT)
2274 		xrcd = get_qedr_xrcd(attrs->xrcd);
2275 	else
2276 		pd = get_qedr_pd(ibpd);
2277 
2278 	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
2279 		 udata ? "user library" : "kernel", pd);
2280 
2281 	rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
2282 	if (rc)
2283 		return rc;
2284 
2285 	DP_DEBUG(dev, QEDR_MSG_QP,
2286 		 "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
2287 		 udata ? "user library" : "kernel", attrs->event_handler, pd,
2288 		 get_qedr_cq(attrs->send_cq),
2289 		 get_qedr_cq(attrs->send_cq)->icid,
2290 		 get_qedr_cq(attrs->recv_cq),
2291 		 attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
2292 
2293 	qedr_set_common_qp_params(dev, qp, pd, attrs);
2294 
2295 	if (attrs->qp_type == IB_QPT_GSI)
2296 		return qedr_create_gsi_qp(dev, attrs, qp);
2297 
2298 	if (udata || xrcd)
2299 		rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
2300 	else
2301 		rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
2302 
2303 	if (rc)
2304 		return rc;
2305 
2306 	qp->ibqp.qp_num = qp->qp_id;
2307 
2308 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2309 		rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
2310 		if (rc)
2311 			goto out_free_qp_resources;
2312 	}
2313 
2314 	return 0;
2315 
2316 out_free_qp_resources:
2317 	qedr_free_qp_resources(dev, qp, udata);
2318 	return -EFAULT;
2319 }
2320 
2321 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
2322 {
2323 	switch (qp_state) {
2324 	case QED_ROCE_QP_STATE_RESET:
2325 		return IB_QPS_RESET;
2326 	case QED_ROCE_QP_STATE_INIT:
2327 		return IB_QPS_INIT;
2328 	case QED_ROCE_QP_STATE_RTR:
2329 		return IB_QPS_RTR;
2330 	case QED_ROCE_QP_STATE_RTS:
2331 		return IB_QPS_RTS;
2332 	case QED_ROCE_QP_STATE_SQD:
2333 		return IB_QPS_SQD;
2334 	case QED_ROCE_QP_STATE_ERR:
2335 		return IB_QPS_ERR;
2336 	case QED_ROCE_QP_STATE_SQE:
2337 		return IB_QPS_SQE;
2338 	}
2339 	return IB_QPS_ERR;
2340 }
2341 
2342 static enum qed_roce_qp_state qedr_get_state_from_ibqp(
2343 					enum ib_qp_state qp_state)
2344 {
2345 	switch (qp_state) {
2346 	case IB_QPS_RESET:
2347 		return QED_ROCE_QP_STATE_RESET;
2348 	case IB_QPS_INIT:
2349 		return QED_ROCE_QP_STATE_INIT;
2350 	case IB_QPS_RTR:
2351 		return QED_ROCE_QP_STATE_RTR;
2352 	case IB_QPS_RTS:
2353 		return QED_ROCE_QP_STATE_RTS;
2354 	case IB_QPS_SQD:
2355 		return QED_ROCE_QP_STATE_SQD;
2356 	case IB_QPS_ERR:
2357 		return QED_ROCE_QP_STATE_ERR;
2358 	default:
2359 		return QED_ROCE_QP_STATE_ERR;
2360 	}
2361 }
2362 
2363 static int qedr_update_qp_state(struct qedr_dev *dev,
2364 				struct qedr_qp *qp,
2365 				enum qed_roce_qp_state cur_state,
2366 				enum qed_roce_qp_state new_state)
2367 {
2368 	int status = 0;
2369 
2370 	if (new_state == cur_state)
2371 		return 0;
2372 
2373 	switch (cur_state) {
2374 	case QED_ROCE_QP_STATE_RESET:
2375 		switch (new_state) {
2376 		case QED_ROCE_QP_STATE_INIT:
2377 			break;
2378 		default:
2379 			status = -EINVAL;
2380 			break;
2381 		}
2382 		break;
2383 	case QED_ROCE_QP_STATE_INIT:
2384 		switch (new_state) {
2385 		case QED_ROCE_QP_STATE_RTR:
2386 			/* Update doorbell (in case post_recv was
2387 			 * done before move to RTR)
2388 			 */
2389 
2390 			if (rdma_protocol_roce(&dev->ibdev, 1)) {
2391 				writel(qp->rq.db_data.raw, qp->rq.db);
2392 			}
2393 			break;
2394 		case QED_ROCE_QP_STATE_ERR:
2395 			break;
2396 		default:
2397 			/* Invalid state change. */
2398 			status = -EINVAL;
2399 			break;
2400 		}
2401 		break;
2402 	case QED_ROCE_QP_STATE_RTR:
2403 		/* RTR->XXX */
2404 		switch (new_state) {
2405 		case QED_ROCE_QP_STATE_RTS:
2406 			break;
2407 		case QED_ROCE_QP_STATE_ERR:
2408 			break;
2409 		default:
2410 			/* Invalid state change. */
2411 			status = -EINVAL;
2412 			break;
2413 		}
2414 		break;
2415 	case QED_ROCE_QP_STATE_RTS:
2416 		/* RTS->XXX */
2417 		switch (new_state) {
2418 		case QED_ROCE_QP_STATE_SQD:
2419 			break;
2420 		case QED_ROCE_QP_STATE_ERR:
2421 			break;
2422 		default:
2423 			/* Invalid state change. */
2424 			status = -EINVAL;
2425 			break;
2426 		}
2427 		break;
2428 	case QED_ROCE_QP_STATE_SQD:
2429 		/* SQD->XXX */
2430 		switch (new_state) {
2431 		case QED_ROCE_QP_STATE_RTS:
2432 		case QED_ROCE_QP_STATE_ERR:
2433 			break;
2434 		default:
2435 			/* Invalid state change. */
2436 			status = -EINVAL;
2437 			break;
2438 		}
2439 		break;
2440 	case QED_ROCE_QP_STATE_ERR:
2441 		/* ERR->XXX */
2442 		switch (new_state) {
2443 		case QED_ROCE_QP_STATE_RESET:
2444 			if ((qp->rq.prod != qp->rq.cons) ||
2445 			    (qp->sq.prod != qp->sq.cons)) {
2446 				DP_NOTICE(dev,
2447 					  "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
2448 					  qp->rq.prod, qp->rq.cons, qp->sq.prod,
2449 					  qp->sq.cons);
2450 				status = -EINVAL;
2451 			}
2452 			break;
2453 		default:
2454 			status = -EINVAL;
2455 			break;
2456 		}
2457 		break;
2458 	default:
2459 		status = -EINVAL;
2460 		break;
2461 	}
2462 
2463 	return status;
2464 }
2465 
2466 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2467 		   int attr_mask, struct ib_udata *udata)
2468 {
2469 	struct qedr_qp *qp = get_qedr_qp(ibqp);
2470 	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
2471 	struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
2472 	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2473 	enum ib_qp_state old_qp_state, new_qp_state;
2474 	enum qed_roce_qp_state cur_state;
2475 	int rc = 0;
2476 
2477 	DP_DEBUG(dev, QEDR_MSG_QP,
2478 		 "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
2479 		 attr->qp_state);
2480 
2481 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
2482 		return -EOPNOTSUPP;
2483 
2484 	old_qp_state = qedr_get_ibqp_state(qp->state);
2485 	if (attr_mask & IB_QP_STATE)
2486 		new_qp_state = attr->qp_state;
2487 	else
2488 		new_qp_state = old_qp_state;
2489 
2490 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
2491 		if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
2492 					ibqp->qp_type, attr_mask)) {
2493 			DP_ERR(dev,
2494 			       "modify qp: invalid attribute mask=0x%x specified for\n"
2495 			       "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
2496 			       attr_mask, qp->qp_id, ibqp->qp_type,
2497 			       old_qp_state, new_qp_state);
2498 			rc = -EINVAL;
2499 			goto err;
2500 		}
2501 	}
2502 
2503 	/* Translate the masks... */
2504 	if (attr_mask & IB_QP_STATE) {
2505 		SET_FIELD(qp_params.modify_flags,
2506 			  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
2507 		qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2508 	}
2509 
2510 	if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2511 		qp_params.sqd_async = true;
2512 
2513 	if (attr_mask & IB_QP_PKEY_INDEX) {
2514 		SET_FIELD(qp_params.modify_flags,
2515 			  QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2516 		if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2517 			rc = -EINVAL;
2518 			goto err;
2519 		}
2520 
2521 		qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2522 	}
2523 
2524 	if (attr_mask & IB_QP_QKEY)
2525 		qp->qkey = attr->qkey;
2526 
2527 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
2528 		SET_FIELD(qp_params.modify_flags,
2529 			  QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2530 		qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2531 						  IB_ACCESS_REMOTE_READ;
2532 		qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2533 						   IB_ACCESS_REMOTE_WRITE;
2534 		qp_params.incoming_atomic_en = attr->qp_access_flags &
2535 					       IB_ACCESS_REMOTE_ATOMIC;
2536 	}
2537 
2538 	if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2539 		if (rdma_protocol_iwarp(&dev->ibdev, 1))
2540 			return -EINVAL;
2541 
2542 		if (attr_mask & IB_QP_PATH_MTU) {
2543 			if (attr->path_mtu < IB_MTU_256 ||
2544 			    attr->path_mtu > IB_MTU_4096) {
2545 				pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2546 				rc = -EINVAL;
2547 				goto err;
2548 			}
2549 			qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2550 				      ib_mtu_enum_to_int(iboe_get_mtu
2551 							 (dev->ndev->mtu)));
2552 		}
2553 
2554 		if (!qp->mtu) {
2555 			qp->mtu =
2556 			ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2557 			pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2558 		}
2559 
2560 		SET_FIELD(qp_params.modify_flags,
2561 			  QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2562 
2563 		qp_params.traffic_class_tos = grh->traffic_class;
2564 		qp_params.flow_label = grh->flow_label;
2565 		qp_params.hop_limit_ttl = grh->hop_limit;
2566 
2567 		qp->sgid_idx = grh->sgid_index;
2568 
2569 		rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2570 		if (rc) {
2571 			DP_ERR(dev,
2572 			       "modify qp: problems with GID index %d (rc=%d)\n",
2573 			       grh->sgid_index, rc);
2574 			return rc;
2575 		}
2576 
2577 		rc = qedr_get_dmac(dev, &attr->ah_attr,
2578 				   qp_params.remote_mac_addr);
2579 		if (rc)
2580 			return rc;
2581 
2582 		qp_params.use_local_mac = true;
2583 		ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2584 
2585 		DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2586 			 qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2587 			 qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2588 		DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2589 			 qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2590 			 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2591 		DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2592 			 qp_params.remote_mac_addr);
2593 
2594 		qp_params.mtu = qp->mtu;
2595 		qp_params.lb_indication = false;
2596 	}
2597 
2598 	if (!qp_params.mtu) {
2599 		/* Stay with current MTU */
2600 		if (qp->mtu)
2601 			qp_params.mtu = qp->mtu;
2602 		else
2603 			qp_params.mtu =
2604 			    ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2605 	}
2606 
2607 	if (attr_mask & IB_QP_TIMEOUT) {
2608 		SET_FIELD(qp_params.modify_flags,
2609 			  QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2610 
2611 		/* The received timeout value is an exponent used like this:
2612 		 *    "12.7.34 LOCAL ACK TIMEOUT
2613 		 *    Value representing the transport (ACK) timeout for use by
2614 		 *    the remote, expressed as: 4.096 * 2^timeout [usec]"
2615 		 * The FW expects timeout in msec so we need to divide the usec
2616 		 * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2617 		 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2618 		 * The value of zero means infinite so we use a 'max_t' to make
2619 		 * sure that sub 1 msec values will be configured as 1 msec.
2620 		 */
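		/* Worked example (illustrative): attr->timeout = 14 means
		 * 4.096 usec * 2^14 ~= 67 msec on the wire, and the code
		 * below programs 1 << (14 - 8) = 64 msec, matching the
		 * 2^(timeout - 8) approximation described above.
		 */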
2621 		if (attr->timeout)
2622 			qp_params.ack_timeout =
2623 					1 << max_t(int, attr->timeout - 8, 0);
2624 		else
2625 			qp_params.ack_timeout = 0;
2626 
2627 		qp->timeout = attr->timeout;
2628 	}
2629 
2630 	if (attr_mask & IB_QP_RETRY_CNT) {
2631 		SET_FIELD(qp_params.modify_flags,
2632 			  QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2633 		qp_params.retry_cnt = attr->retry_cnt;
2634 	}
2635 
2636 	if (attr_mask & IB_QP_RNR_RETRY) {
2637 		SET_FIELD(qp_params.modify_flags,
2638 			  QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2639 		qp_params.rnr_retry_cnt = attr->rnr_retry;
2640 	}
2641 
2642 	if (attr_mask & IB_QP_RQ_PSN) {
2643 		SET_FIELD(qp_params.modify_flags,
2644 			  QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2645 		qp_params.rq_psn = attr->rq_psn;
2646 		qp->rq_psn = attr->rq_psn;
2647 	}
2648 
2649 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2650 		if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2651 			rc = -EINVAL;
2652 			DP_ERR(dev,
2653 			       "unsupported max_rd_atomic=%d, supported=%d\n",
2654 			       attr->max_rd_atomic,
2655 			       dev->attr.max_qp_req_rd_atomic_resc);
2656 			goto err;
2657 		}
2658 
2659 		SET_FIELD(qp_params.modify_flags,
2660 			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2661 		qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2662 	}
2663 
2664 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2665 		SET_FIELD(qp_params.modify_flags,
2666 			  QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2667 		qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2668 	}
2669 
2670 	if (attr_mask & IB_QP_SQ_PSN) {
2671 		SET_FIELD(qp_params.modify_flags,
2672 			  QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2673 		qp_params.sq_psn = attr->sq_psn;
2674 		qp->sq_psn = attr->sq_psn;
2675 	}
2676 
2677 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2678 		if (attr->max_dest_rd_atomic >
2679 		    dev->attr.max_qp_resp_rd_atomic_resc) {
2680 			DP_ERR(dev,
2681 			       "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2682 			       attr->max_dest_rd_atomic,
2683 			       dev->attr.max_qp_resp_rd_atomic_resc);
2684 
2685 			rc = -EINVAL;
2686 			goto err;
2687 		}
2688 
2689 		SET_FIELD(qp_params.modify_flags,
2690 			  QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2691 		qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2692 	}
2693 
2694 	if (attr_mask & IB_QP_DEST_QPN) {
2695 		SET_FIELD(qp_params.modify_flags,
2696 			  QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2697 
2698 		qp_params.dest_qp = attr->dest_qp_num;
2699 		qp->dest_qp_num = attr->dest_qp_num;
2700 	}
2701 
2702 	cur_state = qp->state;
2703 
2704 	/* Update the QP state before the actual ramrod to prevent a race with
2705 	 * fast path. Modifying the QP state to error will cause the device to
2706 	 * flush the CQEs; while polling, the flushed CQEs would be considered
2707 	 * a potential issue if the QP isn't in the error state.
2708 	 */
2709 	if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2710 	    !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2711 		qp->state = QED_ROCE_QP_STATE_ERR;
2712 
2713 	if (qp->qp_type != IB_QPT_GSI)
2714 		rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2715 					      qp->qed_qp, &qp_params);
2716 
2717 	if (attr_mask & IB_QP_STATE) {
2718 		if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2719 			rc = qedr_update_qp_state(dev, qp, cur_state,
2720 						  qp_params.new_state);
2721 		qp->state = qp_params.new_state;
2722 	}
2723 
2724 err:
2725 	return rc;
2726 }
2727 
2728 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2729 {
2730 	int ib_qp_acc_flags = 0;
2731 
2732 	if (params->incoming_rdma_write_en)
2733 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2734 	if (params->incoming_rdma_read_en)
2735 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2736 	if (params->incoming_atomic_en)
2737 		ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2738 	ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2739 	return ib_qp_acc_flags;
2740 }
2741 
2742 int qedr_query_qp(struct ib_qp *ibqp,
2743 		  struct ib_qp_attr *qp_attr,
2744 		  int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2745 {
2746 	struct qed_rdma_query_qp_out_params params;
2747 	struct qedr_qp *qp = get_qedr_qp(ibqp);
2748 	struct qedr_dev *dev = qp->dev;
2749 	int rc = 0;
2750 
2751 	memset(&params, 0, sizeof(params));
2752 	memset(qp_attr, 0, sizeof(*qp_attr));
2753 	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2754 
2755 	if (qp->qp_type != IB_QPT_GSI) {
2756 		rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2757 		if (rc)
2758 			goto err;
2759 		qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2760 	} else {
2761 		qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
2762 	}
2763 
2764 	qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2765 	qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2766 	qp_attr->path_mig_state = IB_MIG_MIGRATED;
2767 	qp_attr->rq_psn = params.rq_psn;
2768 	qp_attr->sq_psn = params.sq_psn;
2769 	qp_attr->dest_qp_num = params.dest_qp;
2770 
2771 	qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2772 
2773 	qp_attr->cap.max_send_wr = qp->sq.max_wr;
2774 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2775 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
2776 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2777 	qp_attr->cap.max_inline_data = dev->attr.max_inline;
2778 	qp_init_attr->cap = qp_attr->cap;
2779 
2780 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2781 	rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2782 			params.flow_label, qp->sgid_idx,
2783 			params.hop_limit_ttl, params.traffic_class_tos);
2784 	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2785 	rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2786 	rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2787 	qp_attr->timeout = qp->timeout;
2788 	qp_attr->rnr_retry = params.rnr_retry;
2789 	qp_attr->retry_cnt = params.retry_cnt;
2790 	qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2791 	qp_attr->pkey_index = params.pkey_index;
2792 	qp_attr->port_num = 1;
2793 	rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2794 	rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2795 	qp_attr->alt_pkey_index = 0;
2796 	qp_attr->alt_port_num = 0;
2797 	qp_attr->alt_timeout = 0;
2798 	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2799 
2800 	qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2801 	qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2802 	qp_attr->max_rd_atomic = params.max_rd_atomic;
2803 	qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2804 
2805 	DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2806 		 qp_attr->cap.max_inline_data);
2807 
2808 err:
2809 	return rc;
2810 }
2811 
2812 int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
2813 {
2814 	struct qedr_qp *qp = get_qedr_qp(ibqp);
2815 	struct qedr_dev *dev = qp->dev;
2816 	struct ib_qp_attr attr;
2817 	int attr_mask = 0;
2818 
2819 	DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2820 		 qp, qp->qp_type);
2821 
2822 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
2823 		if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2824 		    (qp->state != QED_ROCE_QP_STATE_ERR) &&
2825 		    (qp->state != QED_ROCE_QP_STATE_INIT)) {
2826 
2827 			attr.qp_state = IB_QPS_ERR;
2828 			attr_mask |= IB_QP_STATE;
2829 
2830 			/* Change the QP state to ERROR */
2831 			qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2832 		}
2833 	} else {
2834 		/* If connection establishment has started, the WAIT_FOR_CONNECT
2835 		 * bit will be on and we need to wait for the establishment
2836 		 * to complete before destroying the qp.
2837 		 */
2838 		if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
2839 				     &qp->iwarp_cm_flags))
2840 			wait_for_completion(&qp->iwarp_cm_comp);
2841 
2842 		/* If graceful disconnect started, the WAIT_FOR_DISCONNECT
2843 		 * bit will be on, and we need to wait for the disconnect to
2844 		 * complete before continuing. We can use the same completion,
2845 		 * iwarp_cm_comp, since this is the only place that waits for
2846 		 * this completion and it is sequential. In addition,
2847 		 * disconnect can't occur before the connection is fully
2848 		 * established, therefore if WAIT_FOR_DISCONNECT is on it
2849 		 * means WAIT_FOR_CONNECT is also on and the completion for
2850 		 * CONNECT already occurred.
2851 		 */
2852 		if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
2853 				     &qp->iwarp_cm_flags))
2854 			wait_for_completion(&qp->iwarp_cm_comp);
2855 	}
2856 
2857 	if (qp->qp_type == IB_QPT_GSI)
2858 		qedr_destroy_gsi_qp(dev);
2859 
2860 	/* We need to remove the entry from the xarray before we release the
2861 	 * qp_id to avoid a race of the qp_id being reallocated and failing
2862 	 * on xa_insert
2863 	 */
2864 	if (rdma_protocol_iwarp(&dev->ibdev, 1))
2865 		xa_erase(&dev->qps, qp->qp_id);
2866 
2867 	qedr_free_qp_resources(dev, qp, udata);
2868 
2869 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2870 		qedr_iw_qp_rem_ref(&qp->ibqp);
2871 		wait_for_completion(&qp->qp_rel_comp);
2872 	}
2873 
2874 	return 0;
2875 }
2876 
2877 int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
2878 		   struct ib_udata *udata)
2879 {
2880 	struct qedr_ah *ah = get_qedr_ah(ibah);
2881 
2882 	rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
2883 
2884 	return 0;
2885 }
2886 
2887 int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
2888 {
2889 	struct qedr_ah *ah = get_qedr_ah(ibah);
2890 
2891 	rdma_destroy_ah_attr(&ah->attr);
2892 	return 0;
2893 }
2894 
2895 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2896 {
2897 	struct qedr_pbl *pbl, *tmp;
2898 
2899 	if (info->pbl_table)
2900 		list_add_tail(&info->pbl_table->list_entry,
2901 			      &info->free_pbl_list);
2902 
2903 	if (!list_empty(&info->inuse_pbl_list))
2904 		list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2905 
2906 	list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2907 		list_del(&pbl->list_entry);
2908 		qedr_free_pbl(dev, &info->pbl_info, pbl);
2909 	}
2910 }
2911 
2912 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2913 			size_t page_list_len, bool two_layered)
2914 {
2915 	struct qedr_pbl *tmp;
2916 	int rc;
2917 
2918 	INIT_LIST_HEAD(&info->free_pbl_list);
2919 	INIT_LIST_HEAD(&info->inuse_pbl_list);
2920 
2921 	rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2922 				  page_list_len, two_layered);
2923 	if (rc)
2924 		goto done;
2925 
2926 	info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2927 	if (IS_ERR(info->pbl_table)) {
2928 		rc = PTR_ERR(info->pbl_table);
2929 		goto done;
2930 	}
2931 
2932 	DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2933 		 &info->pbl_table->pa);
2934 
2935 	/* In the usual case we use 2 PBLs, so we add one to the free
2936 	 * list and allocate another one.
2937 	 */
2938 	tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2939 	if (IS_ERR(tmp)) {
2940 		DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2941 		goto done;
2942 	}
2943 
2944 	list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2945 
2946 	DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2947 
2948 done:
2949 	if (rc)
2950 		free_mr_info(dev, info);
2951 
2952 	return rc;
2953 }
2954 
2955 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2956 			       u64 usr_addr, int acc, struct ib_udata *udata)
2957 {
2958 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2959 	struct qedr_mr *mr;
2960 	struct qedr_pd *pd;
2961 	int rc = -ENOMEM;
2962 
2963 	pd = get_qedr_pd(ibpd);
2964 	DP_DEBUG(dev, QEDR_MSG_MR,
2965 		 "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2966 		 pd->pd_id, start, len, usr_addr, acc);
2967 
2968 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2969 		return ERR_PTR(-EINVAL);
2970 
2971 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2972 	if (!mr)
2973 		return ERR_PTR(rc);
2974 
2975 	mr->type = QEDR_MR_USER;
2976 
2977 	mr->umem = ib_umem_get(ibpd->device, start, len, acc);
2978 	if (IS_ERR(mr->umem)) {
2979 		rc = -EFAULT;
2980 		goto err0;
2981 	}
2982 
2983 	rc = init_mr_info(dev, &mr->info,
2984 			  ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
2985 	if (rc)
2986 		goto err1;
2987 
2988 	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2989 			   &mr->info.pbl_info, PAGE_SHIFT);
2990 
2991 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2992 	if (rc) {
2993 		if (rc == -EINVAL)
2994 			DP_ERR(dev, "Out of MR resources\n");
2995 		else
2996 			DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
2997 
2998 		goto err1;
2999 	}
3000 
3001 	/* Index only, 18 bit long, lkey = itid << 8 | key */
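	/* For illustration only: an itid of 0x1a2 with key 0 would give
	 * lkey = (0x1a2 << 8) | 0 = 0x1a200.
	 */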
3002 	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3003 	mr->hw_mr.key = 0;
3004 	mr->hw_mr.pd = pd->pd_id;
3005 	mr->hw_mr.local_read = 1;
3006 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3007 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3008 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3009 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3010 	mr->hw_mr.mw_bind = false;
3011 	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
3012 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3013 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3014 	mr->hw_mr.page_size_log = PAGE_SHIFT;
3015 	mr->hw_mr.length = len;
3016 	mr->hw_mr.vaddr = usr_addr;
3017 	mr->hw_mr.phy_mr = false;
3018 	mr->hw_mr.dma_mr = false;
3019 
3020 	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3021 	if (rc) {
3022 		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3023 		goto err2;
3024 	}
3025 
3026 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3027 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3028 	    mr->hw_mr.remote_atomic)
3029 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3030 
3031 	DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
3032 		 mr->ibmr.lkey);
3033 	return &mr->ibmr;
3034 
3035 err2:
3036 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3037 err1:
3038 	qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3039 err0:
3040 	kfree(mr);
3041 	return ERR_PTR(rc);
3042 }
3043 
3044 int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3045 {
3046 	struct qedr_mr *mr = get_qedr_mr(ib_mr);
3047 	struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
3048 	int rc = 0;
3049 
3050 	rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
3051 	if (rc)
3052 		return rc;
3053 
3054 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3055 
3056 	if (mr->type != QEDR_MR_DMA)
3057 		free_mr_info(dev, &mr->info);
3058 
3059 	/* it could be user registered memory. */
3060 	ib_umem_release(mr->umem);
3061 
3062 	kfree(mr);
3063 
3064 	return rc;
3065 }
3066 
3067 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
3068 				       int max_page_list_len)
3069 {
3070 	struct qedr_pd *pd = get_qedr_pd(ibpd);
3071 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3072 	struct qedr_mr *mr;
3073 	int rc = -ENOMEM;
3074 
3075 	DP_DEBUG(dev, QEDR_MSG_MR,
3076 		 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
3077 		 max_page_list_len);
3078 
3079 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3080 	if (!mr)
3081 		return ERR_PTR(rc);
3082 
3083 	mr->dev = dev;
3084 	mr->type = QEDR_MR_FRMR;
3085 
3086 	rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
3087 	if (rc)
3088 		goto err0;
3089 
3090 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3091 	if (rc) {
3092 		if (rc == -EINVAL)
3093 			DP_ERR(dev, "Out of MR resources\n");
3094 		else
3095 			DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3096 
3097 		goto err1;
3098 	}
3099 
3100 	/* Index only, 18 bit long, lkey = itid << 8 | key */
3101 	mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
3102 	mr->hw_mr.key = 0;
3103 	mr->hw_mr.pd = pd->pd_id;
3104 	mr->hw_mr.local_read = 1;
3105 	mr->hw_mr.local_write = 0;
3106 	mr->hw_mr.remote_read = 0;
3107 	mr->hw_mr.remote_write = 0;
3108 	mr->hw_mr.remote_atomic = 0;
3109 	mr->hw_mr.mw_bind = false;
3110 	mr->hw_mr.pbl_ptr = 0;
3111 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3112 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3113 	mr->hw_mr.length = 0;
3114 	mr->hw_mr.vaddr = 0;
3115 	mr->hw_mr.phy_mr = true;
3116 	mr->hw_mr.dma_mr = false;
3117 
3118 	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3119 	if (rc) {
3120 		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3121 		goto err2;
3122 	}
3123 
3124 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3125 	mr->ibmr.rkey = mr->ibmr.lkey;
3126 
3127 	DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
3128 	return mr;
3129 
3130 err2:
3131 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3132 err1:
3133 	qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3134 err0:
3135 	kfree(mr);
3136 	return ERR_PTR(rc);
3137 }
3138 
3139 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
3140 			    u32 max_num_sg)
3141 {
3142 	struct qedr_mr *mr;
3143 
3144 	if (mr_type != IB_MR_TYPE_MEM_REG)
3145 		return ERR_PTR(-EINVAL);
3146 
3147 	mr = __qedr_alloc_mr(ibpd, max_num_sg);
3148 
3149 	if (IS_ERR(mr))
3150 		return ERR_PTR(-EINVAL);
3151 
3152 	return &mr->ibmr;
3153 }
3154 
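/* Illustrative PBE indexing example (a 4096-byte pbl_size is an assumption
 * for the arithmetic, not read from the device): pbes_in_page = 4096 / 8 =
 * 512, so page number 700 of an MR is written to pbl_table[700 / 512] =
 * pbl_table[1] at PBE offset 700 % 512 = 188, as computed below.
 */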
3155 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
3156 {
3157 	struct qedr_mr *mr = get_qedr_mr(ibmr);
3158 	struct qedr_pbl *pbl_table;
3159 	struct regpair *pbe;
3160 	u32 pbes_in_page;
3161 
3162 	if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
3163 		DP_ERR(mr->dev, "qedr_set_page failed, the PBL is full (npages=%d)\n", mr->npages);
3164 		return -ENOMEM;
3165 	}
3166 
3167 	DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
3168 		 mr->npages, addr);
3169 
3170 	pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
3171 	pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
3172 	pbe = (struct regpair *)pbl_table->va;
3173 	pbe +=  mr->npages % pbes_in_page;
3174 	pbe->lo = cpu_to_le32((u32)addr);
3175 	pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
3176 
3177 	mr->npages++;
3178 
3179 	return 0;
3180 }
3181 
3182 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
3183 {
3184 	int work = info->completed - info->completed_handled - 1;
3185 
3186 	DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
3187 	while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
3188 		struct qedr_pbl *pbl;
3189 
3190 		/* Free all the page lists that can be freed (all the ones that
3191 		 * were invalidated), under the assumption that if an FMR
3192 		 * completed successfully, any invalidate operation posted
3193 		 * before it has also completed.
3194 		 */
3195 		pbl = list_first_entry(&info->inuse_pbl_list,
3196 				       struct qedr_pbl, list_entry);
3197 		list_move_tail(&pbl->list_entry, &info->free_pbl_list);
3198 		info->completed_handled++;
3199 	}
3200 }
3201 
3202 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
3203 		   int sg_nents, unsigned int *sg_offset)
3204 {
3205 	struct qedr_mr *mr = get_qedr_mr(ibmr);
3206 
3207 	mr->npages = 0;
3208 
3209 	handle_completed_mrs(mr->dev, &mr->info);
3210 	return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
3211 }
3212 
3213 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
3214 {
3215 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3216 	struct qedr_pd *pd = get_qedr_pd(ibpd);
3217 	struct qedr_mr *mr;
3218 	int rc;
3219 
3220 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3221 	if (!mr)
3222 		return ERR_PTR(-ENOMEM);
3223 
3224 	mr->type = QEDR_MR_DMA;
3225 
3226 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3227 	if (rc) {
3228 		if (rc == -EINVAL)
3229 			DP_ERR(dev, "Out of MR resources\n");
3230 		else
3231 			DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3232 
3233 		goto err1;
3234 	}
3235 
3236 	/* index only, 18 bit long, lkey = itid << 8 | key */
3237 	mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3238 	mr->hw_mr.pd = pd->pd_id;
3239 	mr->hw_mr.local_read = 1;
3240 	mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3241 	mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3242 	mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3243 	mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3244 	mr->hw_mr.dma_mr = true;
3245 
3246 	rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3247 	if (rc) {
3248 		DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3249 		goto err2;
3250 	}
3251 
3252 	mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3253 	if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3254 	    mr->hw_mr.remote_atomic)
3255 		mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3256 
3257 	DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
3258 	return &mr->ibmr;
3259 
3260 err2:
3261 	dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3262 err1:
3263 	kfree(mr);
3264 	return ERR_PTR(rc);
3265 }
3266 
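/* Ring-full illustration (hypothetical numbers): with max_wr = 4, prod = 3
 * and cons = 0, ((3 + 1) % 4) == 0, so the helper below reports full while
 * one slot stays unused - the (prod + 1) % max_wr == cons convention also
 * mentioned in the SQ sizing comment of qedr_create_kernel_qp().
 */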
3267 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
3268 {
3269 	return (((wq->prod + 1) % wq->max_wr) == wq->cons);
3270 }
3271 
3272 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
3273 {
3274 	int i, len = 0;
3275 
3276 	for (i = 0; i < num_sge; i++)
3277 		len += sg_list[i].length;
3278 
3279 	return len;
3280 }
3281 
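/* Note on the helper below: composing cpu_to_le64() with cpu_to_be64() is an
 * unconditional 64-bit byte swap on both little- and big-endian hosts; the
 * intent (as suggested by the callers) is to present fully written inline
 * data segments in the byte order the firmware expects.
 */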
3282 static void swap_wqe_data64(u64 *p)
3283 {
3284 	int i;
3285 
3286 	for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
3287 		*p = cpu_to_be64(cpu_to_le64(*p));
3288 }
3289 
3290 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
3291 				       struct qedr_qp *qp, u8 *wqe_size,
3292 				       const struct ib_send_wr *wr,
3293 				       const struct ib_send_wr **bad_wr,
3294 				       u8 *bits, u8 bit)
3295 {
3296 	u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
3297 	char *seg_prt, *wqe;
3298 	int i, seg_siz;
3299 
3300 	if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
3301 		DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
3302 		*bad_wr = wr;
3303 		return 0;
3304 	}
3305 
3306 	if (!data_size)
3307 		return data_size;
3308 
3309 	*bits |= bit;
3310 
3311 	seg_prt = NULL;
3312 	wqe = NULL;
3313 	seg_siz = 0;
3314 
3315 	/* Copy data inline */
3316 	for (i = 0; i < wr->num_sge; i++) {
3317 		u32 len = wr->sg_list[i].length;
3318 		void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3319 
3320 		while (len > 0) {
3321 			u32 cur;
3322 
3323 			/* New segment required */
3324 			if (!seg_siz) {
3325 				wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3326 				seg_prt = wqe;
3327 				seg_siz = sizeof(struct rdma_sq_common_wqe);
3328 				(*wqe_size)++;
3329 			}
3330 
3331 			/* Calculate currently allowed length */
3332 			cur = min_t(u32, len, seg_siz);
3333 			memcpy(seg_prt, src, cur);
3334 
3335 			/* Update segment variables */
3336 			seg_prt += cur;
3337 			seg_siz -= cur;
3338 
3339 			/* Update sge variables */
3340 			src += cur;
3341 			len -= cur;
3342 
3343 			/* Swap fully-completed segments */
3344 			if (!seg_siz)
3345 				swap_wqe_data64((u64 *)wqe);
3346 		}
3347 	}
3348 
3349 	/* Swap the last, partially filled segment */
3350 	if (seg_siz)
3351 		swap_wqe_data64((u64 *)wqe);
3352 
3353 	return data_size;
3354 }
3355 
3356 #define RQ_SGE_SET(sge, vaddr, vlength, vflags)			\
3357 	do {							\
3358 		DMA_REGPAIR_LE(sge->addr, vaddr);		\
3359 		(sge)->length = cpu_to_le32(vlength);		\
3360 		(sge)->flags = cpu_to_le32(vflags);		\
3361 	} while (0)
3362 
3363 #define SRQ_HDR_SET(hdr, vwr_id, num_sge)			\
3364 	do {							\
3365 		DMA_REGPAIR_LE(hdr->wr_id, vwr_id);		\
3366 		(hdr)->num_sges = num_sge;			\
3367 	} while (0)
3368 
3369 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)			\
3370 	do {							\
3371 		DMA_REGPAIR_LE(sge->addr, vaddr);		\
3372 		(sge)->length = cpu_to_le32(vlength);		\
3373 		(sge)->l_key = cpu_to_le32(vlkey);		\
3374 	} while (0)
3375 
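/* Post one rdma_sq_sge per scatter/gather entry of the WR and return the
 * total payload length. *wqe_size is bumped by the number of SGEs unless
 * the caller passes NULL (the atomic path uses a fixed WQE size).
 */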
3376 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
3377 				const struct ib_send_wr *wr)
3378 {
3379 	u32 data_size = 0;
3380 	int i;
3381 
3382 	for (i = 0; i < wr->num_sge; i++) {
3383 		struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3384 
3385 		DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3386 		sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3387 		sge->length = cpu_to_le32(wr->sg_list[i].length);
3388 		data_size += wr->sg_list[i].length;
3389 	}
3390 
3391 	if (wqe_size)
3392 		*wqe_size += wr->num_sge;
3393 
3394 	return data_size;
3395 }
3396 
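/* Fill the RDMA-specific second WQE segment (remote address and rkey) and
 * then attach the payload, either inline (RDMA write only) or as SGEs.
 */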
3397 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
3398 				     struct qedr_qp *qp,
3399 				     struct rdma_sq_rdma_wqe_1st *rwqe,
3400 				     struct rdma_sq_rdma_wqe_2nd *rwqe2,
3401 				     const struct ib_send_wr *wr,
3402 				     const struct ib_send_wr **bad_wr)
3403 {
3404 	rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3405 	DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3406 
3407 	if (wr->send_flags & IB_SEND_INLINE &&
3408 	    (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
3409 	     wr->opcode == IB_WR_RDMA_WRITE)) {
3410 		u8 flags = 0;
3411 
3412 		SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
3413 		return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3414 						   bad_wr, &rwqe->flags, flags);
3415 	}
3416 
3417 	return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3418 }
3419 
3420 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
3421 				     struct qedr_qp *qp,
3422 				     struct rdma_sq_send_wqe_1st *swqe,
3423 				     struct rdma_sq_send_wqe_2st *swqe2,
3424 				     const struct ib_send_wr *wr,
3425 				     const struct ib_send_wr **bad_wr)
3426 {
3427 	memset(swqe2, 0, sizeof(*swqe2));
3428 	if (wr->send_flags & IB_SEND_INLINE) {
3429 		u8 flags = 0;
3430 
3431 		SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
3432 		return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3433 						   bad_wr, &swqe->flags, flags);
3434 	}
3435 
3436 	return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3437 }
3438 
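/* Build a fast-register WQE pair from an IB_WR_REG_MR work request: the
 * first segment carries the iova and key, the second carries the access
 * flags, page-size log, length and the PBL address of the MR being
 * registered.
 */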
3439 static int qedr_prepare_reg(struct qedr_qp *qp,
3440 			    struct rdma_sq_fmr_wqe_1st *fwqe1,
3441 			    const struct ib_reg_wr *wr)
3442 {
3443 	struct qedr_mr *mr = get_qedr_mr(wr->mr);
3444 	struct rdma_sq_fmr_wqe_2nd *fwqe2;
3445 
3446 	fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3447 	fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3448 	fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3449 	fwqe1->l_key = wr->key;
3450 
3451 	fwqe2->access_ctrl = 0;
3452 
3453 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3454 		   !!(wr->access & IB_ACCESS_REMOTE_READ));
3455 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3456 		   !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3457 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3458 		   !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3459 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3460 	SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3461 		   !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3462 	fwqe2->fmr_ctrl = 0;
3463 
3464 	SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3465 		   ilog2(mr->ibmr.page_size) - 12);
3466 
3467 	fwqe2->length_hi = 0;
3468 	fwqe2->length_lo = mr->ibmr.length;
3469 	fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3470 	fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3471 
3472 	qp->wqe_wr_id[qp->sq.prod].mr = mr;
3473 
3474 	return 0;
3475 }
3476 
3477 static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
3478 {
3479 	switch (opcode) {
3480 	case IB_WR_RDMA_WRITE:
3481 	case IB_WR_RDMA_WRITE_WITH_IMM:
3482 		return IB_WC_RDMA_WRITE;
3483 	case IB_WR_SEND_WITH_IMM:
3484 	case IB_WR_SEND:
3485 	case IB_WR_SEND_WITH_INV:
3486 		return IB_WC_SEND;
3487 	case IB_WR_RDMA_READ:
3488 	case IB_WR_RDMA_READ_WITH_INV:
3489 		return IB_WC_RDMA_READ;
3490 	case IB_WR_ATOMIC_CMP_AND_SWP:
3491 		return IB_WC_COMP_SWAP;
3492 	case IB_WR_ATOMIC_FETCH_AND_ADD:
3493 		return IB_WC_FETCH_ADD;
3494 	case IB_WR_REG_MR:
3495 		return IB_WC_REG_MR;
3496 	case IB_WR_LOCAL_INV:
3497 		return IB_WC_LOCAL_INV;
3498 	default:
3499 		return IB_WC_SEND;
3500 	}
3501 }
3502 
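/* Check that the WR fits: the SQ must not be full, the number of SGEs must
 * not exceed the SQ limit and the PBL chain must have room for a maximal
 * WQE. Each failure is reported only once per QP via qp->err_bitmap.
 */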
3503 static inline bool qedr_can_post_send(struct qedr_qp *qp,
3504 				      const struct ib_send_wr *wr)
3505 {
3506 	int wq_is_full, err_wr, pbl_is_full;
3507 	struct qedr_dev *dev = qp->dev;
3508 
3509 	/* prevent SQ overflow and/or processing of a bad WR */
3510 	err_wr = wr->num_sge > qp->sq.max_sges;
3511 	wq_is_full = qedr_wq_is_full(&qp->sq);
3512 	pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3513 		      QEDR_MAX_SQE_ELEMENTS_PER_SQE;
3514 	if (wq_is_full || err_wr || pbl_is_full) {
3515 		if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3516 			DP_ERR(dev,
3517 			       "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
3518 			       qp);
3519 			qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3520 		}
3521 
3522 		if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3523 			DP_ERR(dev,
3524 			       "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
3525 			       qp);
3526 			qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3527 		}
3528 
3529 		if (pbl_is_full &&
3530 		    !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3531 			DP_ERR(dev,
3532 			       "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
3533 			       qp);
3534 			qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3535 		}
3536 		return false;
3537 	}
3538 	return true;
3539 }
3540 
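/* Build and post a single send WR onto the SQ PBL chain. The caller
 * (qedr_post_send()) holds qp->q_lock and rings the doorbell after the
 * whole WR list has been processed. On a bad WR the chain producer and
 * prev_wqe_size are rolled back.
 */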
3541 static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3542 			    const struct ib_send_wr **bad_wr)
3543 {
3544 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3545 	struct qedr_qp *qp = get_qedr_qp(ibqp);
3546 	struct rdma_sq_atomic_wqe_1st *awqe1;
3547 	struct rdma_sq_atomic_wqe_2nd *awqe2;
3548 	struct rdma_sq_atomic_wqe_3rd *awqe3;
3549 	struct rdma_sq_send_wqe_2st *swqe2;
3550 	struct rdma_sq_local_inv_wqe *iwqe;
3551 	struct rdma_sq_rdma_wqe_2nd *rwqe2;
3552 	struct rdma_sq_send_wqe_1st *swqe;
3553 	struct rdma_sq_rdma_wqe_1st *rwqe;
3554 	struct rdma_sq_fmr_wqe_1st *fwqe1;
3555 	struct rdma_sq_common_wqe *wqe;
3556 	u32 length;
3557 	int rc = 0;
3558 	bool comp;
3559 
3560 	if (!qedr_can_post_send(qp, wr)) {
3561 		*bad_wr = wr;
3562 		return -ENOMEM;
3563 	}
3564 
3565 	wqe = qed_chain_produce(&qp->sq.pbl);
3566 	qp->wqe_wr_id[qp->sq.prod].signaled =
3567 		!!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3568 
3569 	wqe->flags = 0;
3570 	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3571 		   !!(wr->send_flags & IB_SEND_SOLICITED));
3572 	comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3573 	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3574 	SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3575 		   !!(wr->send_flags & IB_SEND_FENCE));
3576 	wqe->prev_wqe_size = qp->prev_wqe_size;
3577 
3578 	qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3579 
3580 	switch (wr->opcode) {
3581 	case IB_WR_SEND_WITH_IMM:
3582 		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3583 			rc = -EINVAL;
3584 			*bad_wr = wr;
3585 			break;
3586 		}
3587 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3588 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3589 		swqe->wqe_size = 2;
3590 		swqe2 = qed_chain_produce(&qp->sq.pbl);
3591 
3592 		swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3593 		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3594 						   wr, bad_wr);
3595 		swqe->length = cpu_to_le32(length);
3596 		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3597 		qp->prev_wqe_size = swqe->wqe_size;
3598 		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3599 		break;
3600 	case IB_WR_SEND:
3601 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3602 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3603 
3604 		swqe->wqe_size = 2;
3605 		swqe2 = qed_chain_produce(&qp->sq.pbl);
3606 		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3607 						   wr, bad_wr);
3608 		swqe->length = cpu_to_le32(length);
3609 		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3610 		qp->prev_wqe_size = swqe->wqe_size;
3611 		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3612 		break;
3613 	case IB_WR_SEND_WITH_INV:
3614 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3615 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3616 		swqe2 = qed_chain_produce(&qp->sq.pbl);
3617 		swqe->wqe_size = 2;
3618 		swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3619 		length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3620 						   wr, bad_wr);
3621 		swqe->length = cpu_to_le32(length);
3622 		qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3623 		qp->prev_wqe_size = swqe->wqe_size;
3624 		qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3625 		break;
3626 
3627 	case IB_WR_RDMA_WRITE_WITH_IMM:
3628 		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3629 			rc = -EINVAL;
3630 			*bad_wr = wr;
3631 			break;
3632 		}
3633 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3634 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3635 
3636 		rwqe->wqe_size = 2;
3637 		rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3638 		rwqe2 = qed_chain_produce(&qp->sq.pbl);
3639 		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3640 						   wr, bad_wr);
3641 		rwqe->length = cpu_to_le32(length);
3642 		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3643 		qp->prev_wqe_size = rwqe->wqe_size;
3644 		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3645 		break;
3646 	case IB_WR_RDMA_WRITE:
3647 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3648 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3649 
3650 		rwqe->wqe_size = 2;
3651 		rwqe2 = qed_chain_produce(&qp->sq.pbl);
3652 		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3653 						   wr, bad_wr);
3654 		rwqe->length = cpu_to_le32(length);
3655 		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3656 		qp->prev_wqe_size = rwqe->wqe_size;
3657 		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3658 		break;
3659 	case IB_WR_RDMA_READ_WITH_INV:
3660 		SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3661 		fallthrough;	/* otherwise handled identically to RDMA READ */
3662 
3663 	case IB_WR_RDMA_READ:
3664 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3665 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3666 
3667 		rwqe->wqe_size = 2;
3668 		rwqe2 = qed_chain_produce(&qp->sq.pbl);
3669 		length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3670 						   wr, bad_wr);
3671 		rwqe->length = cpu_to_le32(length);
3672 		qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3673 		qp->prev_wqe_size = rwqe->wqe_size;
3674 		qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3675 		break;
3676 
3677 	case IB_WR_ATOMIC_CMP_AND_SWP:
3678 	case IB_WR_ATOMIC_FETCH_AND_ADD:
3679 		awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3680 		awqe1->wqe_size = 4;
3681 
3682 		awqe2 = qed_chain_produce(&qp->sq.pbl);
3683 		DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3684 		awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3685 
3686 		awqe3 = qed_chain_produce(&qp->sq.pbl);
3687 
3688 		if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3689 			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3690 			DMA_REGPAIR_LE(awqe3->swap_data,
3691 				       atomic_wr(wr)->compare_add);
3692 		} else {
3693 			wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3694 			DMA_REGPAIR_LE(awqe3->swap_data,
3695 				       atomic_wr(wr)->swap);
3696 			DMA_REGPAIR_LE(awqe3->cmp_data,
3697 				       atomic_wr(wr)->compare_add);
3698 		}
3699 
3700 		qedr_prepare_sq_sges(qp, NULL, wr);
3701 
3702 		qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3703 		qp->prev_wqe_size = awqe1->wqe_size;
3704 		break;
3705 
3706 	case IB_WR_LOCAL_INV:
3707 		iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3708 		iwqe->wqe_size = 1;
3709 
3710 		iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3711 		iwqe->inv_l_key = wr->ex.invalidate_rkey;
3712 		qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3713 		qp->prev_wqe_size = iwqe->wqe_size;
3714 		break;
3715 	case IB_WR_REG_MR:
3716 		DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3717 		wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3718 		fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3719 		fwqe1->wqe_size = 2;
3720 
3721 		rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3722 		if (rc) {
3723 			DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3724 			*bad_wr = wr;
3725 			break;
3726 		}
3727 
3728 		qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3729 		qp->prev_wqe_size = fwqe1->wqe_size;
3730 		break;
3731 	default:
3732 		DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3733 		rc = -EINVAL;
3734 		*bad_wr = wr;
3735 		break;
3736 	}
3737 
3738 	if (*bad_wr) {
3739 		u16 value;
3740 
3741 		/* Restore prod to its position before
3742 		 * this WR was processed
3743 		 */
3744 		value = le16_to_cpu(qp->sq.db_data.data.value);
3745 		qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3746 
3747 		/* Restore prev_wqe_size */
3748 		qp->prev_wqe_size = wqe->prev_wqe_size;
3749 		rc = -EINVAL;
3750 		DP_ERR(dev, "POST SEND FAILED\n");
3751 	}
3752 
3753 	return rc;
3754 }
3755 
3756 int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3757 		   const struct ib_send_wr **bad_wr)
3758 {
3759 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3760 	struct qedr_qp *qp = get_qedr_qp(ibqp);
3761 	unsigned long flags;
3762 	int rc = 0;
3763 
3764 	*bad_wr = NULL;
3765 
3766 	if (qp->qp_type == IB_QPT_GSI)
3767 		return qedr_gsi_post_send(ibqp, wr, bad_wr);
3768 
3769 	spin_lock_irqsave(&qp->q_lock, flags);
3770 
3771 	if (rdma_protocol_roce(&dev->ibdev, 1)) {
3772 		if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3773 		    (qp->state != QED_ROCE_QP_STATE_ERR) &&
3774 		    (qp->state != QED_ROCE_QP_STATE_SQD)) {
3775 			spin_unlock_irqrestore(&qp->q_lock, flags);
3776 			*bad_wr = wr;
3777 			DP_DEBUG(dev, QEDR_MSG_CQ,
3778 				 "QP in wrong state! QP icid=0x%x state %d\n",
3779 				 qp->icid, qp->state);
3780 			return -EINVAL;
3781 		}
3782 	}
3783 
3784 	while (wr) {
3785 		rc = __qedr_post_send(ibqp, wr, bad_wr);
3786 		if (rc)
3787 			break;
3788 
3789 		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3790 
3791 		qedr_inc_sw_prod(&qp->sq);
3792 
3793 		qp->sq.db_data.data.value++;
3794 
3795 		wr = wr->next;
3796 	}
3797 
3798 	/* Trigger doorbell
3799 	 * If there was a failure in the first WR then it will be triggered in
3800 	 * vain. However, this is not harmful (as long as the producer value is
3801 	 * unchanged). For performance reasons we avoid checking for this
3802 	 * redundant doorbell.
3803 	 *
3804 	 * qp->wqe_wr_id is accessed during qedr_poll_cq, as
3805 	 * soon as we give the doorbell, we could get a completion
3806 	 * for this wr, therefore we need to make sure that the
3807 	 * memory is updated before giving the doorbell.
3808 	 * During qedr_poll_cq, rmb is called before accessing the
3809 	 * cqe. This covers for the smp_rmb as well.
3810 	 */
3811 	smp_wmb();
3812 	writel(qp->sq.db_data.raw, qp->sq.db);
3813 
3814 	spin_unlock_irqrestore(&qp->q_lock, flags);
3815 
3816 	return rc;
3817 }
3818 
3819 static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
3820 {
3821 	u32 used;
3822 
3823 	/* Calculate the number of elements used based on the producer
3824 	 * and consumer counts, and subtract it from the maximum number
3825 	 * of work requests supported to get the elements left.
3826 	 */
3827 	used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
3828 
3829 	return hw_srq->max_wr - used;
3830 }
3831 
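/* Post receive WRs to an SRQ. Each WR becomes a header element plus one
 * SGE element per scatter/gather entry on the SRQ PBL chain; the SGE and
 * WQE producer indices are then published to the device through the
 * producer pair in host memory, with dma_wmb() ordering the updates.
 */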
3832 int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3833 		       const struct ib_recv_wr **bad_wr)
3834 {
3835 	struct qedr_srq *srq = get_qedr_srq(ibsrq);
3836 	struct qedr_srq_hwq_info *hw_srq;
3837 	struct qedr_dev *dev = srq->dev;
3838 	struct qed_chain *pbl;
3839 	unsigned long flags;
3840 	int status = 0;
3841 	u32 num_sge;
3842 
3843 	spin_lock_irqsave(&srq->lock, flags);
3844 
3845 	hw_srq = &srq->hw_srq;
3846 	pbl = &srq->hw_srq.pbl;
3847 	while (wr) {
3848 		struct rdma_srq_wqe_header *hdr;
3849 		int i;
3850 
3851 		if (!qedr_srq_elem_left(hw_srq) ||
3852 		    wr->num_sge > srq->hw_srq.max_sges) {
3853 			DP_ERR(dev, "Can't post WR  (%d,%d) || (%d > %d)\n",
3854 			       hw_srq->wr_prod_cnt,
3855 			       atomic_read(&hw_srq->wr_cons_cnt),
3856 			       wr->num_sge, srq->hw_srq.max_sges);
3857 			status = -ENOMEM;
3858 			*bad_wr = wr;
3859 			break;
3860 		}
3861 
3862 		hdr = qed_chain_produce(pbl);
3863 		num_sge = wr->num_sge;
3864 		/* Set number of sge and work request id in header */
3865 		SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
3866 
3867 		srq->hw_srq.wr_prod_cnt++;
3868 		hw_srq->wqe_prod++;
3869 		hw_srq->sge_prod++;
3870 
3871 		DP_DEBUG(dev, QEDR_MSG_SRQ,
3872 			 "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
3873 			 wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
3874 
3875 		for (i = 0; i < wr->num_sge; i++) {
3876 			struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
3877 
3878 			/* Set SGE length, lkey and address */
3879 			SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
3880 				    wr->sg_list[i].length, wr->sg_list[i].lkey);
3881 
3882 			DP_DEBUG(dev, QEDR_MSG_SRQ,
3883 				 "[%d]: len %d key %x addr %x:%x\n",
3884 				 i, srq_sge->length, srq_sge->l_key,
3885 				 srq_sge->addr.hi, srq_sge->addr.lo);
3886 			hw_srq->sge_prod++;
3887 		}
3888 
3889 		/* Update WQE and SGE information before
3890 		 * updating producer.
3891 		 */
3892 		dma_wmb();
3893 
3894 		/* The SRQ producer is 8 bytes: the SGE producer index is
3895 		 * updated in the first 4 bytes and the WQE producer in the
3896 		 * next 4 bytes.
3897 		 */
3898 		srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
3899 		/* Make sure sge producer is updated first */
3900 		dma_wmb();
3901 		srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
3902 
3903 		wr = wr->next;
3904 	}
3905 
3906 	DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
3907 		 qed_chain_get_elem_left(pbl));
3908 	spin_unlock_irqrestore(&srq->lock, flags);
3909 
3910 	return status;
3911 }
3912 
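/* Post receive WRs to an ordinary (non-SRQ) RQ. GSI QPs are diverted to
 * qedr_gsi_post_recv(). The first SGE of every RQE encodes the total
 * number of SGEs, and a zero-length SGE is posted for WRs that carry none,
 * since the FW requires at least one.
 */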
3913 int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3914 		   const struct ib_recv_wr **bad_wr)
3915 {
3916 	struct qedr_qp *qp = get_qedr_qp(ibqp);
3917 	struct qedr_dev *dev = qp->dev;
3918 	unsigned long flags;
3919 	int status = 0;
3920 
3921 	if (qp->qp_type == IB_QPT_GSI)
3922 		return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3923 
3924 	spin_lock_irqsave(&qp->q_lock, flags);
3925 
3926 	while (wr) {
3927 		int i;
3928 
3929 		if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3930 		    QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3931 		    wr->num_sge > qp->rq.max_sges) {
3932 			DP_ERR(dev, "Can't post WR  (%d < %d) || (%d > %d)\n",
3933 			       qed_chain_get_elem_left_u32(&qp->rq.pbl),
3934 			       QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3935 			       qp->rq.max_sges);
3936 			status = -ENOMEM;
3937 			*bad_wr = wr;
3938 			break;
3939 		}
3940 		for (i = 0; i < wr->num_sge; i++) {
3941 			u32 flags = 0;
3942 			struct rdma_rq_sge *rqe =
3943 			    qed_chain_produce(&qp->rq.pbl);
3944 
3945 			/* First one must include the number
3946 			 * of SGE in the list
3947 			 */
3948 			if (!i)
3949 				SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3950 					  wr->num_sge);
3951 
3952 			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
3953 				  wr->sg_list[i].lkey);
3954 
3955 			RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3956 				   wr->sg_list[i].length, flags);
3957 		}
3958 
3959 		/* Special case of no SGEs. The FW requires between 1-4 SGEs,
3960 		 * so in this case we post 1 SGE with length zero. This is
3961 		 * because an RDMA write with immediate consumes an RQ entry.
3962 		 */
3963 		if (!wr->num_sge) {
3964 			u32 flags = 0;
3965 			struct rdma_rq_sge *rqe =
3966 			    qed_chain_produce(&qp->rq.pbl);
3967 
3968 			/* First one must include the number
3969 			 * of SGE in the list
3970 			 */
3971 			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
3972 			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3973 
3974 			RQ_SGE_SET(rqe, 0, 0, flags);
3975 			i = 1;
3976 		}
3977 
3978 		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3979 		qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3980 
3981 		qedr_inc_sw_prod(&qp->rq);
3982 
3983 		/* qp->rqe_wr_id is accessed during qedr_poll_cq, as
3984 		 * soon as we give the doorbell, we could get a completion
3985 		 * for this wr, therefore we need to make sure that the
3986 		 * memory is updated before giving the doorbell.
3987 		 * During qedr_poll_cq, rmb is called before accessing the
3988 		 * cqe. This covers for the smp_rmb as well.
3989 		 */
3990 		smp_wmb();
3991 
3992 		qp->rq.db_data.data.value++;
3993 
3994 		writel(qp->rq.db_data.raw, qp->rq.db);
3995 
3996 		if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
3997 			writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3998 		}
3999 
4000 		wr = wr->next;
4001 	}
4002 
4003 	spin_unlock_irqrestore(&qp->q_lock, flags);
4004 
4005 	return status;
4006 }
4007 
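/* A CQE is valid (i.e. newly written by hardware) when its toggle bit
 * matches the CQ's current PBL toggle value.
 */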
4008 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
4009 {
4010 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
4011 
4012 	return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
4013 		cq->pbl_toggle;
4014 }
4015 
4016 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
4017 {
4018 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
4019 	struct qedr_qp *qp;
4020 
4021 	qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
4022 						   resp_cqe->qp_handle.lo,
4023 						   u64);
4024 	return qp;
4025 }
4026 
4027 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
4028 {
4029 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
4030 
4031 	return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
4032 }
4033 
4034 /* Return latest CQE (needs processing) */
4035 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
4036 {
4037 	return cq->latest_cqe;
4038 }
4039 
4040 /* For FMR we need to increase the completed counter used by the FMR
4041  * algorithm that determines whether a PBL can be freed or not.
4042  * We need to perform this whether the work request was signaled or not. For
4043  * this purpose we call this function from the condition that checks if a WR
4044  * should be skipped, to make sure we don't miss it (possibly this FMR
4045  * operation was not signaled).
4046  */
4047 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
4048 {
4049 	if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
4050 		qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4051 }
4052 
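/* Walk SQ work requests from the current software consumer up to hw_cons,
 * generating at most num_entries work completions with the given status.
 * Unsignaled WRs are skipped (their FMR counter is still updated) unless
 * 'force' is set, and the SQ PBL elements of every processed WR are
 * consumed either way.
 */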
4053 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
4054 		       struct qedr_cq *cq, int num_entries,
4055 		       struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
4056 		       int force)
4057 {
4058 	u16 cnt = 0;
4059 
4060 	while (num_entries && qp->sq.wqe_cons != hw_cons) {
4061 		if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
4062 			qedr_chk_if_fmr(qp);
4063 			/* skip WC */
4064 			goto next_cqe;
4065 		}
4066 
4067 		/* fill WC */
4068 		wc->status = status;
4069 		wc->vendor_err = 0;
4070 		wc->wc_flags = 0;
4071 		wc->src_qp = qp->id;
4072 		wc->qp = &qp->ibqp;
4073 
4074 		wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
4075 		wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
4076 
4077 		switch (wc->opcode) {
4078 		case IB_WC_RDMA_WRITE:
4079 			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4080 			break;
4081 		case IB_WC_COMP_SWAP:
4082 		case IB_WC_FETCH_ADD:
4083 			wc->byte_len = 8;
4084 			break;
4085 		case IB_WC_REG_MR:
4086 			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4087 			break;
4088 		case IB_WC_RDMA_READ:
4089 		case IB_WC_SEND:
4090 			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4091 			break;
4092 		default:
4093 			break;
4094 		}
4095 
4096 		num_entries--;
4097 		wc++;
4098 		cnt++;
4099 next_cqe:
4100 		while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
4101 			qed_chain_consume(&qp->sq.pbl);
4102 		qedr_inc_sw_cons(&qp->sq);
4103 	}
4104 
4105 	return cnt;
4106 }
4107 
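/* Handle a requester CQE: on success or flush, complete WRs up to
 * req->sq_cons; on any other error, complete the preceding WRs as
 * successful and report the failing WR with a status translated from the
 * hardware error code.
 */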
4108 static int qedr_poll_cq_req(struct qedr_dev *dev,
4109 			    struct qedr_qp *qp, struct qedr_cq *cq,
4110 			    int num_entries, struct ib_wc *wc,
4111 			    struct rdma_cqe_requester *req)
4112 {
4113 	int cnt = 0;
4114 
4115 	switch (req->status) {
4116 	case RDMA_CQE_REQ_STS_OK:
4117 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4118 				  IB_WC_SUCCESS, 0);
4119 		break;
4120 	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
4121 		if (qp->state != QED_ROCE_QP_STATE_ERR)
4122 			DP_DEBUG(dev, QEDR_MSG_CQ,
4123 				 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4124 				 cq->icid, qp->icid);
4125 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4126 				  IB_WC_WR_FLUSH_ERR, 1);
4127 		break;
4128 	default:
4129 		/* process all WQEs before the consumer */
4130 		qp->state = QED_ROCE_QP_STATE_ERR;
4131 		cnt = process_req(dev, qp, cq, num_entries, wc,
4132 				  req->sq_cons - 1, IB_WC_SUCCESS, 0);
4133 		wc += cnt;
4134 		/* if we have extra WC fill it with actual error info */
4135 		if (cnt < num_entries) {
4136 			enum ib_wc_status wc_status;
4137 
4138 			switch (req->status) {
4139 			case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
4140 				DP_ERR(dev,
4141 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4142 				       cq->icid, qp->icid);
4143 				wc_status = IB_WC_BAD_RESP_ERR;
4144 				break;
4145 			case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
4146 				DP_ERR(dev,
4147 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4148 				       cq->icid, qp->icid);
4149 				wc_status = IB_WC_LOC_LEN_ERR;
4150 				break;
4151 			case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
4152 				DP_ERR(dev,
4153 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4154 				       cq->icid, qp->icid);
4155 				wc_status = IB_WC_LOC_QP_OP_ERR;
4156 				break;
4157 			case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
4158 				DP_ERR(dev,
4159 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4160 				       cq->icid, qp->icid);
4161 				wc_status = IB_WC_LOC_PROT_ERR;
4162 				break;
4163 			case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
4164 				DP_ERR(dev,
4165 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4166 				       cq->icid, qp->icid);
4167 				wc_status = IB_WC_MW_BIND_ERR;
4168 				break;
4169 			case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
4170 				DP_ERR(dev,
4171 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4172 				       cq->icid, qp->icid);
4173 				wc_status = IB_WC_REM_INV_REQ_ERR;
4174 				break;
4175 			case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
4176 				DP_ERR(dev,
4177 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4178 				       cq->icid, qp->icid);
4179 				wc_status = IB_WC_REM_ACCESS_ERR;
4180 				break;
4181 			case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
4182 				DP_ERR(dev,
4183 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4184 				       cq->icid, qp->icid);
4185 				wc_status = IB_WC_REM_OP_ERR;
4186 				break;
4187 			case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
4188 				DP_ERR(dev,
4189 				       "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4190 				       cq->icid, qp->icid);
4191 				wc_status = IB_WC_RNR_RETRY_EXC_ERR;
4192 				break;
4193 			case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
4194 				DP_ERR(dev,
4195 				       "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4196 				       cq->icid, qp->icid);
4197 				wc_status = IB_WC_RETRY_EXC_ERR;
4198 				break;
4199 			default:
4200 				DP_ERR(dev,
4201 				       "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4202 				       cq->icid, qp->icid);
4203 				wc_status = IB_WC_GENERAL_ERR;
4204 			}
4205 			cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
4206 					   wc_status, 1);
4207 		}
4208 	}
4209 
4210 	return cnt;
4211 }
4212 
4213 static inline int qedr_cqe_resp_status_to_ib(u8 status)
4214 {
4215 	switch (status) {
4216 	case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
4217 		return IB_WC_LOC_ACCESS_ERR;
4218 	case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
4219 		return IB_WC_LOC_LEN_ERR;
4220 	case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
4221 		return IB_WC_LOC_QP_OP_ERR;
4222 	case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
4223 		return IB_WC_LOC_PROT_ERR;
4224 	case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
4225 		return IB_WC_MW_BIND_ERR;
4226 	case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
4227 		return IB_WC_REM_INV_RD_REQ_ERR;
4228 	case RDMA_CQE_RESP_STS_OK:
4229 		return IB_WC_SUCCESS;
4230 	default:
4231 		return IB_WC_GENERAL_ERR;
4232 	}
4233 }
4234 
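/* Fill a successful responder WC from the CQE flags: immediate data or
 * invalidated rkey, plus the RECV vs RECV_RDMA_WITH_IMM opcode. Returns
 * -EINVAL for flag combinations the CQE must not carry.
 */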
4235 static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
4236 					  struct ib_wc *wc)
4237 {
4238 	wc->status = IB_WC_SUCCESS;
4239 	wc->byte_len = le32_to_cpu(resp->length);
4240 
4241 	if (resp->flags & QEDR_RESP_IMM) {
4242 		wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
4243 		wc->wc_flags |= IB_WC_WITH_IMM;
4244 
4245 		if (resp->flags & QEDR_RESP_RDMA)
4246 			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4247 
4248 		if (resp->flags & QEDR_RESP_INV)
4249 			return -EINVAL;
4250 
4251 	} else if (resp->flags & QEDR_RESP_INV) {
4252 		wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
4253 		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4254 
4255 		if (resp->flags & QEDR_RESP_RDMA)
4256 			return -EINVAL;
4257 
4258 	} else if (resp->flags & QEDR_RESP_RDMA) {
4259 		return -EINVAL;
4260 	}
4261 
4262 	return 0;
4263 }
4264 
4265 static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4266 			       struct qedr_cq *cq, struct ib_wc *wc,
4267 			       struct rdma_cqe_responder *resp, u64 wr_id)
4268 {
4269 	/* Must fill fields before qedr_set_ok_cqe_resp_wc() */
4270 	wc->opcode = IB_WC_RECV;
4271 	wc->wc_flags = 0;
4272 
4273 	if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
4274 		if (qedr_set_ok_cqe_resp_wc(resp, wc))
4275 			DP_ERR(dev,
4276 			       "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
4277 			       cq, cq->icid, resp->flags);
4278 
4279 	} else {
4280 		wc->status = qedr_cqe_resp_status_to_ib(resp->status);
4281 		if (wc->status == IB_WC_GENERAL_ERR)
4282 			DP_ERR(dev,
4283 			       "CQ %p (icid=%d) contains an invalid CQE status %d\n",
4284 			       cq, cq->icid, resp->status);
4285 	}
4286 
4287 	/* Fill the rest of the WC */
4288 	wc->vendor_err = 0;
4289 	wc->src_qp = qp->id;
4290 	wc->qp = &qp->ibqp;
4291 	wc->wr_id = wr_id;
4292 }
4293 
4294 static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4295 				struct qedr_cq *cq, struct ib_wc *wc,
4296 				struct rdma_cqe_responder *resp)
4297 {
4298 	struct qedr_srq *srq = qp->srq;
4299 	u64 wr_id;
4300 
4301 	wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
4302 			 le32_to_cpu(resp->srq_wr_id.lo), u64);
4303 
4304 	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4305 		wc->status = IB_WC_WR_FLUSH_ERR;
4306 		wc->vendor_err = 0;
4307 		wc->wr_id = wr_id;
4308 		wc->byte_len = 0;
4309 		wc->src_qp = qp->id;
4310 		wc->qp = &qp->ibqp;
4311 		wc->wr_id = wr_id;
4312 	} else {
4313 		__process_resp_one(dev, qp, cq, wc, resp, wr_id);
4314 	}
4315 	atomic_inc(&srq->hw_srq.wr_cons_cnt);
4316 
4317 	return 1;
4318 }
4319 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
4320 			    struct qedr_cq *cq, struct ib_wc *wc,
4321 			    struct rdma_cqe_responder *resp)
4322 {
4323 	u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4324 
4325 	__process_resp_one(dev, qp, cq, wc, resp, wr_id);
4326 
4327 	while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4328 		qed_chain_consume(&qp->rq.pbl);
4329 	qedr_inc_sw_cons(&qp->rq);
4330 
4331 	return 1;
4332 }
4333 
4334 static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
4335 			      int num_entries, struct ib_wc *wc, u16 hw_cons)
4336 {
4337 	u16 cnt = 0;
4338 
4339 	while (num_entries && qp->rq.wqe_cons != hw_cons) {
4340 		/* fill WC */
4341 		wc->status = IB_WC_WR_FLUSH_ERR;
4342 		wc->vendor_err = 0;
4343 		wc->wc_flags = 0;
4344 		wc->src_qp = qp->id;
4345 		wc->byte_len = 0;
4346 		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4347 		wc->qp = &qp->ibqp;
4348 		num_entries--;
4349 		wc++;
4350 		cnt++;
4351 		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4352 			qed_chain_consume(&qp->rq.pbl);
4353 		qedr_inc_sw_cons(&qp->rq);
4354 	}
4355 
4356 	return cnt;
4357 }
4358 
4359 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4360 				 struct rdma_cqe_responder *resp, int *update)
4361 {
4362 	if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
4363 		consume_cqe(cq);
4364 		*update |= 1;
4365 	}
4366 }
4367 
4368 static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
4369 				 struct qedr_cq *cq, int num_entries,
4370 				 struct ib_wc *wc,
4371 				 struct rdma_cqe_responder *resp)
4372 {
4373 	int cnt;
4374 
4375 	cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
4376 	consume_cqe(cq);
4377 
4378 	return cnt;
4379 }
4380 
4381 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
4382 			     struct qedr_cq *cq, int num_entries,
4383 			     struct ib_wc *wc, struct rdma_cqe_responder *resp,
4384 			     int *update)
4385 {
4386 	int cnt;
4387 
4388 	if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4389 		cnt = process_resp_flush(qp, cq, num_entries, wc,
4390 					 resp->rq_cons_or_srq_id);
4391 		try_consume_resp_cqe(cq, qp, resp, update);
4392 	} else {
4393 		cnt = process_resp_one(dev, qp, cq, wc, resp);
4394 		consume_cqe(cq);
4395 		*update |= 1;
4396 	}
4397 
4398 	return cnt;
4399 }
4400 
4401 static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
4402 				struct rdma_cqe_requester *req, int *update)
4403 {
4404 	if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
4405 		consume_cqe(cq);
4406 		*update |= 1;
4407 	}
4408 }
4409 
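/* Poll up to num_entries completions. CQEs are consumed from the CQ PBL
 * chain while their toggle bit is valid and are dispatched by type
 * (requester, responder RQ, responder SRQ); the CQ doorbell is rung once
 * at the end, if anything was consumed, with the new consumer index.
 */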
4410 int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
4411 {
4412 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
4413 	struct qedr_cq *cq = get_qedr_cq(ibcq);
4414 	union rdma_cqe *cqe;
4415 	u32 old_cons, new_cons;
4416 	unsigned long flags;
4417 	int update = 0;
4418 	int done = 0;
4419 
4420 	if (cq->destroyed) {
4421 		DP_ERR(dev,
4422 		       "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
4423 		       cq, cq->icid);
4424 		return 0;
4425 	}
4426 
4427 	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
4428 		return qedr_gsi_poll_cq(ibcq, num_entries, wc);
4429 
4430 	spin_lock_irqsave(&cq->cq_lock, flags);
4431 	cqe = cq->latest_cqe;
4432 	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4433 	while (num_entries && is_valid_cqe(cq, cqe)) {
4434 		struct qedr_qp *qp;
4435 		int cnt = 0;
4436 
4437 		/* prevent speculative reads of any field of CQE */
4438 		rmb();
4439 
4440 		qp = cqe_get_qp(cqe);
4441 		if (!qp) {
4442 			WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
4443 			break;
4444 		}
4445 
4446 		wc->qp = &qp->ibqp;
4447 
4448 		switch (cqe_get_type(cqe)) {
4449 		case RDMA_CQE_TYPE_REQUESTER:
4450 			cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
4451 					       &cqe->req);
4452 			try_consume_req_cqe(cq, qp, &cqe->req, &update);
4453 			break;
4454 		case RDMA_CQE_TYPE_RESPONDER_RQ:
4455 			cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
4456 						&cqe->resp, &update);
4457 			break;
4458 		case RDMA_CQE_TYPE_RESPONDER_SRQ:
4459 			cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
4460 						    wc, &cqe->resp);
4461 			update = 1;
4462 			break;
4463 		case RDMA_CQE_TYPE_INVALID:
4464 		default:
4465 			DP_ERR(dev, "Error: invalid CQE type = %d\n",
4466 			       cqe_get_type(cqe));
4467 		}
4468 		num_entries -= cnt;
4469 		wc += cnt;
4470 		done += cnt;
4471 
4472 		cqe = get_cqe(cq);
4473 	}
4474 	new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4475 
4476 	cq->cq_cons += new_cons - old_cons;
4477 
4478 	if (update)
4479 		/* The doorbell notifies about the latest VALID entry,
4480 		 * but the chain already points to the next INVALID one.
4481 		 */
4482 		doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
4483 
4484 	spin_unlock_irqrestore(&cq->cq_lock, flags);
4485 	return done;
4486 }
4487 
4488 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
4489 		     u32 port_num, const struct ib_wc *in_wc,
4490 		     const struct ib_grh *in_grh, const struct ib_mad *in,
4491 		     struct ib_mad *out_mad, size_t *out_mad_size,
4492 		     u16 *out_mad_pkey_index)
4493 {
4494 	return IB_MAD_RESULT_SUCCESS;
4495 }
4496