xref: /linux/drivers/infiniband/hw/bnxt_re/qplib_fp.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

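/* Reset the SQ's phantom-completion search state (condition,
 * send_phantom, single) so that any in-progress phantom WQE tracking
 * is abandoned; called before the QP is added to the SQ flush list.
 */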
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

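/* Take both CQ flush locks, SCQ first then RCQ. When the QP uses the
 * same CQ for send and receive only the one lock is taken; the
 * __acquire()/__release() annotations keep sparse happy in that case.
 */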
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

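/* Deferred CQN notification: runs from the nq->cqn_wq workqueue and
 * invokes the CQN handler for an armed CQ outside of IRQ context.
 */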
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq  = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

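/* Allocate DMA-coherent header buffers for the SQ/RQ, one slot per
 * WQE; used on the QP1 path where headers are built in software.
 */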
static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

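/* Scrub the NQ of pending CQ notifications that reference this CQ:
 * zero out matching cq_handle words so a stale NQE cannot be
 * dispatched to a CQ that is going away, and count the events seen.
 */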
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		raw_cons++;
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

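/* NQ service tasklet: drain up to nq->budget notification entries,
 * fanning CQ notifications out to the CQN handler and SRQ threshold
 * events to the SRQN handler, then ring the NQ doorbell to re-arm.
 */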
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	int num_srqne_processed = 0;
	int num_cqne_processed = 0;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (!nq->cqn_handler(nq, (cq)))
				num_cqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (!nq->srqn_handler(nq,
					      (struct bnxt_qplib_srq *)q_handle,
					      nqsrqe->event))
				num_srqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	}
	spin_unlock_bh(&hwq->lock);
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	tasklet_disable(&nq->nq_tasklet);
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	if (nq->requested) {
		irq_set_affinity_hint(nq->msix_vec, NULL);
		free_irq(nq->msix_vec, nq);
		nq->requested = false;
	}
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

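/* (Re)attach the NQ to an MSI-X vector: set up or re-enable the
 * service tasklet, request the IRQ, and give the vector an affinity
 * hint matching the NQ index before re-arming the NQ doorbell.
 */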
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc)
		return rc;

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);

	return rc;
}

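/* Map the NQ consumer doorbell from the doorbell BAR at the given
 * offset and hook it into the NQ's doorbell info.
 */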
static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;
	int rc = 0;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		rc = -ENOMEM;
		goto fail;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;
fail:
	return rc;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc = -1;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_srq req;
	struct creq_destroy_srq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
					  (struct creq_base *)&resp, NULL, 0);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

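/* Allocate the SRQ hardware queue and software queue, issue
 * CREATE_SRQ to the firmware, and initialize the free list of
 * software entries plus the SRQ doorbell info.
 */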
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp;
	struct cmdq_create_srq req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}

	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);
exit:
	return rc;
}

int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 sw_prod, sw_cons, count = 0;

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);

	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_srq req;
	struct creq_query_srq_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_srq_resp_sb *sb;
	u16 cmd_flags = 0;
	int rc = 0;

	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);

	/* Configure the request */
	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf->sb;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	srq->threshold = le16_to_cpu(sb->srq_limit);
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);

	return rc;
}

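/* Post one receive WQE to the SRQ: pop a free software slot, build
 * the RQE in place, bump the producer and ring the SRQ doorbell,
 * re-arming the SRQ if a deferred arm was requested and the queue
 * has refilled past the threshold.
 */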
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 sw_prod, sw_cons, count = 0;
	int i, rc = 0, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		rc = -EINVAL;
		spin_unlock(&srq_hwq->lock);
		goto done;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	srq_hwq->prod++;

	spin_lock(&srq_hwq->lock);
	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	/* The lock here is needed only to get a consistent read of
	 * srq_hwq->cons for the fill-level computation below.
	 */
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req == true && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}
done:
	return rc;
}

/* QP */

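/* Allocate the software queue shadow array and link its entries
 * into a circular free list via next_idx.
 */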
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int rc = 0;
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq) {
		rc = -ENOMEM;
		goto out;
	}

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;
out:
	return rc;
}

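/* Create the special QP1 (GSI) QP: allocate SQ/RQ hardware and
 * software queues plus the QP1 header buffers, issue CREATE_QP1 to
 * the firmware, and register the QP in the RCFW QP table.
 */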
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct creq_create_qp1_resp resp;
	struct cmdq_create_qp1 req;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}

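/* The PSN search area lives in the padding pages past the SQ proper;
 * record where it starts (page and offset within the page) and its
 * stride so producers can locate the PSN entry for a given slot.
 */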
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

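/* Create a regular QP: allocate SQ/RQ queues, the PSN search area
 * for RC QPs, and the ORRQ/IRRQ context queues, then issue CREATE_QP
 * and register the QP in the RCFW QP table.
 */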
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct creq_create_qp_resp resp;
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	struct cmdq_create_qp req;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		goto exit;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
exit:
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

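/* Trim the modify mask according to the current QP state so only
 * attributes the firmware accepts for this transition are sent.
 */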
static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

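/* Translate the qplib QP attributes into a MODIFY_QP firmware
 * command, applying only the fields selected by modify_flags.
 */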
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_modify_qp req;
	struct creq_modify_qp_resp resp;
	u16 cmd_flags = 0;
	u32 temp32[4];
	u32 bmask;
	int rc;

	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu = qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

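/* QUERY_QP returns its data in a side buffer; decode that buffer
 * back into the qplib QP attribute fields, mapping the hardware
 * SGID index back to a table index.
 */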
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_qp req;
	struct creq_query_qp_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_qp_resp_sb *sb;
	u16 cmd_flags = 0;
	u32 temp32[4];
	int i, rc = 0;

	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
				  true : false;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}

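/* Walk the CQ ring and zero the qp_handle of every valid CQE that
 * still points at the given QP, so later polling cannot hand back
 * completions for a destroyed QP.
 */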
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_qp req;
	struct creq_destroy_qp_resp resp;
	u16 cmd_flags = 0;
	u32 tbl_indx;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);

	req.qp_cid = cpu_to_le32(qp->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

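/* Record the start/next PSN and opcode for this WQE in the PSN
 * search area; gen P5 chips use the extended format that also
 * carries the starting slot index.
 */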
static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

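/* Copy inline data from the SGEs directly into the SQ slots, packing
 * sq_sge-sized (16-byte) chunks; returns the total length copied or
 * -ENOMEM if it would exceed max_inline_data.
 */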
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			goto bad;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
					(sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
bad:
	return -ENOMEM;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

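/* Work out how many 16-byte slots this WQE needs (header plus SGEs
 * or aligned inline data), the queue-full delta in slots, and the
 * wqe_size to advertise; static WQE mode always consumes a fixed
 * 8 slots (128B).
 */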
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Using sq_send_hdr here is a slight misnomer: the RQ WQE header has the same size. */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

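/* Point the swq at its PSN search entry inside the padding area,
 * computed from the WQE's starting slot.
 */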
static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;

	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}

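/* Post one send WQE: reserve software and hardware slots, build the
 * opcode-specific header and SGE/inline payload in place, and advance
 * the SQ PSN; for error-state QPs the WQE is queued for the flush
 * logic so poll_cq can still generate a completion.
 */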
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, qdf = 0;
	void *base_hdr;
	void *ext_hdr;
	__le32 temp32;
	u32 wqe_idx;
	u32 slots;
	u16 idx;

	hwq = &sq->hwq;
	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		dev_err(&hwq->pdev->dev,
			"QPLIB: FP: QP (0x%x) is in the 0x%x state",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
		dev_err(&hwq->pdev->dev,
			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
	bnxt_qplib_pull_psn_buff(sq, swq);

	idx = 0;
	swq->slot_idx = hwq->prod;
	swq->slots = slots;
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	swq->start_psn = sq->psn & BTH_PSN_MASK;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
		/* Copy the inline data */
		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
	else
		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
					       &idx);
	if (data_len < 0)
		goto queue_err;
	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
			/* Assemble info for Raw Ethertype QPs */

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_sz;
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		fallthrough;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
		struct sq_send_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->length = cpu_to_le32(data_len);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
						      SQ_SEND_DST_QP_MASK);
			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
						    SQ_SEND_AVID_MASK);
		} else {
			sqe->length = cpu_to_le32(data_len);
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
		struct sq_rdma_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_sz;
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
		struct sq_atomic_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
		struct sq_fr_pmr_hdr *sqe = base_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
					SQ_FR_PMR_NUMLEVELS_MASK);

		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1898 		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1899 
1900 		break;
1901 	}
1902 	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1903 	{
1904 		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
1905 		struct sq_bind_hdr *sqe = base_hdr;
1906 
1907 		sqe->wqe_type = wqe->type;
1908 		sqe->flags = wqe->flags;
1909 		sqe->access_cntl = wqe->bind.access_cntl;
1910 		sqe->mw_type_zero_based = wqe->bind.mw_type |
1911 			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1912 		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1913 		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1914 		ext_sqe->va = cpu_to_le64(wqe->bind.va);
1915 		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
1916 		break;
1917 	}
1918 	default:
1919 		/* Bad wqe, return error */
1920 		rc = -EINVAL;
1921 		goto done;
1922 	}
1923 	swq->next_psn = sq->psn & BTH_PSN_MASK;
1924 	bnxt_qplib_fill_psn_search(qp, wqe, swq);
1925 queue_err:
1926 	bnxt_qplib_swq_mod_start(sq, wqe_idx);
1927 	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
1928 	qp->wqe_cnt++;
1929 done:
1930 	if (sch_handler) {
1931 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1932 		if (nq_work) {
1933 			nq_work->cq = qp->scq;
1934 			nq_work->nq = qp->scq->nq;
1935 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1936 			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1937 		} else {
1938 			dev_err(&hwq->pdev->dev,
1939 				"FP: Failed to allocate SQ nq_work!\n");
1940 			rc = -ENOMEM;
1941 		}
1942 	}
1943 	return rc;
1944 }
1945 
1946 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1947 {
1948 	struct bnxt_qplib_q *rq = &qp->rq;
1949 
1950 	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
1951 }
1952 
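/*
 * As on the send side, bnxt_qplib_post_recv() only stages the RQE and
 * the doorbell is rung separately via bnxt_qplib_post_recv_db().  Every
 * RQE occupies a fixed rq->dbinfo.max_slot slots, which is also what
 * the queue-full check below is based on.
 */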
1953 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1954 			 struct bnxt_qplib_swqe *wqe)
1955 {
1956 	struct bnxt_qplib_nq_work *nq_work = NULL;
1957 	struct bnxt_qplib_q *rq = &qp->rq;
1958 	struct rq_wqe_hdr *base_hdr;
1959 	struct rq_ext_hdr *ext_hdr;
1960 	struct bnxt_qplib_hwq *hwq;
1961 	struct bnxt_qplib_swq *swq;
1962 	bool sch_handler = false;
1963 	u16 wqe_sz, idx;
1964 	u32 wqe_idx;
1965 	int rc = 0;
1966 
1967 	hwq = &rq->hwq;
1968 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1969 		dev_err(&hwq->pdev->dev,
1970 			"FP: QP (0x%x) is in the 0x%x state\n",
1971 			qp->id, qp->state);
1972 		rc = -EINVAL;
1973 		goto done;
1974 	}
1975 
1976 	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
1977 		dev_err(&hwq->pdev->dev,
1978 			"FP: QP (0x%x) RQ is full!\n", qp->id);
1979 		rc = -EINVAL;
1980 		goto done;
1981 	}
1982 
1983 	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
1984 	swq->wr_id = wqe->wr_id;
1985 	swq->slots = rq->dbinfo.max_slot;
1986 
1987 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1988 		sch_handler = true;
1989 		dev_dbg(&hwq->pdev->dev,
1990 			"%s: Error QP. Scheduling for poll_cq\n", __func__);
1991 		goto queue_err;
1992 	}
1993 
1994 	idx = 0;
1995 	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1996 	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1997 	memset(base_hdr, 0, sizeof(struct sq_sge));
1998 	memset(ext_hdr, 0, sizeof(struct sq_sge));
1999 	wqe_sz = (sizeof(struct rq_wqe_hdr) +
2000 		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2001 	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2002 	if (!wqe->num_sge) {
2003 		struct sq_sge *sge;
2004 
2005 		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2006 		sge->size = 0;
2007 		wqe_sz++;
2008 	}
2009 	base_hdr->wqe_type = wqe->type;
2010 	base_hdr->flags = wqe->flags;
2011 	base_hdr->wqe_size = wqe_sz;
2012 	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2013 queue_err:
2014 	bnxt_qplib_swq_mod_start(rq, wqe_idx);
2015 	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
2016 done:
2017 	if (sch_handler) {
2018 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2019 		if (nq_work) {
2020 			nq_work->cq = qp->rcq;
2021 			nq_work->nq = qp->rcq->nq;
2022 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2023 			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2024 		} else {
2025 			dev_err(&hwq->pdev->dev,
2026 				"FP: Failed to allocate RQ nq_work!\n");
2027 			rc = -ENOMEM;
2028 		}
2029 	}
2030 
2031 	return rc;
2032 }
2033 
2034 /* CQ */
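/*
 * CQ creation: allocate the CQE ring (HWQ), issue CREATE_CQ to firmware
 * over the RCFW channel, then populate the doorbell info and enable the
 * arming state via ARMENA.  The caller is expected to have set up
 * cq->max_wqe, cq->dpi, cq->sg_info, cq->cq_handle and cq->cnq_hw_ring_id
 * beforehand (a rough sketch of the contract, inferred from the fields
 * consumed below).
 */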
2035 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2036 {
2037 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2038 	struct bnxt_qplib_hwq_attr hwq_attr = {};
2039 	struct creq_create_cq_resp resp;
2040 	struct bnxt_qplib_pbl *pbl;
2041 	struct cmdq_create_cq req;
2042 	u16 cmd_flags = 0;
2043 	u32 pg_sz_lvl;
2044 	int rc;
2045 
2046 	hwq_attr.res = res;
2047 	hwq_attr.depth = cq->max_wqe;
2048 	hwq_attr.stride = sizeof(struct cq_base);
2049 	hwq_attr.type = HWQ_TYPE_QUEUE;
2050 	hwq_attr.sginfo = &cq->sg_info;
2051 	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2052 	if (rc)
2053 		goto exit;
2054 
2055 	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
2056 
2057 	if (!cq->dpi) {
2058 		dev_err(&rcfw->pdev->dev,
2059 			"FP: CREATE_CQ failed due to NULL DPI\n");
2060 		rc = -EINVAL;
		goto fail;
2061 	}
2062 	req.dpi = cpu_to_le32(cq->dpi->dpi);
2063 	req.cq_handle = cpu_to_le64(cq->cq_handle);
2064 	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
2065 	pbl = &cq->hwq.pbl[PBL_LVL_0];
2066 	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2067 		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
2068 	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2069 	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2070 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2071 	req.cq_fco_cnq_id = cpu_to_le32(
2072 			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2073 			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2074 
2075 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
2076 					  (void *)&resp, NULL, 0);
2077 	if (rc)
2078 		goto fail;
2079 
2080 	cq->id = le32_to_cpu(resp.xid);
2081 	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2082 	init_waitqueue_head(&cq->waitq);
2083 	INIT_LIST_HEAD(&cq->sqf_head);
2084 	INIT_LIST_HEAD(&cq->rqf_head);
2085 	spin_lock_init(&cq->compl_lock);
2086 	spin_lock_init(&cq->flush_lock);
2087 
2088 	cq->dbinfo.hwq = &cq->hwq;
2089 	cq->dbinfo.xid = cq->id;
2090 	cq->dbinfo.db = cq->dpi->dbr;
2091 	cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
2092 
2093 	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2094 
2095 	return 0;
2096 
2097 fail:
2098 	bnxt_qplib_free_hwq(res, &cq->hwq);
2099 exit:
2100 	return rc;
2101 }
2102 
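/*
 * Destroying a CQ must not free the CQE ring while notification events
 * for it may still be in flight.  Firmware returns the total number of
 * CNQ events generated for this CQ, and __wait_for_all_nqes() waits
 * until the driver has seen them all before the HWQ memory is freed.
 */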
2103 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2104 {
2105 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2106 	struct cmdq_destroy_cq req;
2107 	struct creq_destroy_cq_resp resp;
2108 	u16 total_cnq_events;
2109 	u16 cmd_flags = 0;
2110 	int rc;
2111 
2112 	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
2113 
2114 	req.cq_cid = cpu_to_le32(cq->id);
2115 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
2116 					  (void *)&resp, NULL, 0);
2117 	if (rc)
2118 		return rc;
2119 	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2120 	__wait_for_all_nqes(cq, total_cnq_events);
2121 	bnxt_qplib_free_hwq(res, &cq->hwq);
2122 	return 0;
2123 }
2124 
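/*
 * Flush helpers: once a QP has moved to the error state, outstanding
 * SWQEs/RQEs are completed in software with FLUSHED_ERR status instead
 * of by hardware.  Both helpers consume *budget as they fabricate CQEs
 * and return -EAGAIN if the budget runs out before the queue drains;
 * fence WQEs (BNXT_QPLIB_FENCE_WRID) are skipped on the SQ side.
 */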
2125 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2126 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2127 {
2128 	struct bnxt_qplib_cqe *cqe;
2129 	u32 start, last;
2130 	int rc = 0;
2131 
2132 	/* Now complete all outstanding SQEs with FLUSHED_ERR */
2133 	start = sq->swq_start;
2134 	cqe = *pcqe;
2135 	while (*budget) {
2136 		last = sq->swq_last;
2137 		if (start == last)
2138 			break;
2139 		/* Skip the FENCE WQE completions */
2140 		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2141 			bnxt_qplib_cancel_phantom_processing(qp);
2142 			goto skip_compl;
2143 		}
2144 		memset(cqe, 0, sizeof(*cqe));
2145 		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2146 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2147 		cqe->qp_handle = (u64)(unsigned long)qp;
2148 		cqe->wr_id = sq->swq[last].wr_id;
2149 		cqe->src_qp = qp->id;
2150 		cqe->type = sq->swq[last].type;
2151 		cqe++;
2152 		(*budget)--;
2153 skip_compl:
2154 		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
2155 		sq->swq_last = sq->swq[last].next_idx;
2156 	}
2157 	*pcqe = cqe;
2158 	if (!(*budget) && sq->swq_last != start)
2159 		/* Out of budget */
2160 		rc = -EAGAIN;
2161 
2162 	return rc;
2163 }
2164 
2165 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2166 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2167 {
2168 	struct bnxt_qplib_cqe *cqe;
2169 	u32 start, last;
2170 	int opcode = 0;
2171 	int rc = 0;
2172 
2173 	switch (qp->type) {
2174 	case CMDQ_CREATE_QP1_TYPE_GSI:
2175 		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2176 		break;
2177 	case CMDQ_CREATE_QP_TYPE_RC:
2178 		opcode = CQ_BASE_CQE_TYPE_RES_RC;
2179 		break;
2180 	case CMDQ_CREATE_QP_TYPE_UD:
2181 	case CMDQ_CREATE_QP_TYPE_GSI:
2182 		opcode = CQ_BASE_CQE_TYPE_RES_UD;
2183 		break;
2184 	}
2185 
2186 	/* Flush the rest of the RQ */
2187 	start = rq->swq_start;
2188 	cqe = *pcqe;
2189 	while (*budget) {
2190 		last = rq->swq_last;
2191 		if (last == start)
2192 			break;
2193 		memset(cqe, 0, sizeof(*cqe));
2194 		cqe->status = CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2196 		cqe->opcode = opcode;
2197 		cqe->qp_handle = (unsigned long)qp;
2198 		cqe->wr_id = rq->swq[last].wr_id;
2199 		cqe++;
2200 		(*budget)--;
2201 		bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
2202 		rq->swq_last = rq->swq[last].next_idx;
2203 	}
2204 	*pcqe = cqe;
2205 	if (!*budget && rq->swq_last != start)
2206 		/* Out of budget */
2207 		rc = -EAGAIN;
2208 
2209 	return rc;
2210 }
2211 
2212 void bnxt_qplib_mark_qp_error(void *qp_handle)
2213 {
2214 	struct bnxt_qplib_qp *qp = qp_handle;
2215 
2216 	if (!qp)
2217 		return;
2218 
2219 	/* Must block new posting of SQ and RQ */
2220 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2221 	bnxt_qplib_cancel_phantom_processing(qp);
2222 }
2223 
2224 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2225  *       CQE is tracked from sw_cq_cons to max_elements but valid only if VALID=1
2226  */
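/*
 * HW workaround 9060: the completion for a fence WQE can arrive as a
 * separate "phantom" CQE after the marked WQE (psn_search flag
 * 0x80000000) has completed.  When the mark is seen, completion
 * reporting is paused (sq->condition), the CQ is armed with ARMALL and
 * the CQE ring is peeked until the fence completion
 * (wr_id == BNXT_QPLIB_FENCE_WRID) shows up; afterwards exactly one
 * more completion is processed (sq->single).
 */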
2227 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2228 		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2229 {
2230 	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2231 	struct bnxt_qplib_q *sq = &qp->sq;
2232 	struct cq_req *peek_req_hwcqe;
2233 	struct bnxt_qplib_qp *peek_qp;
2234 	struct bnxt_qplib_q *peek_sq;
2235 	struct bnxt_qplib_swq *swq;
2236 	struct cq_base *peek_hwcqe;
2237 	int i, rc = 0;
2238 
2239 	/* Normal mode */
2240 	/* Check for the psn_search marking before completing */
2241 	swq = &sq->swq[swq_last];
2242 	if (swq->psn_search &&
2243 	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2244 		/* Unmark */
2245 		swq->psn_search->flags_next_psn = cpu_to_le32
2246 			(le32_to_cpu(swq->psn_search->flags_next_psn)
2247 				     & ~0x80000000);
2248 		dev_dbg(&cq->hwq.pdev->dev,
2249 			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2250 			cq_cons, qp->id, swq_last, cqe_sq_cons);
2251 		sq->condition = true;
2252 		sq->send_phantom = true;
2253 
2254 		/* TODO: Only ARM if the previous SQE is ARMALL */
2255 		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2256 		rc = -EAGAIN;
2257 		goto out;
2258 	}
2259 	if (sq->condition) {
2260 		/* Peek at the completions */
2261 		peek_raw_cq_cons = cq->hwq.cons;
2262 		peek_sw_cq_cons = cq_cons;
2263 		i = cq->hwq.max_elements;
2264 		while (i--) {
2265 			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2266 			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2267 						       peek_sw_cq_cons, NULL);
2268 			/* If the next hwcqe is VALID */
2269 			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
2270 					  cq->hwq.max_elements)) {
2271 				/*
2272 				 * The valid test of the entry must be done
2273 				 * first before reading any further.
2274 				 */
2275 				dma_rmb();
2276 				/* If the next hwcqe is a REQ */
2277 				if ((peek_hwcqe->cqe_type_toggle &
2278 				    CQ_BASE_CQE_TYPE_MASK) ==
2279 				    CQ_BASE_CQE_TYPE_REQ) {
2280 					peek_req_hwcqe = (struct cq_req *)
2281 							 peek_hwcqe;
2282 					peek_qp = (struct bnxt_qplib_qp *)
2283 						((unsigned long)
2284 						 le64_to_cpu
2285 						 (peek_req_hwcqe->qp_handle));
2286 					peek_sq = &peek_qp->sq;
2287 					peek_sq_cons_idx =
2288 						((le16_to_cpu(
2289 						  peek_req_hwcqe->sq_cons_idx)
2290 						  - 1) % sq->max_wqe);
2291 					/* If the hwcqe's sq's wr_id matches */
2292 					if (peek_sq == sq &&
2293 					    sq->swq[peek_sq_cons_idx].wr_id ==
2294 					    BNXT_QPLIB_FENCE_WRID) {
2295 						/*
2296 						 *  Unbreak only if the phantom
2297 						 *  comes back
2298 						 */
2299 						dev_dbg(&cq->hwq.pdev->dev,
2300 							"FP: Got Phantom CQE\n");
2301 						sq->condition = false;
2302 						sq->single = true;
2303 						rc = 0;
2304 						goto out;
2305 					}
2306 				}
2307 				/* Valid but not the phantom, so keep looping */
2308 			} else {
2309 				/* Not valid yet, just exit and wait */
2310 				rc = -EINVAL;
2311 				goto out;
2312 			}
2313 			peek_sw_cq_cons++;
2314 			peek_raw_cq_cons++;
2315 		}
2316 		dev_err(&cq->hwq.pdev->dev,
2317 			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2318 			cq_cons, qp->id, swq_last, cqe_sq_cons);
2319 		rc = -EINVAL;
2320 	}
2321 out:
2322 	return rc;
2323 }
2324 
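/*
 * REQ CQE handling: the hardware may aggregate several SQ completions
 * into one CQE, so completions are fabricated for every SWQE from
 * sq->swq_last up to the sq_cons_idx carried in the CQE.  An error
 * status moves the QP to the error state and onto the flush list;
 * *lib_qp is set when WA 9060 needs the caller to re-poll.
 */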
2325 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2326 				     struct cq_req *hwcqe,
2327 				     struct bnxt_qplib_cqe **pcqe, int *budget,
2328 				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2329 {
2330 	struct bnxt_qplib_swq *swq;
2331 	struct bnxt_qplib_cqe *cqe;
2332 	struct bnxt_qplib_qp *qp;
2333 	struct bnxt_qplib_q *sq;
2334 	u32 cqe_sq_cons;
2335 	int rc = 0;
2336 
2337 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2338 				      le64_to_cpu(hwcqe->qp_handle));
2339 	if (!qp) {
2340 		dev_err(&cq->hwq.pdev->dev,
2341 			"FP: Process Req qp is NULL\n");
2342 		return -EINVAL;
2343 	}
2344 	sq = &qp->sq;
2345 
2346 	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
2347 	if (qp->sq.flushed) {
2348 		dev_dbg(&cq->hwq.pdev->dev,
2349 			"%s: QP in Flush QP = %p\n", __func__, qp);
2350 		goto done;
2351 	}
2352 	/* The hardware aggregates completions, so we must walk the sq's swq
2353 	 * and fabricate CQEs for all previously signaled SWQEs from the
2354 	 * current sq cons up to cqe_sq_cons
2355 	 */
2356 	cqe = *pcqe;
2357 	while (*budget) {
2358 		if (sq->swq_last == cqe_sq_cons)
2359 			/* Done */
2360 			break;
2361 
2362 		swq = &sq->swq[sq->swq_last];
2363 		memset(cqe, 0, sizeof(*cqe));
2364 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2365 		cqe->qp_handle = (u64)(unsigned long)qp;
2366 		cqe->src_qp = qp->id;
2367 		cqe->wr_id = swq->wr_id;
2368 		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2369 			goto skip;
2370 		cqe->type = swq->type;
2371 
2372 		/* For the last CQE, check for status.  For errors, regardless
2373 		 * of the request being signaled or not, it must complete with
2374 		 * the hwcqe error status
2375 		 */
2376 		if (swq->next_idx == cqe_sq_cons &&
2377 		    hwcqe->status != CQ_REQ_STATUS_OK) {
2378 			cqe->status = hwcqe->status;
2379 			dev_err(&cq->hwq.pdev->dev,
2380 				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2381 				sq->swq_last, cqe->wr_id, cqe->status);
2382 			cqe++;
2383 			(*budget)--;
2384 			bnxt_qplib_mark_qp_error(qp);
2385 			/* Add qp to flush list of the CQ */
2386 			bnxt_qplib_add_flush_qp(qp);
2387 		} else {
2388 			/* Before we complete, do WA 9060 */
2389 			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2390 				      cqe_sq_cons)) {
2391 				*lib_qp = qp;
2392 				goto out;
2393 			}
2394 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2395 				cqe->status = CQ_REQ_STATUS_OK;
2396 				cqe++;
2397 				(*budget)--;
2398 			}
2399 		}
2400 skip:
2401 		bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
2402 		sq->swq_last = swq->next_idx;
2403 		if (sq->single)
2404 			break;
2405 	}
2406 out:
2407 	*pcqe = cqe;
2408 	if (sq->swq_last != cqe_sq_cons) {
2409 		/* Out of budget */
2410 		rc = -EAGAIN;
2411 		goto done;
2412 	}
2413 	/*
2414 	 * Back to normal completion mode only after it has completed all of
2415 	 * the WC for this CQE
2416 	 */
2417 	sq->single = false;
2418 done:
2419 	return rc;
2420 }
2421 
2422 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2423 {
2424 	spin_lock(&srq->hwq.lock);
2425 	srq->swq[srq->last_idx].next_idx = (int)tag;
2426 	srq->last_idx = (int)tag;
2427 	srq->swq[srq->last_idx].next_idx = -1;
2428 	srq->hwq.cons++; /* Support for SRQE counter */
2429 	spin_unlock(&srq->hwq.lock);
2430 }
2431 
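/*
 * RC responder CQE: the reported wr_id index either references the SRQ
 * shadow queue (SRQ flag set) or must match rq->swq_last for a normal
 * RQ, in which case the consumer index advances by the fixed per-RQE
 * slot count.
 */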
2432 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2433 					struct cq_res_rc *hwcqe,
2434 					struct bnxt_qplib_cqe **pcqe,
2435 					int *budget)
2436 {
2437 	struct bnxt_qplib_srq *srq;
2438 	struct bnxt_qplib_cqe *cqe;
2439 	struct bnxt_qplib_qp *qp;
2440 	struct bnxt_qplib_q *rq;
2441 	u32 wr_id_idx;
2442 	int rc = 0;
2443 
2444 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2445 				      le64_to_cpu(hwcqe->qp_handle));
2446 	if (!qp) {
2447 		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2448 		return -EINVAL;
2449 	}
2450 	if (qp->rq.flushed) {
2451 		dev_dbg(&cq->hwq.pdev->dev,
2452 			"%s: QP in Flush QP = %p\n", __func__, qp);
2453 		goto done;
2454 	}
2455 
2456 	cqe = *pcqe;
2457 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2458 	cqe->length = le32_to_cpu(hwcqe->length);
2459 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2460 	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2461 	cqe->flags = le16_to_cpu(hwcqe->flags);
2462 	cqe->status = hwcqe->status;
2463 	cqe->qp_handle = (u64)(unsigned long)qp;
2464 
2465 	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2466 				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2467 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2468 		srq = qp->srq;
2469 		if (!srq)
2470 			return -EINVAL;
2471 		if (wr_id_idx >= srq->hwq.max_elements) {
2472 			dev_err(&cq->hwq.pdev->dev,
2473 				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2474 				wr_id_idx, srq->hwq.max_elements);
2475 			return -EINVAL;
2476 		}
2477 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2478 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2479 		cqe++;
2480 		(*budget)--;
2481 		*pcqe = cqe;
2482 	} else {
2483 		struct bnxt_qplib_swq *swq;
2484 
2485 		rq = &qp->rq;
2486 		if (wr_id_idx > (rq->max_wqe - 1)) {
2487 			dev_err(&cq->hwq.pdev->dev,
2488 				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2489 				wr_id_idx, rq->max_wqe);
2490 			return -EINVAL;
2491 		}
2492 		if (wr_id_idx != rq->swq_last)
2493 			return -EINVAL;
2494 		swq = &rq->swq[rq->swq_last];
2495 		cqe->wr_id = swq->wr_id;
2496 		cqe++;
2497 		(*budget)--;
2498 		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2499 		rq->swq_last = swq->next_idx;
2500 		*pcqe = cqe;
2501 
2502 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2503 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2504 			/* Add qp to flush list of the CQ */
2505 			bnxt_qplib_add_flush_qp(qp);
2506 		}
2507 	}
2508 
2509 done:
2510 	return rc;
2511 }
2512 
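/*
 * UD responder CQE: mirrors the RC path, except that the source QP
 * number arrives split across two fields (the low 16 bits in
 * src_qp_low, the high bits inside src_qp_high_srq_or_rq_wr_id) and the
 * source MAC is copied out of the CQE for the UD work completion.
 */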
2513 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2514 					struct cq_res_ud *hwcqe,
2515 					struct bnxt_qplib_cqe **pcqe,
2516 					int *budget)
2517 {
2518 	struct bnxt_qplib_srq *srq;
2519 	struct bnxt_qplib_cqe *cqe;
2520 	struct bnxt_qplib_qp *qp;
2521 	struct bnxt_qplib_q *rq;
2522 	u32 wr_id_idx;
2523 	int rc = 0;
2524 
2525 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2526 				      le64_to_cpu(hwcqe->qp_handle));
2527 	if (!qp) {
2528 		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2529 		return -EINVAL;
2530 	}
2531 	if (qp->rq.flushed) {
2532 		dev_dbg(&cq->hwq.pdev->dev,
2533 			"%s: QP in Flush QP = %p\n", __func__, qp);
2534 		goto done;
2535 	}
2536 	cqe = *pcqe;
2537 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2538 	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2539 	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2540 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2541 	cqe->flags = le16_to_cpu(hwcqe->flags);
2542 	cqe->status = hwcqe->status;
2543 	cqe->qp_handle = (u64)(unsigned long)qp;
2544 	/* FIXME: Endianness fix needed for smac */
2545 	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2546 	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2547 				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2548 	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2549 				  ((le32_to_cpu(
2550 				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
2551 				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2552 
2553 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2554 		srq = qp->srq;
2555 		if (!srq)
2556 			return -EINVAL;
2557 
2558 		if (wr_id_idx >= srq->hwq.max_elements) {
2559 			dev_err(&cq->hwq.pdev->dev,
2560 				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2561 				wr_id_idx, srq->hwq.max_elements);
2562 			return -EINVAL;
2563 		}
2564 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2565 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2566 		cqe++;
2567 		(*budget)--;
2568 		*pcqe = cqe;
2569 	} else {
2570 		struct bnxt_qplib_swq *swq;
2571 
2572 		rq = &qp->rq;
2573 		if (wr_id_idx > (rq->max_wqe - 1)) {
2574 			dev_err(&cq->hwq.pdev->dev,
2575 				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2576 				wr_id_idx, rq->max_wqe);
2577 			return -EINVAL;
2578 		}
2579 
2580 		if (rq->swq_last != wr_id_idx)
2581 			return -EINVAL;
2582 		swq = &rq->swq[rq->swq_last];
2583 		cqe->wr_id = swq->wr_id;
2584 		cqe++;
2585 		(*budget)--;
2586 		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2587 		rq->swq_last = swq->next_idx;
2588 		*pcqe = cqe;
2589 
2590 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2591 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2592 			/* Add qp to flush list of the CQ */
2593 			bnxt_qplib_add_flush_qp(qp);
2594 		}
2595 	}
2596 done:
2597 	return rc;
2598 }
2599 
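/*
 * A CQE is considered valid when its toggle bit matches the phase
 * implied by the raw consumer index wrapping around the ring
 * (CQE_CMP_VALID), so emptiness can be tested without reading any
 * producer index from hardware.
 */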
2600 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2601 {
2602 	struct cq_base *hw_cqe;
2603 	u32 sw_cons, raw_cons;
2604 	bool rc = true;
2605 
2606 	raw_cons = cq->hwq.cons;
2607 	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2608 	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
2609 	/* Check for Valid bit. If the CQE is valid, return false */
2610 	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2611 	return rc;
2612 }
2613 
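/*
 * Raw Ethernet / QP1 responder CQE: in addition to the usual RQ/SRQ
 * bookkeeping this preserves the raweth flags and CFA metadata needed
 * to build the GSI receive completion; note the QP1 workaround below
 * for CQEs that report a zero length.
 */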
2614 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2615 						struct cq_res_raweth_qp1 *hwcqe,
2616 						struct bnxt_qplib_cqe **pcqe,
2617 						int *budget)
2618 {
2619 	struct bnxt_qplib_qp *qp;
2620 	struct bnxt_qplib_q *rq;
2621 	struct bnxt_qplib_srq *srq;
2622 	struct bnxt_qplib_cqe *cqe;
2623 	u32 wr_id_idx;
2624 	int rc = 0;
2625 
2626 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2627 				      le64_to_cpu(hwcqe->qp_handle));
2628 	if (!qp) {
2629 		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2630 		return -EINVAL;
2631 	}
2632 	if (qp->rq.flushed) {
2633 		dev_dbg(&cq->hwq.pdev->dev,
2634 			"%s: QP in Flush QP = %p\n", __func__, qp);
2635 		goto done;
2636 	}
2637 	cqe = *pcqe;
2638 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2639 	cqe->flags = le16_to_cpu(hwcqe->flags);
2640 	cqe->qp_handle = (u64)(unsigned long)qp;
2641 
2642 	wr_id_idx =
2643 		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2644 				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2645 	cqe->src_qp = qp->id;
2646 	if (qp->id == 1 && !cqe->length) {
2647 		/* Add workaround for the length misdetection */
2648 		cqe->length = 296;
2649 	} else {
2650 		cqe->length = le16_to_cpu(hwcqe->length);
2651 	}
2652 	cqe->pkey_index = qp->pkey_index;
2653 	memcpy(cqe->smac, qp->smac, ETH_ALEN);
2654 
2655 	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2656 	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2657 	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2658 
2659 	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2660 		srq = qp->srq;
2661 		if (!srq) {
2662 			dev_err(&cq->hwq.pdev->dev,
2663 				"FP: SRQ used but not defined??\n");
2664 			return -EINVAL;
2665 		}
2666 		if (wr_id_idx >= srq->hwq.max_elements) {
2667 			dev_err(&cq->hwq.pdev->dev,
2668 				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2669 				wr_id_idx, srq->hwq.max_elements);
2670 			return -EINVAL;
2671 		}
2672 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2673 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2674 		cqe++;
2675 		(*budget)--;
2676 		*pcqe = cqe;
2677 	} else {
2678 		struct bnxt_qplib_swq *swq;
2679 
2680 		rq = &qp->rq;
2681 		if (wr_id_idx > (rq->max_wqe - 1)) {
2682 			dev_err(&cq->hwq.pdev->dev,
2683 				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2684 				wr_id_idx, rq->max_wqe);
2685 			return -EINVAL;
2686 		}
2687 		if (rq->swq_last != wr_id_idx)
2688 			return -EINVAL;
2689 		swq = &rq->swq[rq->swq_last];
2690 		cqe->wr_id = swq->wr_id;
2691 		cqe++;
2692 		(*budget)--;
2693 		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2694 		rq->swq_last = swq->next_idx;
2695 		*pcqe = cqe;
2696 
2697 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2698 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2699 			/* Add qp to flush list of the CQ */
2700 			bnxt_qplib_add_flush_qp(qp);
2701 		}
2702 	}
2703 
2704 done:
2705 	return rc;
2706 }
2707 
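/*
 * Terminal CQE: hardware's notification that the QP has taken a fatal
 * error.  Pending good SQ completions up to sq_cons_idx are drained
 * with status OK, then the QP is queued onto the CQ flush lists; a
 * consumer index of 0xFFFF means that queue has nothing to report.
 */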
2708 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2709 					  struct cq_terminal *hwcqe,
2710 					  struct bnxt_qplib_cqe **pcqe,
2711 					  int *budget)
2712 {
2713 	struct bnxt_qplib_qp *qp;
2714 	struct bnxt_qplib_q *sq, *rq;
2715 	struct bnxt_qplib_cqe *cqe;
2716 	u32 swq_last = 0, cqe_cons;
2717 	int rc = 0;
2718 
2719 	/* Check the Status */
2720 	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2721 		dev_warn(&cq->hwq.pdev->dev,
2722 			 "FP: CQ Process Terminal Error status = 0x%x\n",
2723 			 hwcqe->status);
2724 
2725 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2726 				      le64_to_cpu(hwcqe->qp_handle));
2727 	if (!qp) {
2728 		dev_err(&cq->hwq.pdev->dev,
2729 			"FP: CQ Process terminal qp is NULL\n");
2730 		return -EINVAL;
2731 	}
2732 
2733 	/* Must block new posting of SQ and RQ */
2734 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2735 
2736 	sq = &qp->sq;
2737 	rq = &qp->rq;
2738 
2739 	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2740 	if (cqe_cons == 0xFFFF)
2741 		goto do_rq;
2742 	cqe_cons %= sq->max_wqe;
2743 
2744 	if (qp->sq.flushed) {
2745 		dev_dbg(&cq->hwq.pdev->dev,
2746 			"%s: QP in Flush QP = %p\n", __func__, qp);
2747 		goto sq_done;
2748 	}
2749 
2750 	/* A terminal CQE can also aggregate prior successful completions, so
2751 	 * we must complete all CQEs from the current sq cons up to the
2752 	 * cqe_cons with status OK
2753 	 */
2754 	cqe = *pcqe;
2755 	while (*budget) {
2756 		swq_last = sq->swq_last;
2757 		if (swq_last == cqe_cons)
2758 			break;
2759 		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2760 			memset(cqe, 0, sizeof(*cqe));
2761 			cqe->status = CQ_REQ_STATUS_OK;
2762 			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2763 			cqe->qp_handle = (u64)(unsigned long)qp;
2764 			cqe->src_qp = qp->id;
2765 			cqe->wr_id = sq->swq[swq_last].wr_id;
2766 			cqe->type = sq->swq[swq_last].type;
2767 			cqe++;
2768 			(*budget)--;
2769 		}
2770 		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
2771 		sq->swq_last = sq->swq[swq_last].next_idx;
2772 	}
2773 	*pcqe = cqe;
2774 	if (!(*budget) && swq_last != cqe_cons) {
2775 		/* Out of budget */
2776 		rc = -EAGAIN;
2777 		goto sq_done;
2778 	}
2779 sq_done:
2780 	if (rc)
2781 		return rc;
2782 do_rq:
2783 	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2784 	if (cqe_cons == 0xFFFF) {
2785 		goto done;
2786 	} else if (cqe_cons > rq->max_wqe - 1) {
2787 		dev_err(&cq->hwq.pdev->dev,
2788 			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2789 			cqe_cons, rq->max_wqe);
2790 		rc = -EINVAL;
2791 		goto done;
2792 	}
2793 
2794 	if (qp->rq.flushed) {
2795 		dev_dbg(&cq->hwq.pdev->dev,
2796 			"%s: QP in Flush QP = %p\n", __func__, qp);
2797 		rc = 0;
2798 		goto done;
2799 	}
2800 
2801 	/* A terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2802 	 * from the current rq->cons to rq->prod, regardless of the rq_cons_idx
2803 	 * the terminal CQE indicates
2804 	 */
2805 
2806 	/* Add qp to flush list of the CQ */
2807 	bnxt_qplib_add_flush_qp(qp);
2808 done:
2809 	return rc;
2810 }
2811 
2812 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2813 					struct cq_cutoff *hwcqe)
2814 {
2815 	/* Check the Status */
2816 	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2817 		dev_err(&cq->hwq.pdev->dev,
2818 			"FP: CQ Process Cutoff Error status = 0x%x\n",
2819 			hwcqe->status);
2820 		return -EINVAL;
2821 	}
2822 	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2823 	wake_up_interruptible(&cq->waitq);
2824 
2825 	return 0;
2826 }
2827 
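/*
 * Walks every QP parked on this CQ's SQ/RQ flush lists under flush_lock
 * and fabricates FLUSHED_ERR completions into the caller's CQE array,
 * returning the number of CQEs produced (at most num_cqes).
 */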
2828 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2829 				  struct bnxt_qplib_cqe *cqe,
2830 				  int num_cqes)
2831 {
2832 	struct bnxt_qplib_qp *qp = NULL;
2833 	int budget = num_cqes;
2834 	unsigned long flags;
2835 
2836 	spin_lock_irqsave(&cq->flush_lock, flags);
2837 	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2838 		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2839 		__flush_sq(&qp->sq, qp, &cqe, &budget);
2840 	}
2841 
2842 	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2843 		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2844 		__flush_rq(&qp->rq, qp, &cqe, &budget);
2845 	}
2846 	spin_unlock_irqrestore(&cq->flush_lock, flags);
2847 
2848 	return num_cqes - budget;
2849 }
2850 
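/*
 * Main poll loop: returns the number of CQEs written to @cqe (at most
 * @num_cqes).  *lib_qp is set when WA 9060 pauses REQ processing; the
 * caller is then expected to handle that QP and poll again.  A hedged
 * usage sketch:
 *
 *	ncqe = bnxt_qplib_poll_cq(cq, cqe, budget, &lib_qp);
 *	if (lib_qp)
 *		... post the phantom fence WQE for lib_qp, then re-poll ...
 */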
2851 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2852 		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
2853 {
2854 	struct cq_base *hw_cqe;
2855 	u32 sw_cons, raw_cons;
2856 	int budget, rc = 0;
2857 	u8 type;
2858 
2859 	raw_cons = cq->hwq.cons;
2860 	budget = num_cqes;
2861 
2862 	while (budget) {
2863 		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2864 		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
2865 
2866 		/* Check for Valid bit */
2867 		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2868 			break;
2869 
2870 		/*
2871 		 * The valid test of the entry must be done first before
2872 		 * reading any further.
2873 		 */
2874 		dma_rmb();
2875 		/* From the device's respective CQE format to qplib_wc */
2876 		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2877 		switch (type) {
2878 		case CQ_BASE_CQE_TYPE_REQ:
2879 			rc = bnxt_qplib_cq_process_req(cq,
2880 						       (struct cq_req *)hw_cqe,
2881 						       &cqe, &budget,
2882 						       sw_cons, lib_qp);
2883 			break;
2884 		case CQ_BASE_CQE_TYPE_RES_RC:
2885 			rc = bnxt_qplib_cq_process_res_rc(cq,
2886 							  (struct cq_res_rc *)
2887 							  hw_cqe, &cqe,
2888 							  &budget);
2889 			break;
2890 		case CQ_BASE_CQE_TYPE_RES_UD:
2891 			rc = bnxt_qplib_cq_process_res_ud
2892 					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
2893 					 &budget);
2894 			break;
2895 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2896 			rc = bnxt_qplib_cq_process_res_raweth_qp1
2897 					(cq, (struct cq_res_raweth_qp1 *)
2898 					 hw_cqe, &cqe, &budget);
2899 			break;
2900 		case CQ_BASE_CQE_TYPE_TERMINAL:
2901 			rc = bnxt_qplib_cq_process_terminal
2902 					(cq, (struct cq_terminal *)hw_cqe,
2903 					 &cqe, &budget);
2904 			break;
2905 		case CQ_BASE_CQE_TYPE_CUT_OFF:
2906 			bnxt_qplib_cq_process_cutoff
2907 					(cq, (struct cq_cutoff *)hw_cqe);
2908 			/* Done processing this CQ */
2909 			goto exit;
2910 		default:
2911 			dev_err(&cq->hwq.pdev->dev,
2912 				"process_cq unknown type 0x%lx\n",
2913 				hw_cqe->cqe_type_toggle &
2914 				CQ_BASE_CQE_TYPE_MASK);
2915 			rc = -EINVAL;
2916 			break;
2917 		}
2918 		if (rc < 0) {
2919 			if (rc == -EAGAIN)
2920 				break;
2921 			/* Error while processing the CQE, just skip to the
2922 			 * next one
2923 			 */
2924 			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
2925 				dev_err(&cq->hwq.pdev->dev,
2926 					"process_cqe error rc = 0x%x\n", rc);
2927 		}
2928 		raw_cons++;
2929 	}
2930 	if (cq->hwq.cons != raw_cons) {
2931 		cq->hwq.cons = raw_cons;
2932 		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
2933 	}
2934 exit:
2935 	return num_cqes - budget;
2936 }
2937 
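/*
 * Re-arm the CQ; arm_type selects solicited-only versus all
 * completions (e.g. DBC_DBC_TYPE_CQ_ARMSE vs DBC_DBC_TYPE_CQ_ARMALL,
 * assuming the usual DBC arm types), while arm_state lets the NQ
 * interrupt path decide whether the CQ handler should be invoked.
 */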
2938 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2939 {
2940 	if (arm_type)
2941 		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
2942 	/* Use cq->arm_state to track whether the cq handler needs to run */
2943 	atomic_set(&cq->arm_state, 1);
2944 }
2945 
2946 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
2947 {
2948 	flush_workqueue(qp->scq->nq->cqn_wq);
2949 	if (qp->scq != qp->rcq)
2950 		flush_workqueue(qp->rcq->nq->cqn_wq);
2951 }
2952