xref: /linux/drivers/infiniband/hw/bnxt_re/qplib_fp.c (revision f728c17fc97aea7a33151d9ba64106291c62bb02)
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

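/* Reset the phantom-WQE tracking state on the SQ so that any in-progress
 * phantom completion search is abandoned (e.g. when the QP is moved to
 * the flush list).
 */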
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

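/* Both CQ flush locks are taken together; when the QP uses a single CQ
 * for SQ and RQ, only scq->flush_lock is really acquired and the rcq
 * side is satisfied with a sparse-only __acquire() annotation.
 */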
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s: Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

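/* Drain NQEs without invoking handlers: consume up to nq->budget valid
 * entries and zero out the cq_handle of any notification that matches
 * this CQ, counting the matches in cq->cnq_events.
 */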
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait until all NQEs for this CQ have been received, cleaning the NQEs
 * associated with this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

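/* Tasklet handler: process NQEs up to the NQ budget, dispatching CQ
 * notifications and SRQ events to the registered handlers, then ring
 * the NQ doorbell to re-arm the notification queue.
 */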
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			cq->toggle = (le16_to_cpu(nqe->info10_type) &
					NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
			cq->dbinfo.toggle = cq->toggle;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, cq))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}

/* bnxt_re_synchronize_nq - self polling notification queue.
 * @nq - notification queue pointer
 *
 * This function polls all pending entries of a given notification queue.
 * It is useful for synchronizing notification entries while resources
 * are going away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}

int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 count;

	count = __bnxt_qplib_get_avail(srq_hwq);
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 count = 0;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	spin_lock(&srq_hwq->lock);
	count = __bnxt_qplib_get_avail(srq_hwq);
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}

	return 0;
}

/* QP */

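/* Allocate the software queue that shadows the hardware queue and link
 * its entries into a circular free list via next_idx.
 */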
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

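/* The PSN/MSN search area lives in the aux pages right after the SQ
 * depth; record the first entry's page offset, page table and stride so
 * each WQE's search entry can be located later.
 */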
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	if (res->dattr)
		qp->dev_cap_flags = res->dattr->dev_cap_flags;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
	/* Update msn tbl size */
	if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
		hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

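/* Drop modify-QP attributes that the firmware does not accept for the
 * current QP state before building the MODIFY_QP command.
 */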
static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

/* Fill the MSN table into the next psn row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_msn_search *msns;
	u32 start_psn, next_psn;
	u16 start_idx;

	msns = (struct sq_msn_search *)swq->psn_search;
	msns->start_idx_next_psn_start_psn = 0;

	start_psn = swq->start_psn;
	next_psn = swq->next_psn;
	start_idx = swq->slot_idx;
	msns->start_idx_next_psn_start_psn |=
		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
	qp->msn++;
	qp->msn %= qp->msn_tbl_sz;
}

static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	/* Handle MSN differently on cap flags */
	if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
		bnxt_qplib_fill_msn_search(qp, wqe, swq);
		return;
	}
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

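/* Copy inline data from the wqe sg_list directly into the SQ, packing
 * it into consecutive 16B slots; returns the total inline length, or
 * -ENOMEM if it exceeds qp->max_inline_data.
 */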
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
					(sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

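/* Compute the number of 16B slots a WQE occupies (header plus SGEs or
 * inline data) and the queue-full delta scaled to the same units.
 */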
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Using sq_send_hdr here is a misnomer; the rq header size is the same. */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

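/* Locate the PSN/MSN search entry for this WQE in the padded aux pages:
 * indexed by the WQE's queue position for PSN mode, or by the QP's MSN
 * counter when HW retransmission is enabled.
 */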
1751 static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
1752 				     struct bnxt_qplib_swq *swq, bool hw_retx)
1753 {
1754 	struct bnxt_qplib_hwq *hwq;
1755 	u32 pg_num, pg_indx;
1756 	void *buff;
1757 	u32 tail;
1758 
1759 	hwq = &sq->hwq;
1760 	if (!hwq->pad_pg)
1761 		return;
1762 	tail = swq->slot_idx / sq->dbinfo.max_slot;
1763 	if (hw_retx) {
1764 		/* For HW retx use qp msn index */
1765 		tail = qp->msn;
1766 		tail %= qp->msn_tbl_sz;
1767 	}
1768 	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1769 	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1770 	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1771 	swq->psn_ext = buff;
1772 	swq->psn_search = buff;
1773 }
1774 
1775 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1776 {
1777 	struct bnxt_qplib_q *sq = &qp->sq;
1778 
1779 	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1780 }
1781 
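/*
 * Build and post one send WQE: check for queue-full, fill the base and
 * extended headers for the opcode, copy the SGEs or inline payload,
 * advance the PSN, and update the MSN/PSN-search area. The doorbell is
 * rung separately via bnxt_qplib_post_send_db().
 */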
1782 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1783 			 struct bnxt_qplib_swqe *wqe)
1784 {
1785 	struct bnxt_qplib_nq_work *nq_work = NULL;
1786 	int i, rc = 0, data_len = 0, pkt_num = 0;
1787 	struct bnxt_qplib_q *sq = &qp->sq;
1788 	struct bnxt_qplib_hwq *hwq;
1789 	struct bnxt_qplib_swq *swq;
1790 	bool sch_handler = false;
1791 	u16 wqe_sz, qdf = 0;
1792 	bool msn_update;
1793 	void *base_hdr;
1794 	void *ext_hdr;
1795 	__le32 temp32;
1796 	u32 wqe_idx;
1797 	u32 slots;
1798 	u16 idx;
1799 
1800 	hwq = &sq->hwq;
1801 	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1802 	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1803 		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) is in the 0x%x state\n",
1805 			qp->id, qp->state);
1806 		rc = -EINVAL;
1807 		goto done;
1808 	}
1809 
1810 	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1811 	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1812 		dev_err(&hwq->pdev->dev,
1813 			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1814 			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1815 		rc = -ENOMEM;
1816 		goto done;
1817 	}
1818 
1819 	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1820 	bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));
1821 
1822 	idx = 0;
1823 	swq->slot_idx = hwq->prod;
1824 	swq->slots = slots;
1825 	swq->wr_id = wqe->wr_id;
1826 	swq->type = wqe->type;
1827 	swq->flags = wqe->flags;
1828 	swq->start_psn = sq->psn & BTH_PSN_MASK;
1829 	if (qp->sig_type)
1830 		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1831 
1832 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1833 		sch_handler = true;
1834 		dev_dbg(&hwq->pdev->dev,
1835 			"%s Error QP. Scheduling for poll_cq\n", __func__);
1836 		goto queue_err;
1837 	}
1838 
1839 	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1840 	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1841 	memset(base_hdr, 0, sizeof(struct sq_sge));
1842 	memset(ext_hdr, 0, sizeof(struct sq_sge));
1843 
1844 	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1845 		/* Copy the inline data */
1846 		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1847 	else
1848 		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1849 					       &idx);
1850 	if (data_len < 0)
1851 		goto queue_err;
	/* Update the MSN table only for WQEs that go out on the wire */
1853 	msn_update = true;
1854 	/* Specifics */
1855 	switch (wqe->type) {
1856 	case BNXT_QPLIB_SWQE_TYPE_SEND:
1857 		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1858 			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1859 			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1860 			/* Assemble info for Raw Ethertype QPs */

			/* Assemble info for Raw Ethertype QPs */
1863 			sqe->flags = wqe->flags;
1864 			sqe->wqe_size = wqe_sz;
1865 			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1866 			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1867 			sqe->length = cpu_to_le32(data_len);
1868 			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1869 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1870 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1871 
1872 			break;
1873 		}
1874 		fallthrough;
1875 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1876 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1877 	{
1878 		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1879 		struct sq_send_hdr *sqe = base_hdr;
1880 
1881 		sqe->wqe_type = wqe->type;
1882 		sqe->flags = wqe->flags;
1883 		sqe->wqe_size = wqe_sz;
1884 		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1885 		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1886 		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1887 			sqe->q_key = cpu_to_le32(wqe->send.q_key);
1888 			sqe->length = cpu_to_le32(data_len);
1889 			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1890 			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1891 						      SQ_SEND_DST_QP_MASK);
1892 			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1893 						    SQ_SEND_AVID_MASK);
1894 			msn_update = false;
1895 		} else {
1896 			sqe->length = cpu_to_le32(data_len);
1897 			if (qp->mtu)
1898 				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1899 			if (!pkt_num)
1900 				pkt_num = 1;
1901 			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1902 		}
1903 		break;
1904 	}
1905 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1906 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1907 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1908 	{
1909 		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1910 		struct sq_rdma_hdr *sqe = base_hdr;
1911 
1912 		sqe->wqe_type = wqe->type;
1913 		sqe->flags = wqe->flags;
1914 		sqe->wqe_size = wqe_sz;
1915 		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1916 		sqe->length = cpu_to_le32((u32)data_len);
1917 		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1918 		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1919 		if (qp->mtu)
1920 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1921 		if (!pkt_num)
1922 			pkt_num = 1;
1923 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1924 		break;
1925 	}
1926 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1927 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1928 	{
1929 		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1930 		struct sq_atomic_hdr *sqe = base_hdr;
1931 
1932 		sqe->wqe_type = wqe->type;
1933 		sqe->flags = wqe->flags;
1934 		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1935 		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1936 		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1937 		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1938 		if (qp->mtu)
1939 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1940 		if (!pkt_num)
1941 			pkt_num = 1;
1942 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1943 		break;
1944 	}
1945 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1946 	{
1947 		struct sq_localinvalidate *sqe = base_hdr;
1948 
1949 		sqe->wqe_type = wqe->type;
1950 		sqe->flags = wqe->flags;
1951 		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1952 		msn_update = false;
1953 		break;
1954 	}
1955 	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1956 	{
1957 		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1958 		struct sq_fr_pmr_hdr *sqe = base_hdr;
1959 
1960 		sqe->wqe_type = wqe->type;
1961 		sqe->flags = wqe->flags;
1962 		sqe->access_cntl = wqe->frmr.access_cntl |
1963 				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1964 		sqe->zero_based_page_size_log =
1965 			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1966 			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1967 			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1968 		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1969 		temp32 = cpu_to_le32(wqe->frmr.length);
1970 		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1971 		sqe->numlevels_pbl_page_size_log =
1972 			((wqe->frmr.pbl_pg_sz_log <<
1973 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1974 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1975 			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1976 					SQ_FR_PMR_NUMLEVELS_MASK);
1977 
1978 		for (i = 0; i < wqe->frmr.page_list_len; i++)
1979 			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1980 						wqe->frmr.page_list[i] |
1981 						PTU_PTE_VALID);
1982 		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1983 		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1984 		msn_update = false;
1985 
1986 		break;
1987 	}
1988 	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1989 	{
1990 		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
1991 		struct sq_bind_hdr *sqe = base_hdr;
1992 
1993 		sqe->wqe_type = wqe->type;
1994 		sqe->flags = wqe->flags;
1995 		sqe->access_cntl = wqe->bind.access_cntl;
1996 		sqe->mw_type_zero_based = wqe->bind.mw_type |
1997 			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1998 		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1999 		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
2000 		ext_sqe->va = cpu_to_le64(wqe->bind.va);
2001 		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
2002 		msn_update = false;
2003 		break;
2004 	}
2005 	default:
2006 		/* Bad wqe, return error */
2007 		rc = -EINVAL;
2008 		goto done;
2009 	}
2010 	if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
2011 		swq->next_psn = sq->psn & BTH_PSN_MASK;
2012 		bnxt_qplib_fill_psn_search(qp, wqe, swq);
2013 	}
2014 queue_err:
2015 	bnxt_qplib_swq_mod_start(sq, wqe_idx);
2016 	bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
2017 	qp->wqe_cnt++;
2018 done:
2019 	if (sch_handler) {
2020 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2021 		if (nq_work) {
2022 			nq_work->cq = qp->scq;
2023 			nq_work->nq = qp->scq->nq;
2024 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2025 			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
2026 		} else {
2027 			dev_err(&hwq->pdev->dev,
2028 				"FP: Failed to allocate SQ nq_work!\n");
2029 			rc = -ENOMEM;
2030 		}
2031 	}
2032 	return rc;
2033 }
2034 
2035 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2036 {
2037 	struct bnxt_qplib_q *rq = &qp->rq;
2038 
2039 	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
2040 }
2041 
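/*
 * Build and post one receive WQE. A zero-SGE request still consumes one
 * SGE slot so the hardware never sees an empty WQE. The doorbell is
 * rung separately via bnxt_qplib_post_recv_db().
 */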
2042 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
2043 			 struct bnxt_qplib_swqe *wqe)
2044 {
2045 	struct bnxt_qplib_nq_work *nq_work = NULL;
2046 	struct bnxt_qplib_q *rq = &qp->rq;
2047 	struct rq_wqe_hdr *base_hdr;
2048 	struct rq_ext_hdr *ext_hdr;
2049 	struct bnxt_qplib_hwq *hwq;
2050 	struct bnxt_qplib_swq *swq;
2051 	bool sch_handler = false;
2052 	u16 wqe_sz, idx;
2053 	u32 wqe_idx;
2054 	int rc = 0;
2055 
2056 	hwq = &rq->hwq;
2057 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2058 		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) is in the 0x%x state\n",
2060 			qp->id, qp->state);
2061 		rc = -EINVAL;
2062 		goto done;
2063 	}
2064 
2065 	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
2066 		dev_err(&hwq->pdev->dev,
2067 			"FP: QP (0x%x) RQ is full!\n", qp->id);
2068 		rc = -EINVAL;
2069 		goto done;
2070 	}
2071 
2072 	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2073 	swq->wr_id = wqe->wr_id;
2074 	swq->slots = rq->dbinfo.max_slot;
2075 
2076 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2077 		sch_handler = true;
2078 		dev_dbg(&hwq->pdev->dev,
2079 			"%s: Error QP. Scheduling for poll_cq\n", __func__);
2080 		goto queue_err;
2081 	}
2082 
2083 	idx = 0;
2084 	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2085 	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2086 	memset(base_hdr, 0, sizeof(struct sq_sge));
2087 	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2090 	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2091 	if (!wqe->num_sge) {
2092 		struct sq_sge *sge;
2093 
2094 		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2095 		sge->size = 0;
2096 		wqe_sz++;
2097 	}
2098 	base_hdr->wqe_type = wqe->type;
2099 	base_hdr->flags = wqe->flags;
2100 	base_hdr->wqe_size = wqe_sz;
2101 	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2102 queue_err:
2103 	bnxt_qplib_swq_mod_start(rq, wqe_idx);
2104 	bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
2105 done:
2106 	if (sch_handler) {
2107 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2108 		if (nq_work) {
2109 			nq_work->cq = qp->rcq;
2110 			nq_work->nq = qp->rcq->nq;
2111 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2112 			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2113 		} else {
2114 			dev_err(&hwq->pdev->dev,
2115 				"FP: Failed to allocate RQ nq_work!\n");
2116 			rc = -ENOMEM;
2117 		}
2118 	}
2119 
2120 	return rc;
2121 }
2122 
2123 /* CQ */
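/*
 * Allocate the CQ's HWQ and issue CREATE_CQ to the firmware; on success,
 * initialize the flush lists, locks, and doorbell info, and enable CQ
 * arming via the ARMENA doorbell.
 */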
2124 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2125 {
2126 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2127 	struct bnxt_qplib_hwq_attr hwq_attr = {};
2128 	struct creq_create_cq_resp resp = {};
2129 	struct bnxt_qplib_cmdqmsg msg = {};
2130 	struct cmdq_create_cq req = {};
2131 	struct bnxt_qplib_pbl *pbl;
2132 	u32 pg_sz_lvl;
2133 	int rc;
2134 
2135 	if (!cq->dpi) {
2136 		dev_err(&rcfw->pdev->dev,
2137 			"FP: CREATE_CQ failed due to NULL DPI\n");
2138 		return -EINVAL;
2139 	}
2140 
2141 	cq->dbinfo.flags = 0;
2142 	hwq_attr.res = res;
2143 	hwq_attr.depth = cq->max_wqe;
2144 	hwq_attr.stride = sizeof(struct cq_base);
2145 	hwq_attr.type = HWQ_TYPE_QUEUE;
2146 	hwq_attr.sginfo = &cq->sg_info;
2147 	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2148 	if (rc)
2149 		return rc;
2150 
2151 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2152 				 CMDQ_BASE_OPCODE_CREATE_CQ,
2153 				 sizeof(req));
2154 
2155 	req.dpi = cpu_to_le32(cq->dpi->dpi);
2156 	req.cq_handle = cpu_to_le64(cq->cq_handle);
2157 	req.cq_size = cpu_to_le32(cq->max_wqe);
2158 	pbl = &cq->hwq.pbl[PBL_LVL_0];
2159 	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2160 		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
2161 	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2162 	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2163 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2164 	req.cq_fco_cnq_id = cpu_to_le32(
2165 			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2166 			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2167 	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2168 				sizeof(resp), 0);
2169 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2170 	if (rc)
2171 		goto fail;
2172 
2173 	cq->id = le32_to_cpu(resp.xid);
2174 	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2175 	init_waitqueue_head(&cq->waitq);
2176 	INIT_LIST_HEAD(&cq->sqf_head);
2177 	INIT_LIST_HEAD(&cq->rqf_head);
2178 	spin_lock_init(&cq->compl_lock);
2179 	spin_lock_init(&cq->flush_lock);
2180 
2181 	cq->dbinfo.hwq = &cq->hwq;
2182 	cq->dbinfo.xid = cq->id;
2183 	cq->dbinfo.db = cq->dpi->dbr;
2184 	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2185 	cq->dbinfo.flags = 0;
2186 	cq->dbinfo.toggle = 0;
2187 
2188 	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2189 
2190 	return 0;
2191 
2192 fail:
2193 	bnxt_qplib_free_hwq(res, &cq->hwq);
2194 	return rc;
2195 }
2196 
2197 void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2198 				   struct bnxt_qplib_cq *cq)
2199 {
2200 	bnxt_qplib_free_hwq(res, &cq->hwq);
2201 	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
	/* Reset only the cons bit in the flags */
2203 	cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
2204 }
2205 
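/*
 * Allocate a new HWQ and issue RESIZE_CQ to the firmware. The switch to
 * the new ring is finalized by bnxt_qplib_resize_cq_complete() once the
 * firmware signals the cutoff CQE.
 */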
2206 int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2207 			 int new_cqes)
2208 {
2209 	struct bnxt_qplib_hwq_attr hwq_attr = {};
2210 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2211 	struct creq_resize_cq_resp resp = {};
2212 	struct bnxt_qplib_cmdqmsg msg = {};
2213 	struct cmdq_resize_cq req = {};
2214 	struct bnxt_qplib_pbl *pbl;
2215 	u32 pg_sz, lvl, new_sz;
2216 	int rc;
2217 
2218 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2219 				 CMDQ_BASE_OPCODE_RESIZE_CQ,
2220 				 sizeof(req));
2221 	hwq_attr.sginfo = &cq->sg_info;
2222 	hwq_attr.res = res;
2223 	hwq_attr.depth = new_cqes;
2224 	hwq_attr.stride = sizeof(struct cq_base);
2225 	hwq_attr.type = HWQ_TYPE_QUEUE;
2226 	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2227 	if (rc)
2228 		return rc;
2229 
2230 	req.cq_cid = cpu_to_le32(cq->id);
2231 	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2232 	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2233 	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2234 				       CMDQ_RESIZE_CQ_LVL_MASK;
2235 	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2236 		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2237 	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2238 	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2239 
2240 	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2241 				sizeof(resp), 0);
2242 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2243 	return rc;
2244 }
2245 
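/* Issue DESTROY_CQ, wait for all outstanding CNQ events, then free the HWQ */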
2246 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2247 {
2248 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2249 	struct creq_destroy_cq_resp resp = {};
2250 	struct bnxt_qplib_cmdqmsg msg = {};
2251 	struct cmdq_destroy_cq req = {};
2252 	u16 total_cnq_events;
2253 	int rc;
2254 
2255 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2256 				 CMDQ_BASE_OPCODE_DESTROY_CQ,
2257 				 sizeof(req));
2258 
2259 	req.cq_cid = cpu_to_le32(cq->id);
2260 	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2261 				sizeof(resp), 0);
2262 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2263 	if (rc)
2264 		return rc;
2265 	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2266 	__wait_for_all_nqes(cq, total_cnq_events);
2267 	bnxt_qplib_free_hwq(res, &cq->hwq);
2268 	return 0;
2269 }
2270 
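/*
 * Complete all outstanding SQEs with FLUSHED_ERR, skipping internal
 * FENCE WQEs, until the queue drains or the budget runs out (-EAGAIN).
 */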
2271 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2272 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2273 {
2274 	struct bnxt_qplib_cqe *cqe;
2275 	u32 start, last;
2276 	int rc = 0;
2277 
2278 	/* Now complete all outstanding SQEs with FLUSHED_ERR */
2279 	start = sq->swq_start;
2280 	cqe = *pcqe;
2281 	while (*budget) {
2282 		last = sq->swq_last;
2283 		if (start == last)
2284 			break;
2285 		/* Skip the FENCE WQE completions */
2286 		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2287 			bnxt_qplib_cancel_phantom_processing(qp);
2288 			goto skip_compl;
2289 		}
2290 		memset(cqe, 0, sizeof(*cqe));
2291 		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2292 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2293 		cqe->qp_handle = (u64)(unsigned long)qp;
2294 		cqe->wr_id = sq->swq[last].wr_id;
2295 		cqe->src_qp = qp->id;
2296 		cqe->type = sq->swq[last].type;
2297 		cqe++;
2298 		(*budget)--;
2299 skip_compl:
2300 		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2301 					 sq->swq[last].slots, &sq->dbinfo.flags);
2302 		sq->swq_last = sq->swq[last].next_idx;
2303 	}
2304 	*pcqe = cqe;
2305 	if (!(*budget) && sq->swq_last != start)
2306 		/* Out of budget */
2307 		rc = -EAGAIN;
2308 
2309 	return rc;
2310 }
2311 
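/*
 * Complete all outstanding RQEs with FLUSHED_ERR, deriving the CQE
 * opcode from the QP type, until the queue drains or the budget runs
 * out (-EAGAIN).
 */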
2312 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2313 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2314 {
2315 	struct bnxt_qplib_cqe *cqe;
2316 	u32 start, last;
2317 	int opcode = 0;
2318 	int rc = 0;
2319 
2320 	switch (qp->type) {
2321 	case CMDQ_CREATE_QP1_TYPE_GSI:
2322 		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2323 		break;
2324 	case CMDQ_CREATE_QP_TYPE_RC:
2325 		opcode = CQ_BASE_CQE_TYPE_RES_RC;
2326 		break;
2327 	case CMDQ_CREATE_QP_TYPE_UD:
2328 	case CMDQ_CREATE_QP_TYPE_GSI:
2329 		opcode = CQ_BASE_CQE_TYPE_RES_UD;
2330 		break;
2331 	}
2332 
2333 	/* Flush the rest of the RQ */
2334 	start = rq->swq_start;
2335 	cqe = *pcqe;
2336 	while (*budget) {
2337 		last = rq->swq_last;
2338 		if (last == start)
2339 			break;
2340 		memset(cqe, 0, sizeof(*cqe));
2341 		cqe->status =
2342 		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2343 		cqe->opcode = opcode;
		cqe->qp_handle = (u64)(unsigned long)qp;
2345 		cqe->wr_id = rq->swq[last].wr_id;
2346 		cqe++;
2347 		(*budget)--;
2348 		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2349 					 rq->swq[last].slots, &rq->dbinfo.flags);
2350 		rq->swq_last = rq->swq[last].next_idx;
2351 	}
2352 	*pcqe = cqe;
2353 	if (!*budget && rq->swq_last != start)
2354 		/* Out of budget */
2355 		rc = -EAGAIN;
2356 
2357 	return rc;
2358 }
2359 
2360 void bnxt_qplib_mark_qp_error(void *qp_handle)
2361 {
2362 	struct bnxt_qplib_qp *qp = qp_handle;
2363 
2364 	if (!qp)
2365 		return;
2366 
2367 	/* Must block new posting of SQ and RQ */
2368 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2369 	bnxt_qplib_cancel_phantom_processing(qp);
2370 }
2371 
/* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive).
 *       CQEs are tracked from sw_cq_cons to max_elements but are valid
 *       only if VALID=1.
 */
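/*
 * WA 9060: when the last SQE's PSN-search entry carries the marker bit
 * (0x80000000), enable phantom-CQE detection, arm the CQ, and return
 * -EAGAIN; once armed, peek ahead for the phantom FENCE completion
 * before allowing further SQ completions.
 */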
2375 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2376 		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2377 {
2378 	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
2379 	struct bnxt_qplib_q *sq = &qp->sq;
2380 	struct cq_req *peek_req_hwcqe;
2381 	struct bnxt_qplib_qp *peek_qp;
2382 	struct bnxt_qplib_q *peek_sq;
2383 	struct bnxt_qplib_swq *swq;
2384 	struct cq_base *peek_hwcqe;
2385 	int i, rc = 0;
2386 
2387 	/* Normal mode */
2388 	/* Check for the psn_search marking before completing */
2389 	swq = &sq->swq[swq_last];
2390 	if (swq->psn_search &&
2391 	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2392 		/* Unmark */
2393 		swq->psn_search->flags_next_psn = cpu_to_le32
2394 			(le32_to_cpu(swq->psn_search->flags_next_psn)
2395 				     & ~0x80000000);
2396 		dev_dbg(&cq->hwq.pdev->dev,
2397 			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2398 			cq_cons, qp->id, swq_last, cqe_sq_cons);
2399 		sq->condition = true;
2400 		sq->send_phantom = true;
2401 
2402 		/* TODO: Only ARM if the previous SQE is ARMALL */
2403 		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2404 		rc = -EAGAIN;
2405 		goto out;
2406 	}
2407 	if (sq->condition) {
2408 		/* Peek at the completions */
2409 		peek_flags = cq->dbinfo.flags;
2410 		peek_sw_cq_cons = cq_cons;
2411 		i = cq->hwq.max_elements;
2412 		while (i--) {
2413 			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2414 						       peek_sw_cq_cons, NULL);
2415 			/* If the next hwcqe is VALID */
2416 			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
				/*
				 * The valid test of the entry must be done
				 * first before reading any further.
				 */
2421 				dma_rmb();
2422 				/* If the next hwcqe is a REQ */
2423 				if ((peek_hwcqe->cqe_type_toggle &
2424 				    CQ_BASE_CQE_TYPE_MASK) ==
2425 				    CQ_BASE_CQE_TYPE_REQ) {
2426 					peek_req_hwcqe = (struct cq_req *)
2427 							 peek_hwcqe;
2428 					peek_qp = (struct bnxt_qplib_qp *)
2429 						((unsigned long)
2430 						 le64_to_cpu
2431 						 (peek_req_hwcqe->qp_handle));
2432 					peek_sq = &peek_qp->sq;
2433 					peek_sq_cons_idx =
2434 						((le16_to_cpu(
2435 						  peek_req_hwcqe->sq_cons_idx)
2436 						  - 1) % sq->max_wqe);
2437 					/* If the hwcqe's sq's wr_id matches */
2438 					if (peek_sq == sq &&
2439 					    sq->swq[peek_sq_cons_idx].wr_id ==
2440 					    BNXT_QPLIB_FENCE_WRID) {
2441 						/*
2442 						 *  Unbreak only if the phantom
2443 						 *  comes back
2444 						 */
2445 						dev_dbg(&cq->hwq.pdev->dev,
2446 							"FP: Got Phantom CQE\n");
2447 						sq->condition = false;
2448 						sq->single = true;
2449 						rc = 0;
2450 						goto out;
2451 					}
2452 				}
2453 				/* Valid but not the phantom, so keep looping */
2454 			} else {
2455 				/* Not valid yet, just exit and wait */
2456 				rc = -EINVAL;
2457 				goto out;
2458 			}
2459 			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
2460 						 &peek_sw_cq_cons,
2461 						 1, &peek_flags);
2462 		}
2463 		dev_err(&cq->hwq.pdev->dev,
2464 			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2465 			cq_cons, qp->id, swq_last, cqe_sq_cons);
2466 		rc = -EINVAL;
2467 	}
2468 out:
2469 	return rc;
2470 }
2471 
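/*
 * Process a REQ completion. Because the HW can aggregate completions,
 * fabricate CQEs for every signaled SWQE from the SQ consumer up to the
 * index the hwcqe reports, handling error status and WA 9060 on the way.
 */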
2472 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2473 				     struct cq_req *hwcqe,
2474 				     struct bnxt_qplib_cqe **pcqe, int *budget,
2475 				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2476 {
2477 	struct bnxt_qplib_swq *swq;
2478 	struct bnxt_qplib_cqe *cqe;
2479 	struct bnxt_qplib_qp *qp;
2480 	struct bnxt_qplib_q *sq;
2481 	u32 cqe_sq_cons;
2482 	int rc = 0;
2483 
2484 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2485 				      le64_to_cpu(hwcqe->qp_handle));
2486 	if (!qp) {
2487 		dev_err(&cq->hwq.pdev->dev,
2488 			"FP: Process Req qp is NULL\n");
2489 		return -EINVAL;
2490 	}
2491 	sq = &qp->sq;
2492 
2493 	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
2494 	if (qp->sq.flushed) {
2495 		dev_dbg(&cq->hwq.pdev->dev,
2496 			"%s: QP in Flush QP = %p\n", __func__, qp);
2497 		goto done;
2498 	}
	/* Because of CQE aggregation, walk the SQ's swq and fabricate CQEs
	 * for all previously signaled SWQEs from the current sq cons up to
	 * cqe_sq_cons.
	 */
2503 	cqe = *pcqe;
2504 	while (*budget) {
2505 		if (sq->swq_last == cqe_sq_cons)
2506 			/* Done */
2507 			break;
2508 
2509 		swq = &sq->swq[sq->swq_last];
2510 		memset(cqe, 0, sizeof(*cqe));
2511 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2512 		cqe->qp_handle = (u64)(unsigned long)qp;
2513 		cqe->src_qp = qp->id;
2514 		cqe->wr_id = swq->wr_id;
2515 		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2516 			goto skip;
2517 		cqe->type = swq->type;
2518 
		/* For the last CQE, check the status. On error, the WQE must
		 * complete with the hwcqe's error status regardless of
		 * whether the request was signaled.
		 */
2523 		if (swq->next_idx == cqe_sq_cons &&
2524 		    hwcqe->status != CQ_REQ_STATUS_OK) {
2525 			cqe->status = hwcqe->status;
2526 			dev_err(&cq->hwq.pdev->dev,
2527 				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2528 				sq->swq_last, cqe->wr_id, cqe->status);
2529 			cqe++;
2530 			(*budget)--;
2531 			bnxt_qplib_mark_qp_error(qp);
2532 			/* Add qp to flush list of the CQ */
2533 			bnxt_qplib_add_flush_qp(qp);
2534 		} else {
2535 			/* Before we complete, do WA 9060 */
2536 			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2537 				      cqe_sq_cons)) {
2538 				*lib_qp = qp;
2539 				goto out;
2540 			}
2541 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2542 				cqe->status = CQ_REQ_STATUS_OK;
2543 				cqe++;
2544 				(*budget)--;
2545 			}
2546 		}
2547 skip:
2548 		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2549 					 swq->slots, &sq->dbinfo.flags);
2550 		sq->swq_last = swq->next_idx;
2551 		if (sq->single)
2552 			break;
2553 	}
2554 out:
2555 	*pcqe = cqe;
2556 	if (sq->swq_last != cqe_sq_cons) {
2557 		/* Out of budget */
2558 		rc = -EAGAIN;
2559 		goto done;
2560 	}
	/*
	 * Return to normal completion mode only after all of the work
	 * completions for this CQE have been generated.
	 */
2565 	sq->single = false;
2566 done:
2567 	return rc;
2568 }
2569 
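/*
 * Chain the consumed SRQE tag back onto the SRQ free list and advance
 * the consumer index.
 */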
2570 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2571 {
2572 	spin_lock(&srq->hwq.lock);
2573 	srq->swq[srq->last_idx].next_idx = (int)tag;
2574 	srq->last_idx = (int)tag;
2575 	srq->swq[srq->last_idx].next_idx = -1;
2576 	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
2577 				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
2578 	spin_unlock(&srq->hwq.lock);
2579 }
2580 
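/*
 * Process an RC responder completion, resolving the wr_id through either
 * the SRQ or the RQ software queue; on a bad status, move the QP to the
 * CQ's flush list.
 */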
2581 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2582 					struct cq_res_rc *hwcqe,
2583 					struct bnxt_qplib_cqe **pcqe,
2584 					int *budget)
2585 {
2586 	struct bnxt_qplib_srq *srq;
2587 	struct bnxt_qplib_cqe *cqe;
2588 	struct bnxt_qplib_qp *qp;
2589 	struct bnxt_qplib_q *rq;
2590 	u32 wr_id_idx;
2591 
2592 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2593 				      le64_to_cpu(hwcqe->qp_handle));
2594 	if (!qp) {
2595 		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2596 		return -EINVAL;
2597 	}
2598 	if (qp->rq.flushed) {
2599 		dev_dbg(&cq->hwq.pdev->dev,
2600 			"%s: QP in Flush QP = %p\n", __func__, qp);
2601 		return 0;
2602 	}
2603 
2604 	cqe = *pcqe;
2605 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2606 	cqe->length = le32_to_cpu(hwcqe->length);
2607 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2608 	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2609 	cqe->flags = le16_to_cpu(hwcqe->flags);
2610 	cqe->status = hwcqe->status;
2611 	cqe->qp_handle = (u64)(unsigned long)qp;
2612 
2613 	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2614 				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2615 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2616 		srq = qp->srq;
2617 		if (!srq)
2618 			return -EINVAL;
2619 		if (wr_id_idx >= srq->hwq.max_elements) {
2620 			dev_err(&cq->hwq.pdev->dev,
2621 				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2622 				wr_id_idx, srq->hwq.max_elements);
2623 			return -EINVAL;
2624 		}
2625 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2626 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2627 		cqe++;
2628 		(*budget)--;
2629 		*pcqe = cqe;
2630 	} else {
2631 		struct bnxt_qplib_swq *swq;
2632 
2633 		rq = &qp->rq;
2634 		if (wr_id_idx > (rq->max_wqe - 1)) {
2635 			dev_err(&cq->hwq.pdev->dev,
2636 				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2637 				wr_id_idx, rq->max_wqe);
2638 			return -EINVAL;
2639 		}
2640 		if (wr_id_idx != rq->swq_last)
2641 			return -EINVAL;
2642 		swq = &rq->swq[rq->swq_last];
2643 		cqe->wr_id = swq->wr_id;
2644 		cqe++;
2645 		(*budget)--;
2646 		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2647 					 swq->slots, &rq->dbinfo.flags);
2648 		rq->swq_last = swq->next_idx;
2649 		*pcqe = cqe;
2650 
2651 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2652 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2653 			/* Add qp to flush list of the CQ */
2654 			bnxt_qplib_add_flush_qp(qp);
2655 		}
2656 	}
2657 
2658 	return 0;
2659 }
2660 
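/*
 * Process a UD responder completion: recover the source QP number and
 * source MAC from the hwcqe, then resolve the wr_id through the SRQ or
 * the RQ software queue.
 */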
2661 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2662 					struct cq_res_ud *hwcqe,
2663 					struct bnxt_qplib_cqe **pcqe,
2664 					int *budget)
2665 {
2666 	struct bnxt_qplib_srq *srq;
2667 	struct bnxt_qplib_cqe *cqe;
2668 	struct bnxt_qplib_qp *qp;
2669 	struct bnxt_qplib_q *rq;
2670 	u32 wr_id_idx;
2671 
2672 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2673 				      le64_to_cpu(hwcqe->qp_handle));
2674 	if (!qp) {
2675 		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2676 		return -EINVAL;
2677 	}
2678 	if (qp->rq.flushed) {
2679 		dev_dbg(&cq->hwq.pdev->dev,
2680 			"%s: QP in Flush QP = %p\n", __func__, qp);
2681 		return 0;
2682 	}
2683 	cqe = *pcqe;
2684 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2685 	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2686 	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2687 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2688 	cqe->flags = le16_to_cpu(hwcqe->flags);
2689 	cqe->status = hwcqe->status;
2690 	cqe->qp_handle = (u64)(unsigned long)qp;
	/* FIXME: Endianness fix needed for smac */
2692 	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2693 	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2694 				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2695 	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2696 				  ((le32_to_cpu(
2697 				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
2698 				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2699 
2700 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2701 		srq = qp->srq;
2702 		if (!srq)
2703 			return -EINVAL;
2704 
2705 		if (wr_id_idx >= srq->hwq.max_elements) {
2706 			dev_err(&cq->hwq.pdev->dev,
2707 				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2708 				wr_id_idx, srq->hwq.max_elements);
2709 			return -EINVAL;
2710 		}
2711 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2712 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2713 		cqe++;
2714 		(*budget)--;
2715 		*pcqe = cqe;
2716 	} else {
2717 		struct bnxt_qplib_swq *swq;
2718 
2719 		rq = &qp->rq;
2720 		if (wr_id_idx > (rq->max_wqe - 1)) {
2721 			dev_err(&cq->hwq.pdev->dev,
2722 				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2723 				wr_id_idx, rq->max_wqe);
2724 			return -EINVAL;
2725 		}
2726 
2727 		if (rq->swq_last != wr_id_idx)
2728 			return -EINVAL;
2729 		swq = &rq->swq[rq->swq_last];
2730 		cqe->wr_id = swq->wr_id;
2731 		cqe++;
2732 		(*budget)--;
2733 		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2734 					 swq->slots, &rq->dbinfo.flags);
2735 		rq->swq_last = swq->next_idx;
2736 		*pcqe = cqe;
2737 
2738 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2739 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2740 			/* Add qp to flush list of the CQ */
2741 			bnxt_qplib_add_flush_qp(qp);
2742 		}
2743 	}
2744 
2745 	return 0;
2746 }
2747 
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;

	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
	/* Check the valid bit; a valid CQE means the CQ is not empty */
	return !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
}
2758 
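/*
 * Process a raw-Ethernet/QP1 responder completion, including the
 * raweth-specific flags and metadata, resolving the wr_id through the
 * SRQ or the RQ software queue.
 */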
2759 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2760 						struct cq_res_raweth_qp1 *hwcqe,
2761 						struct bnxt_qplib_cqe **pcqe,
2762 						int *budget)
2763 {
2764 	struct bnxt_qplib_qp *qp;
2765 	struct bnxt_qplib_q *rq;
2766 	struct bnxt_qplib_srq *srq;
2767 	struct bnxt_qplib_cqe *cqe;
2768 	u32 wr_id_idx;
2769 
2770 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2771 				      le64_to_cpu(hwcqe->qp_handle));
2772 	if (!qp) {
2773 		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2774 		return -EINVAL;
2775 	}
2776 	if (qp->rq.flushed) {
2777 		dev_dbg(&cq->hwq.pdev->dev,
2778 			"%s: QP in Flush QP = %p\n", __func__, qp);
2779 		return 0;
2780 	}
2781 	cqe = *pcqe;
2782 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2783 	cqe->flags = le16_to_cpu(hwcqe->flags);
2784 	cqe->qp_handle = (u64)(unsigned long)qp;
2785 
2786 	wr_id_idx =
2787 		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2788 				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2789 	cqe->src_qp = qp->id;
2790 	if (qp->id == 1 && !cqe->length) {
		/* Workaround for the HW misreporting a zero length on QP1 */
2792 		cqe->length = 296;
2793 	} else {
2794 		cqe->length = le16_to_cpu(hwcqe->length);
2795 	}
2796 	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, ETH_ALEN);
2798 
2799 	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2800 	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2801 	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2802 
2803 	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2804 		srq = qp->srq;
2805 		if (!srq) {
2806 			dev_err(&cq->hwq.pdev->dev,
2807 				"FP: SRQ used but not defined??\n");
2808 			return -EINVAL;
2809 		}
2810 		if (wr_id_idx >= srq->hwq.max_elements) {
2811 			dev_err(&cq->hwq.pdev->dev,
2812 				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2813 				wr_id_idx, srq->hwq.max_elements);
2814 			return -EINVAL;
2815 		}
2816 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2817 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2818 		cqe++;
2819 		(*budget)--;
2820 		*pcqe = cqe;
2821 	} else {
2822 		struct bnxt_qplib_swq *swq;
2823 
2824 		rq = &qp->rq;
2825 		if (wr_id_idx > (rq->max_wqe - 1)) {
2826 			dev_err(&cq->hwq.pdev->dev,
2827 				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2828 				wr_id_idx, rq->max_wqe);
2829 			return -EINVAL;
2830 		}
2831 		if (rq->swq_last != wr_id_idx)
2832 			return -EINVAL;
2833 		swq = &rq->swq[rq->swq_last];
2834 		cqe->wr_id = swq->wr_id;
2835 		cqe++;
2836 		(*budget)--;
2837 		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2838 					 swq->slots, &rq->dbinfo.flags);
2839 		rq->swq_last = swq->next_idx;
2840 		*pcqe = cqe;
2841 
2842 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2843 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2844 			/* Add qp to flush list of the CQ */
2845 			bnxt_qplib_add_flush_qp(qp);
2846 		}
2847 	}
2848 
2849 	return 0;
2850 }
2851 
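/*
 * Process a terminal CQE: block further posting on the QP, complete any
 * aggregated signaled SQEs up to the reported consumer index, then queue
 * the QP so its RQ is flushed.
 */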
2852 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2853 					  struct cq_terminal *hwcqe,
2854 					  struct bnxt_qplib_cqe **pcqe,
2855 					  int *budget)
2856 {
2857 	struct bnxt_qplib_qp *qp;
2858 	struct bnxt_qplib_q *sq, *rq;
2859 	struct bnxt_qplib_cqe *cqe;
2860 	u32 swq_last = 0, cqe_cons;
2861 	int rc = 0;
2862 
2863 	/* Check the Status */
2864 	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2865 		dev_warn(&cq->hwq.pdev->dev,
2866 			 "FP: CQ Process Terminal Error status = 0x%x\n",
2867 			 hwcqe->status);
2868 
2869 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2870 				      le64_to_cpu(hwcqe->qp_handle));
2871 	if (!qp)
2872 		return -EINVAL;
2873 
2874 	/* Must block new posting of SQ and RQ */
2875 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2876 
2877 	sq = &qp->sq;
2878 	rq = &qp->rq;
2879 
2880 	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2881 	if (cqe_cons == 0xFFFF)
2882 		goto do_rq;
2883 	cqe_cons %= sq->max_wqe;
2884 
2885 	if (qp->sq.flushed) {
2886 		dev_dbg(&cq->hwq.pdev->dev,
2887 			"%s: QP in Flush QP = %p\n", __func__, qp);
2888 		goto sq_done;
2889 	}
2890 
	/* A terminal CQE can also aggregate prior successful completions,
	 * so complete every signaled SWQE from the current sq cons up to
	 * cqe_cons with status OK.
	 */
2895 	cqe = *pcqe;
2896 	while (*budget) {
2897 		swq_last = sq->swq_last;
2898 		if (swq_last == cqe_cons)
2899 			break;
2900 		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2901 			memset(cqe, 0, sizeof(*cqe));
2902 			cqe->status = CQ_REQ_STATUS_OK;
2903 			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2904 			cqe->qp_handle = (u64)(unsigned long)qp;
2905 			cqe->src_qp = qp->id;
2906 			cqe->wr_id = sq->swq[swq_last].wr_id;
2907 			cqe->type = sq->swq[swq_last].type;
2908 			cqe++;
2909 			(*budget)--;
2910 		}
2911 		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2912 					 sq->swq[swq_last].slots, &sq->dbinfo.flags);
2913 		sq->swq_last = sq->swq[swq_last].next_idx;
2914 	}
2915 	*pcqe = cqe;
2916 	if (!(*budget) && swq_last != cqe_cons) {
2917 		/* Out of budget */
2918 		rc = -EAGAIN;
2919 		goto sq_done;
2920 	}
2921 sq_done:
2922 	if (rc)
2923 		return rc;
2924 do_rq:
2925 	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2926 	if (cqe_cons == 0xFFFF) {
2927 		goto done;
2928 	} else if (cqe_cons > rq->max_wqe - 1) {
2929 		dev_err(&cq->hwq.pdev->dev,
2930 			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2931 			cqe_cons, rq->max_wqe);
2932 		rc = -EINVAL;
2933 		goto done;
2934 	}
2935 
2936 	if (qp->rq.flushed) {
2937 		dev_dbg(&cq->hwq.pdev->dev,
2938 			"%s: QP in Flush QP = %p\n", __func__, qp);
2939 		rc = 0;
2940 		goto done;
2941 	}
2942 
	/* A terminal CQE requires all posted RQEs to complete with
	 * FLUSHED_ERR, from the current rq->cons to rq->prod, regardless
	 * of the rq_cons_idx the terminal CQE reports.
	 */
2947 
2948 	/* Add qp to flush list of the CQ */
2949 	bnxt_qplib_add_flush_qp(qp);
2950 done:
2951 	return rc;
2952 }
2953 
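/* A cutoff CQE signals that a CQ resize has completed; wake any waiter */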
2954 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2955 					struct cq_cutoff *hwcqe)
2956 {
2957 	/* Check the Status */
2958 	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2959 		dev_err(&cq->hwq.pdev->dev,
2960 			"FP: CQ Process Cutoff Error status = 0x%x\n",
2961 			hwcqe->status);
2962 		return -EINVAL;
2963 	}
2964 	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2965 	wake_up_interruptible(&cq->waitq);
2966 
2967 	return 0;
2968 }
2969 
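/*
 * Generate flush completions for every QP on this CQ's SQ and RQ flush
 * lists. Returns the number of CQEs written to the caller's array.
 */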
2970 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2971 				  struct bnxt_qplib_cqe *cqe,
2972 				  int num_cqes)
2973 {
2974 	struct bnxt_qplib_qp *qp = NULL;
2975 	u32 budget = num_cqes;
2976 	unsigned long flags;
2977 
2978 	spin_lock_irqsave(&cq->flush_lock, flags);
2979 	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2980 		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2981 		__flush_sq(&qp->sq, qp, &cqe, &budget);
2982 	}
2983 
2984 	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2985 		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2986 		__flush_rq(&qp->rq, qp, &cqe, &budget);
2987 	}
2988 	spin_unlock_irqrestore(&cq->flush_lock, flags);
2989 
2990 	return num_cqes - budget;
2991 }
2992 
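/*
 * Poll up to num_cqes completions from the CQ, dispatching each valid
 * hwcqe to its type-specific handler, and ring the consumer doorbell
 * once at the end. Returns the number of CQEs delivered.
 */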
2993 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2994 		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
2995 {
2996 	struct cq_base *hw_cqe;
2997 	int budget, rc = 0;
2998 	u32 hw_polled = 0;
2999 	u8 type;
3000 
3001 	budget = num_cqes;
3002 
3003 	while (budget) {
3004 		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
3005 
3006 		/* Check for Valid bit */
3007 		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
3008 			break;
3009 
3010 		/*
3011 		 * The valid test of the entry must be done first before
3012 		 * reading any further.
3013 		 */
3014 		dma_rmb();
		/* From the device's respective CQE format to qplib_wc */
3016 		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
3017 		switch (type) {
3018 		case CQ_BASE_CQE_TYPE_REQ:
3019 			rc = bnxt_qplib_cq_process_req(cq,
3020 						       (struct cq_req *)hw_cqe,
3021 						       &cqe, &budget,
3022 						       cq->hwq.cons, lib_qp);
3023 			break;
3024 		case CQ_BASE_CQE_TYPE_RES_RC:
3025 			rc = bnxt_qplib_cq_process_res_rc(cq,
3026 							  (struct cq_res_rc *)
3027 							  hw_cqe, &cqe,
3028 							  &budget);
3029 			break;
3030 		case CQ_BASE_CQE_TYPE_RES_UD:
3031 			rc = bnxt_qplib_cq_process_res_ud
3032 					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
3033 					 &budget);
3034 			break;
3035 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3036 			rc = bnxt_qplib_cq_process_res_raweth_qp1
3037 					(cq, (struct cq_res_raweth_qp1 *)
3038 					 hw_cqe, &cqe, &budget);
3039 			break;
3040 		case CQ_BASE_CQE_TYPE_TERMINAL:
3041 			rc = bnxt_qplib_cq_process_terminal
3042 					(cq, (struct cq_terminal *)hw_cqe,
3043 					 &cqe, &budget);
3044 			break;
3045 		case CQ_BASE_CQE_TYPE_CUT_OFF:
3046 			bnxt_qplib_cq_process_cutoff
3047 					(cq, (struct cq_cutoff *)hw_cqe);
3048 			/* Done processing this CQ */
3049 			goto exit;
3050 		default:
3051 			dev_err(&cq->hwq.pdev->dev,
3052 				"process_cq unknown type 0x%lx\n",
3053 				hw_cqe->cqe_type_toggle &
3054 				CQ_BASE_CQE_TYPE_MASK);
3055 			rc = -EINVAL;
3056 			break;
3057 		}
3058 		if (rc < 0) {
3059 			if (rc == -EAGAIN)
3060 				break;
3061 			/* Error while processing the CQE, just skip to the
3062 			 * next one
3063 			 */
3064 			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
3065 				dev_err(&cq->hwq.pdev->dev,
3066 					"process_cqe error rc = 0x%x\n", rc);
3067 		}
3068 		hw_polled++;
3069 		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
3070 					 1, &cq->dbinfo.flags);
3072 	}
3073 	if (hw_polled)
3074 		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
3075 exit:
3076 	return num_cqes - budget;
3077 }
3078 
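/* Arm the CQ for the requested notification type and record the armed state */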
3079 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
3080 {
3081 	cq->dbinfo.toggle = cq->toggle;
3082 	if (arm_type)
3083 		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Use cq->arm_state to track whether the CQ handler needs to be invoked */
3085 	atomic_set(&cq->arm_state, 1);
3086 }
3087 
3088 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3089 {
3090 	flush_workqueue(qp->scq->nq->cqn_wq);
3091 	if (qp->scq != qp->rcq)
3092 		flush_workqueue(qp->rcq->nq->cqn_wq);
3093 }
3094