/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include <rdma/ib_addr.h>
#include "bnxt_ulp.h"
#include "bnxt_re.h"
#include "ib_verbs.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

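/* Reset the phantom-WQE bookkeeping on the SQ so that any in-progress
 * phantom CQE search is abandoned before the QP is moved to the flush
 * list.
 */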
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

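/* A QP may use the same CQ for both SQ and RQ. Take the send CQ's
 * flush_lock first and only take the receive CQ's lock when it is a
 * distinct CQ; the __acquire/__release annotations keep sparse's lock
 * balance checking happy for the shared-CQ case.
 */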
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s: Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

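/* Scan the NQ and zero out the cq_handle of any CQ notification entry
 * that still points at the CQ being destroyed, so a stale entry cannot
 * be delivered later. Each matching entry is counted in cnq_events,
 * which __wait_for_all_nqes() compares against the expected count.
 */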
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			cq->toggle = (le16_to_cpu(nqe->info10_type) &
					NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
			cq->dbinfo.toggle = cq->toggle;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, (cq)))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct bnxt_re_srq *srq_p;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
				      >> NQ_CN_TOGGLE_SFT;
			srq->dbinfo.toggle = srq->toggle;
			srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
			if (srq_p->uctx_srq_page)
				*((u32 *)srq_p->uctx_srq_page) = srq->toggle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}

/* bnxt_re_synchronize_nq - self poll the notification queue
 * @nq: notification queue pointer
 *
 * Poll and drain all pending entries of the given notification queue.
 * This is useful to synchronize notification entries while resources
 * are going away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

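/* NQ interrupt handler: prefetch the next NQE and defer the actual
 * processing to the per-NQ tasklet.
 */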
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

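	/* Initialize the SW queue as a singly linked free list:
	 * start_idx is the head, each entry points to the next free
	 * slot, and -1 terminates the list.
	 */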
	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}

int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 count;

	count = __bnxt_qplib_get_avail(srq_hwq);
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 count = 0;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	spin_lock(&srq_hwq->lock);
	count = __bnxt_qplib_get_avail(srq_hwq);
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}

	return 0;
}

/* QP */

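/* Allocate the shadow queue that mirrors the HW ring and link its
 * entries into a circular free list; the swq entries carry the wr_id
 * and per-WQE state that the HW descriptors cannot hold.
 */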
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_sw_wqe - 1;
	for (indx = 0; indx < que->max_sw_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

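/* The PSN/MSN search entries live in the auxiliary pages allocated
 * right after the SQ ring (hwq->depth elements). Record where that
 * area starts; if the first entry is not page aligned, pad_pgofft
 * holds the entry offset into the first pad page.
 */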
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	if (res->dattr)
		qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (qp->is_host_msn_tbl) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
				    : 0;
	/* Update msn tbl size */
	if (qp->is_host_msn_tbl && psn_sz) {
		hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR: default the path_mtu to 2048 if the
		 * caller did not request one
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

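/* Prune qp->modify_flags according to the current QP state so that the
 * MODIFY_QP command only carries attributes the FW accepts for this
 * transition.
 */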
static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

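/* Walk the entire CQ ring and zero the qp_handle of every valid CQE
 * that belongs to the given QP, so later polling skips completions for
 * a QP that is being cleaned up. Only a local peek cursor is advanced;
 * the real consumer index is untouched.
 */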
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

/* Fill the MSN table entry at the next PSN row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_msn_search *msns;
	u32 start_psn, next_psn;
	u16 start_idx;

	msns = (struct sq_msn_search *)swq->psn_search;
	msns->start_idx_next_psn_start_psn = 0;

	start_psn = swq->start_psn;
	next_psn = swq->next_psn;
	start_idx = swq->slot_idx;
	msns->start_idx_next_psn_start_psn |=
		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
	qp->msn++;
	qp->msn %= qp->msn_tbl_sz;
}

static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	/* Handle the MSN table differently based on capability flags */
	if (qp->is_host_msn_tbl) {
		bnxt_qplib_fill_msn_search(qp, wqe, swq);
		return;
	}
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

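/* Copy inline payload directly into the SQ ring, packing the data
 * into consecutive 16B slots (one struct sq_sge each). Returns the
 * total number of bytes copied, or -ENOMEM if the payload exceeds the
 * QP's max_inline_data.
 */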
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
					(sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

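/* Work out how many 16B slots this WQE needs: header plus either the
 * SGE list or the aligned inline payload. *qdf returns the queue-full
 * delta translated to slots; in static WQE mode every WQE consumes a
 * fixed 8 slots (128B) regardless of its real size.
 */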
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Using sq_send_hdr here is a slight misnomer; the RQ header has the same size. */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

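/* Locate this WQE's PSN/MSN search entry inside the pad pages set up
 * by bnxt_qplib_init_psn_ptr(). For HW retransmission the entry is
 * indexed by the QP's MSN counter instead of the WQE's slot position.
 */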
static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
				     struct bnxt_qplib_swq *swq, bool hw_retx)
{
	struct bnxt_qplib_hwq *hwq;
	u32 pg_num, pg_indx;
	void *buff;
	u32 tail;

	hwq = &sq->hwq;
	if (!hwq->pad_pg)
		return;
	tail = swq->slot_idx / sq->dbinfo.max_slot;
	if (hw_retx) {
		/* For HW retx use qp msn index */
		tail = qp->msn;
		tail %= qp->msn_tbl_sz;
	}
	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
	swq->psn_ext = buff;
	swq->psn_search = buff;
}

1788 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1789 {
1790 	struct bnxt_qplib_q *sq = &qp->sq;
1791 
1792 	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1793 }
1794 
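/* Post one send wqe: reserve the required slots, build the type-specific
 * base and extended headers in the hardware queue, copy inline data or
 * SGEs, advance the PSN, and update the PSN/MSN search area. For a QP in
 * the error state the wqe is only queued and completion is deferred to
 * poll_cq via the NQ worker.
 */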
1795 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1796 			 struct bnxt_qplib_swqe *wqe)
1797 {
1798 	struct bnxt_qplib_nq_work *nq_work = NULL;
1799 	int i, rc = 0, data_len = 0, pkt_num = 0;
1800 	struct bnxt_qplib_q *sq = &qp->sq;
1801 	struct bnxt_qplib_hwq *hwq;
1802 	struct bnxt_qplib_swq *swq;
1803 	bool sch_handler = false;
1804 	u16 wqe_sz, qdf = 0;
1805 	bool msn_update;
1806 	void *base_hdr;
1807 	void *ext_hdr;
1808 	__le32 temp32;
1809 	u32 wqe_idx;
1810 	u32 slots;
1811 	u16 idx;
1812 
1813 	hwq = &sq->hwq;
1814 	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1815 	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1816 		dev_err(&hwq->pdev->dev,
1817 			"FP: QP (0x%x) is in the 0x%x state\n",
1818 			qp->id, qp->state);
1819 		rc = -EINVAL;
1820 		goto done;
1821 	}
1822 
1823 	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1824 	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1825 		dev_err(&hwq->pdev->dev,
1826 			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1827 			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1828 		rc = -ENOMEM;
1829 		goto done;
1830 	}
1831 
1832 	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1833 	bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
1834 
1835 	idx = 0;
1836 	swq->slot_idx = hwq->prod;
1837 	swq->slots = slots;
1838 	swq->wr_id = wqe->wr_id;
1839 	swq->type = wqe->type;
1840 	swq->flags = wqe->flags;
1841 	swq->start_psn = sq->psn & BTH_PSN_MASK;
1842 	if (qp->sig_type)
1843 		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1844 
1845 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1846 		sch_handler = true;
1847 		dev_dbg(&hwq->pdev->dev,
1848 			"%s Error QP. Scheduling for poll_cq\n", __func__);
1849 		goto queue_err;
1850 	}
1851 
1852 	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1853 	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1854 	memset(base_hdr, 0, sizeof(struct sq_sge));
1855 	memset(ext_hdr, 0, sizeof(struct sq_sge));
1856 
1857 	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1858 		/* Copy the inline data */
1859 		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1860 	else
1861 		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1862 					       &idx);
1863 	if (data_len < 0)
1864 		goto queue_err;
1865 	/* Make sure we update the MSN table only for wqes that go on the wire */
1866 	msn_update = true;
1867 	/* Specifics */
1868 	switch (wqe->type) {
1869 	case BNXT_QPLIB_SWQE_TYPE_SEND:
1870 		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1871 			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1872 			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1873 
1874 			/* Assemble info for Raw Ethertype QPs */
1875 			sqe->wqe_type = wqe->type;
1876 			sqe->flags = wqe->flags;
1877 			sqe->wqe_size = wqe_sz;
1878 			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1879 			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1880 			sqe->length = cpu_to_le32(data_len);
1881 			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1882 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1883 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1884 
1885 			break;
1886 		}
1887 		fallthrough;
1888 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1889 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1890 	{
1891 		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1892 		struct sq_send_hdr *sqe = base_hdr;
1893 
1894 		sqe->wqe_type = wqe->type;
1895 		sqe->flags = wqe->flags;
1896 		sqe->wqe_size = wqe_sz;
1897 		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1898 		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1899 		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1900 			sqe->q_key = cpu_to_le32(wqe->send.q_key);
1901 			sqe->length = cpu_to_le32(data_len);
1902 			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1903 			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1904 						      SQ_SEND_DST_QP_MASK);
1905 			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1906 						    SQ_SEND_AVID_MASK);
1907 			msn_update = false;
1908 		} else {
1909 			sqe->length = cpu_to_le32(data_len);
1910 			if (qp->mtu)
1911 				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1912 			if (!pkt_num)
1913 				pkt_num = 1;
1914 			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1915 		}
1916 		break;
1917 	}
1918 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1919 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1920 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1921 	{
1922 		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1923 		struct sq_rdma_hdr *sqe = base_hdr;
1924 
1925 		sqe->wqe_type = wqe->type;
1926 		sqe->flags = wqe->flags;
1927 		sqe->wqe_size = wqe_sz;
1928 		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1929 		sqe->length = cpu_to_le32((u32)data_len);
1930 		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1931 		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1932 		if (qp->mtu)
1933 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1934 		if (!pkt_num)
1935 			pkt_num = 1;
1936 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1937 		break;
1938 	}
1939 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1940 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1941 	{
1942 		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1943 		struct sq_atomic_hdr *sqe = base_hdr;
1944 
1945 		sqe->wqe_type = wqe->type;
1946 		sqe->flags = wqe->flags;
1947 		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1948 		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1949 		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1950 		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1951 		if (qp->mtu)
1952 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1953 		if (!pkt_num)
1954 			pkt_num = 1;
1955 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1956 		break;
1957 	}
1958 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1959 	{
1960 		struct sq_localinvalidate *sqe = base_hdr;
1961 
1962 		sqe->wqe_type = wqe->type;
1963 		sqe->flags = wqe->flags;
1964 		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1965 		msn_update = false;
1966 		break;
1967 	}
1968 	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1969 	{
1970 		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1971 		struct sq_fr_pmr_hdr *sqe = base_hdr;
1972 
1973 		sqe->wqe_type = wqe->type;
1974 		sqe->flags = wqe->flags;
1975 		sqe->access_cntl = wqe->frmr.access_cntl |
1976 				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1977 		sqe->zero_based_page_size_log =
1978 			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1979 			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1980 			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1981 		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1982 		temp32 = cpu_to_le32(wqe->frmr.length);
1983 		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1984 		sqe->numlevels_pbl_page_size_log =
1985 			((wqe->frmr.pbl_pg_sz_log <<
1986 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1987 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1988 			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1989 					SQ_FR_PMR_NUMLEVELS_MASK);
1990 
1991 		for (i = 0; i < wqe->frmr.page_list_len; i++)
1992 			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1993 						wqe->frmr.page_list[i] |
1994 						PTU_PTE_VALID);
1995 		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1996 		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1997 		msn_update = false;
1998 
1999 		break;
2000 	}
2001 	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
2002 	{
2003 		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
2004 		struct sq_bind_hdr *sqe = base_hdr;
2005 
2006 		sqe->wqe_type = wqe->type;
2007 		sqe->flags = wqe->flags;
2008 		sqe->access_cntl = wqe->bind.access_cntl;
2009 		sqe->mw_type_zero_based = wqe->bind.mw_type |
2010 			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
2011 		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
2012 		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
2013 		ext_sqe->va = cpu_to_le64(wqe->bind.va);
2014 		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
2015 		msn_update = false;
2016 		break;
2017 	}
2018 	default:
2019 		/* Bad wqe, return error */
2020 		rc = -EINVAL;
2021 		goto done;
2022 	}
2023 	if (!qp->is_host_msn_tbl || msn_update) {
2024 		swq->next_psn = sq->psn & BTH_PSN_MASK;
2025 		bnxt_qplib_fill_psn_search(qp, wqe, swq);
2026 	}
2027 queue_err:
2028 	bnxt_qplib_swq_mod_start(sq, wqe_idx);
2029 	bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
2030 	qp->wqe_cnt++;
2031 done:
2032 	if (sch_handler) {
2033 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2034 		if (nq_work) {
2035 			nq_work->cq = qp->scq;
2036 			nq_work->nq = qp->scq->nq;
2037 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2038 			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
2039 		} else {
2040 			dev_err(&hwq->pdev->dev,
2041 				"FP: Failed to allocate SQ nq_work!\n");
2042 			rc = -ENOMEM;
2043 		}
2044 	}
2045 	return rc;
2046 }
2047 
2048 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2049 {
2050 	struct bnxt_qplib_q *rq = &qp->rq;
2051 
2052 	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
2053 }
2054 
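/* Post one receive wqe. A zero-SGE request still consumes one SGE slot
 * of size 0 so the hardware sees a valid wqe. As with the SQ, posting on
 * an error-state QP only schedules the CQ handler for flushing.
 */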
2055 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
2056 			 struct bnxt_qplib_swqe *wqe)
2057 {
2058 	struct bnxt_qplib_nq_work *nq_work = NULL;
2059 	struct bnxt_qplib_q *rq = &qp->rq;
2060 	struct rq_wqe_hdr *base_hdr;
2061 	struct rq_ext_hdr *ext_hdr;
2062 	struct bnxt_qplib_hwq *hwq;
2063 	struct bnxt_qplib_swq *swq;
2064 	bool sch_handler = false;
2065 	u16 wqe_sz, idx;
2066 	u32 wqe_idx;
2067 	int rc = 0;
2068 
2069 	hwq = &rq->hwq;
2070 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2071 		dev_err(&hwq->pdev->dev,
2072 			"FP: QP (0x%x) is in the 0x%x state\n",
2073 			qp->id, qp->state);
2074 		rc = -EINVAL;
2075 		goto done;
2076 	}
2077 
2078 	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
2079 		dev_err(&hwq->pdev->dev,
2080 			"FP: QP (0x%x) RQ is full!\n", qp->id);
2081 		rc = -EINVAL;
2082 		goto done;
2083 	}
2084 
2085 	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2086 	swq->wr_id = wqe->wr_id;
2087 	swq->slots = rq->dbinfo.max_slot;
2088 
2089 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2090 		sch_handler = true;
2091 		dev_dbg(&hwq->pdev->dev,
2092 			"%s: Error QP. Scheduling for poll_cq\n", __func__);
2093 		goto queue_err;
2094 	}
2095 
2096 	idx = 0;
2097 	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2098 	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2099 	memset(base_hdr, 0, sizeof(struct sq_sge));
2100 	memset(ext_hdr, 0, sizeof(struct sq_sge));
2101 	wqe_sz = (sizeof(struct rq_wqe_hdr) +
2102 		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2103 	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2104 	if (!wqe->num_sge) {
2105 		struct sq_sge *sge;
2106 
2107 		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2108 		sge->size = 0;
2109 		wqe_sz++;
2110 	}
2111 	base_hdr->wqe_type = wqe->type;
2112 	base_hdr->flags = wqe->flags;
2113 	base_hdr->wqe_size = wqe_sz;
2114 	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2115 queue_err:
2116 	bnxt_qplib_swq_mod_start(rq, wqe_idx);
2117 	bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
2118 done:
2119 	if (sch_handler) {
2120 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2121 		if (nq_work) {
2122 			nq_work->cq = qp->rcq;
2123 			nq_work->nq = qp->rcq->nq;
2124 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2125 			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2126 		} else {
2127 			dev_err(&hwq->pdev->dev,
2128 				"FP: Failed to allocate RQ nq_work!\n");
2129 			rc = -ENOMEM;
2130 		}
2131 	}
2132 
2133 	return rc;
2134 }
2135 
2136 /* CQ */
2137 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2138 {
2139 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2140 	struct bnxt_qplib_hwq_attr hwq_attr = {};
2141 	struct creq_create_cq_resp resp = {};
2142 	struct bnxt_qplib_cmdqmsg msg = {};
2143 	struct cmdq_create_cq req = {};
2144 	struct bnxt_qplib_pbl *pbl;
2145 	u32 pg_sz_lvl;
2146 	int rc;
2147 
2148 	if (!cq->dpi) {
2149 		dev_err(&rcfw->pdev->dev,
2150 			"FP: CREATE_CQ failed due to NULL DPI\n");
2151 		return -EINVAL;
2152 	}
2153 
2154 	cq->dbinfo.flags = 0;
2155 	hwq_attr.res = res;
2156 	hwq_attr.depth = cq->max_wqe;
2157 	hwq_attr.stride = sizeof(struct cq_base);
2158 	hwq_attr.type = HWQ_TYPE_QUEUE;
2159 	hwq_attr.sginfo = &cq->sg_info;
2160 	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2161 	if (rc)
2162 		return rc;
2163 
2164 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2165 				 CMDQ_BASE_OPCODE_CREATE_CQ,
2166 				 sizeof(req));
2167 
2168 	req.dpi = cpu_to_le32(cq->dpi->dpi);
2169 	req.cq_handle = cpu_to_le64(cq->cq_handle);
2170 	req.cq_size = cpu_to_le32(cq->max_wqe);
2171 	pbl = &cq->hwq.pbl[PBL_LVL_0];
2172 	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2173 		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
2174 	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2175 	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2176 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2177 	req.cq_fco_cnq_id = cpu_to_le32(
2178 			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2179 			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2180 	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2181 				sizeof(resp), 0);
2182 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2183 	if (rc)
2184 		goto fail;
2185 
2186 	cq->id = le32_to_cpu(resp.xid);
2187 	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2188 	init_waitqueue_head(&cq->waitq);
2189 	INIT_LIST_HEAD(&cq->sqf_head);
2190 	INIT_LIST_HEAD(&cq->rqf_head);
2191 	spin_lock_init(&cq->compl_lock);
2192 	spin_lock_init(&cq->flush_lock);
2193 
2194 	cq->dbinfo.hwq = &cq->hwq;
2195 	cq->dbinfo.xid = cq->id;
2196 	cq->dbinfo.db = cq->dpi->dbr;
2197 	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2198 	cq->dbinfo.flags = 0;
2199 	cq->dbinfo.toggle = 0;
2200 
2201 	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2202 
2203 	return 0;
2204 
2205 fail:
2206 	bnxt_qplib_free_hwq(res, &cq->hwq);
2207 	return rc;
2208 }
2209 
2210 void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2211 				   struct bnxt_qplib_cq *cq)
2212 {
2213 	bnxt_qplib_free_hwq(res, &cq->hwq);
2214 	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2215 	/* Reset only the cons bit in the flags */
2216 	cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
2217 }
2218 
2219 int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2220 			 int new_cqes)
2221 {
2222 	struct bnxt_qplib_hwq_attr hwq_attr = {};
2223 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2224 	struct creq_resize_cq_resp resp = {};
2225 	struct bnxt_qplib_cmdqmsg msg = {};
2226 	struct cmdq_resize_cq req = {};
2227 	struct bnxt_qplib_pbl *pbl;
2228 	u32 pg_sz, lvl, new_sz;
2229 	int rc;
2230 
2231 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2232 				 CMDQ_BASE_OPCODE_RESIZE_CQ,
2233 				 sizeof(req));
2234 	hwq_attr.sginfo = &cq->sg_info;
2235 	hwq_attr.res = res;
2236 	hwq_attr.depth = new_cqes;
2237 	hwq_attr.stride = sizeof(struct cq_base);
2238 	hwq_attr.type = HWQ_TYPE_QUEUE;
2239 	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2240 	if (rc)
2241 		return rc;
2242 
2243 	req.cq_cid = cpu_to_le32(cq->id);
2244 	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2245 	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2246 	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2247 				       CMDQ_RESIZE_CQ_LVL_MASK;
2248 	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2249 		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2250 	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2251 	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2252 
2253 	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2254 				sizeof(resp), 0);
2255 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2256 	return rc;
2257 }
2258 
2259 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2260 {
2261 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2262 	struct creq_destroy_cq_resp resp = {};
2263 	struct bnxt_qplib_cmdqmsg msg = {};
2264 	struct cmdq_destroy_cq req = {};
2265 	u16 total_cnq_events;
2266 	int rc;
2267 
2268 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2269 				 CMDQ_BASE_OPCODE_DESTROY_CQ,
2270 				 sizeof(req));
2271 
2272 	req.cq_cid = cpu_to_le32(cq->id);
2273 	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2274 				sizeof(resp), 0);
2275 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2276 	if (rc)
2277 		return rc;
2278 	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2279 	__wait_for_all_nqes(cq, total_cnq_events);
2280 	bnxt_qplib_free_hwq(res, &cq->hwq);
2281 	return 0;
2282 }
2283 
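/* Generate FLUSHED_ERR completions for every outstanding SQE between
 * swq_start and swq_last, skipping internal FENCE wqes. Returns -EAGAIN
 * when the caller's CQE budget runs out before the queue is drained.
 */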
2284 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2285 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2286 {
2287 	struct bnxt_qplib_cqe *cqe;
2288 	u32 start, last;
2289 	int rc = 0;
2290 
2291 	/* Now complete all outstanding SQEs with FLUSHED_ERR */
2292 	start = sq->swq_start;
2293 	cqe = *pcqe;
2294 	while (*budget) {
2295 		last = sq->swq_last;
2296 		if (start == last)
2297 			break;
2298 		/* Skip the FENCE WQE completions */
2299 		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2300 			bnxt_qplib_cancel_phantom_processing(qp);
2301 			goto skip_compl;
2302 		}
2303 		memset(cqe, 0, sizeof(*cqe));
2304 		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2305 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2306 		cqe->qp_handle = (u64)(unsigned long)qp;
2307 		cqe->wr_id = sq->swq[last].wr_id;
2308 		cqe->src_qp = qp->id;
2309 		cqe->type = sq->swq[last].type;
2310 		cqe++;
2311 		(*budget)--;
2312 skip_compl:
2313 		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2314 					 sq->swq[last].slots, &sq->dbinfo.flags);
2315 		sq->swq_last = sq->swq[last].next_idx;
2316 	}
2317 	*pcqe = cqe;
2318 	if (!(*budget) && sq->swq_last != start)
2319 		/* Out of budget */
2320 		rc = -EAGAIN;
2321 
2322 	return rc;
2323 }
2324 
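/* Same as __flush_sq but for the RQ: fabricate FLUSHED_ERR completions
 * with the receive opcode that matches the QP type.
 */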
2325 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2326 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2327 {
2328 	struct bnxt_qplib_cqe *cqe;
2329 	u32 start, last;
2330 	int opcode = 0;
2331 	int rc = 0;
2332 
2333 	switch (qp->type) {
2334 	case CMDQ_CREATE_QP1_TYPE_GSI:
2335 		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2336 		break;
2337 	case CMDQ_CREATE_QP_TYPE_RC:
2338 		opcode = CQ_BASE_CQE_TYPE_RES_RC;
2339 		break;
2340 	case CMDQ_CREATE_QP_TYPE_UD:
2341 	case CMDQ_CREATE_QP_TYPE_GSI:
2342 		opcode = CQ_BASE_CQE_TYPE_RES_UD;
2343 		break;
2344 	}
2345 
2346 	/* Flush the rest of the RQ */
2347 	start = rq->swq_start;
2348 	cqe = *pcqe;
2349 	while (*budget) {
2350 		last = rq->swq_last;
2351 		if (last == start)
2352 			break;
2353 		memset(cqe, 0, sizeof(*cqe));
2354 		cqe->status =
2355 		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2356 		cqe->opcode = opcode;
2357 		cqe->qp_handle = (unsigned long)qp;
2358 		cqe->wr_id = rq->swq[last].wr_id;
2359 		cqe++;
2360 		(*budget)--;
2361 		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2362 					 rq->swq[last].slots, &rq->dbinfo.flags);
2363 		rq->swq_last = rq->swq[last].next_idx;
2364 	}
2365 	*pcqe = cqe;
2366 	if (!*budget && rq->swq_last != start)
2367 		/* Out of budget */
2368 		rc = -EAGAIN;
2369 
2370 	return rc;
2371 }
2372 
2373 void bnxt_qplib_mark_qp_error(void *qp_handle)
2374 {
2375 	struct bnxt_qplib_qp *qp = qp_handle;
2376 
2377 	if (!qp)
2378 		return;
2379 
2380 	/* Must block new posting of SQ and RQ */
2381 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2382 	bnxt_qplib_cancel_phantom_processing(qp);
2383 }
2384 
2385 /* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive).
2386  *       CQEs are tracked from sw_cq_cons to max_elements but are valid only if VALID=1.
2387  */
2388 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2389 		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2390 {
2391 	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
2392 	struct bnxt_qplib_q *sq = &qp->sq;
2393 	struct cq_req *peek_req_hwcqe;
2394 	struct bnxt_qplib_qp *peek_qp;
2395 	struct bnxt_qplib_q *peek_sq;
2396 	struct bnxt_qplib_swq *swq;
2397 	struct cq_base *peek_hwcqe;
2398 	int i, rc = 0;
2399 
2400 	/* Normal mode */
2401 	/* Check for the psn_search marking before completing */
2402 	swq = &sq->swq[swq_last];
2403 	if (swq->psn_search &&
2404 	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2405 		/* Unmark */
2406 		swq->psn_search->flags_next_psn = cpu_to_le32
2407 			(le32_to_cpu(swq->psn_search->flags_next_psn)
2408 				     & ~0x80000000);
2409 		dev_dbg(&cq->hwq.pdev->dev,
2410 			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2411 			cq_cons, qp->id, swq_last, cqe_sq_cons);
2412 		sq->condition = true;
2413 		sq->send_phantom = true;
2414 
2415 		/* TODO: Only ARM if the previous SQE is ARMALL */
2416 		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2417 		rc = -EAGAIN;
2418 		goto out;
2419 	}
2420 	if (sq->condition) {
2421 		/* Peek at the completions */
2422 		peek_flags = cq->dbinfo.flags;
2423 		peek_sw_cq_cons = cq_cons;
2424 		i = cq->hwq.max_elements;
2425 		while (i--) {
2426 			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2427 						       peek_sw_cq_cons, NULL);
2428 			/* If the next hwcqe is VALID */
2429 			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
2430 				/*
2431 				 * The valid test of the entry must be done first
2432 				 * before reading any further.
2433 				 */
2434 				dma_rmb();
2435 				/* If the next hwcqe is a REQ */
2436 				if ((peek_hwcqe->cqe_type_toggle &
2437 				    CQ_BASE_CQE_TYPE_MASK) ==
2438 				    CQ_BASE_CQE_TYPE_REQ) {
2439 					peek_req_hwcqe = (struct cq_req *)
2440 							 peek_hwcqe;
2441 					peek_qp = (struct bnxt_qplib_qp *)
2442 						((unsigned long)
2443 						 le64_to_cpu
2444 						 (peek_req_hwcqe->qp_handle));
2445 					peek_sq = &peek_qp->sq;
2446 					peek_sq_cons_idx =
2447 						((le16_to_cpu(
2448 						  peek_req_hwcqe->sq_cons_idx)
2449 						  - 1) % sq->max_wqe);
2450 					/* If the hwcqe's sq's wr_id matches */
2451 					if (peek_sq == sq &&
2452 					    sq->swq[peek_sq_cons_idx].wr_id ==
2453 					    BNXT_QPLIB_FENCE_WRID) {
2454 						/*
2455 						 *  Unbreak only if the phantom
2456 						 *  comes back
2457 						 */
2458 						dev_dbg(&cq->hwq.pdev->dev,
2459 							"FP: Got Phantom CQE\n");
2460 						sq->condition = false;
2461 						sq->single = true;
2462 						rc = 0;
2463 						goto out;
2464 					}
2465 				}
2466 				/* Valid but not the phantom, so keep looping */
2467 			} else {
2468 				/* Not valid yet, just exit and wait */
2469 				rc = -EINVAL;
2470 				goto out;
2471 			}
2472 			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
2473 						 &peek_sw_cq_cons,
2474 						 1, &peek_flags);
2475 		}
2476 		dev_err(&cq->hwq.pdev->dev,
2477 			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2478 			cq_cons, qp->id, swq_last, cqe_sq_cons);
2479 		rc = -EINVAL;
2480 	}
2481 out:
2482 	return rc;
2483 }
2484 
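/* For variable-size wqes the hardware reports a slot index rather than a
 * wqe index; scan the swq entries from swq_last towards swq_start to map
 * the slot back to the consumer wqe index, or return -1 if no wqe matches.
 */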
2485 static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
2486 {
2487 	struct bnxt_qplib_hwq *sq_hwq;
2488 	struct bnxt_qplib_swq *swq;
2489 	int cqe_sq_cons = -1;
2490 	u32 start, last;
2491 
2492 	sq_hwq = &sq->hwq;
2493 
2494 	start = sq->swq_start;
2495 	last = sq->swq_last;
2496 
2497 	while (last != start) {
2498 		swq = &sq->swq[last];
2499 		if (swq->slot_idx == cqe_slot) {
2500 			cqe_sq_cons = swq->next_idx;
2501 			dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
2502 				__func__, cqe_sq_cons, cqe_slot);
2503 			break;
2504 		}
2505 
2506 		last = swq->next_idx;
2507 	}
2508 	return cqe_sq_cons;
2509 }
2510 
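/* Process a REQ completion. Because the hardware may aggregate CQEs,
 * fabricate completions for every signaled swq entry from the current
 * consumer up to the reported sq_cons_idx, applying the hwcqe status to
 * the final entry and moving the QP to the flush list on error.
 */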
2511 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2512 				     struct cq_req *hwcqe,
2513 				     struct bnxt_qplib_cqe **pcqe, int *budget,
2514 				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2515 {
2516 	struct bnxt_qplib_swq *swq;
2517 	struct bnxt_qplib_cqe *cqe;
2518 	u32 cqe_sq_cons, slot_num;
2519 	struct bnxt_qplib_qp *qp;
2520 	struct bnxt_qplib_q *sq;
2521 	int cqe_cons;
2522 	int rc = 0;
2523 
2524 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2525 				      le64_to_cpu(hwcqe->qp_handle));
2526 	if (!qp) {
2527 		dev_err(&cq->hwq.pdev->dev,
2528 			"FP: Process Req qp is NULL\n");
2529 		return -EINVAL;
2530 	}
2531 	sq = &qp->sq;
2532 
2533 	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
2534 	if (qp->sq.flushed) {
2535 		dev_dbg(&cq->hwq.pdev->dev,
2536 			"%s: QP in Flush QP = %p\n", __func__, qp);
2537 		goto done;
2538 	}
2539 
2540 	if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
2541 		slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
2542 		cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
2543 		if (cqe_cons < 0) {
2544 			dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
2545 				__func__, slot_num);
2546 			goto done;
2547 		}
2548 		cqe_sq_cons = cqe_cons;
2549 		dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
2550 			__func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
2551 	}
2552 
2553 	/* We need to walk the sq's swq to fabricate CQEs for all previously
2554 	 * signaled SWQEs (due to CQE aggregation) from the current sq cons
2555 	 * up to the cqe_sq_cons.
2556 	 */
2557 	cqe = *pcqe;
2558 	while (*budget) {
2559 		if (sq->swq_last == cqe_sq_cons)
2560 			/* Done */
2561 			break;
2562 
2563 		swq = &sq->swq[sq->swq_last];
2564 		memset(cqe, 0, sizeof(*cqe));
2565 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2566 		cqe->qp_handle = (u64)(unsigned long)qp;
2567 		cqe->src_qp = qp->id;
2568 		cqe->wr_id = swq->wr_id;
2569 		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2570 			goto skip;
2571 		cqe->type = swq->type;
2572 
2573 		/* For the last CQE, check the status.  For errors, regardless
2574 		 * of whether the request was signaled, it must complete with
2575 		 * the hwcqe error status.
2576 		 */
2577 		if (swq->next_idx == cqe_sq_cons &&
2578 		    hwcqe->status != CQ_REQ_STATUS_OK) {
2579 			cqe->status = hwcqe->status;
2580 			dev_err(&cq->hwq.pdev->dev,
2581 				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2582 				sq->swq_last, cqe->wr_id, cqe->status);
2583 			cqe++;
2584 			(*budget)--;
2585 			bnxt_qplib_mark_qp_error(qp);
2586 			/* Add qp to flush list of the CQ */
2587 			bnxt_qplib_add_flush_qp(qp);
2588 		} else {
2589 			/* Before we complete, do WA 9060 */
2590 			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2591 				      cqe_sq_cons)) {
2592 				*lib_qp = qp;
2593 				goto out;
2594 			}
2595 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2596 				cqe->status = CQ_REQ_STATUS_OK;
2597 				cqe++;
2598 				(*budget)--;
2599 			}
2600 		}
2601 skip:
2602 		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2603 					 swq->slots, &sq->dbinfo.flags);
2604 		sq->swq_last = swq->next_idx;
2605 		if (sq->single)
2606 			break;
2607 	}
2608 out:
2609 	*pcqe = cqe;
2610 	if (sq->swq_last != cqe_sq_cons) {
2611 		/* Out of budget */
2612 		rc = -EAGAIN;
2613 		goto done;
2614 	}
2615 	/*
2616 	 * Return to normal completion mode only after all of the WCs for
2617 	 * this CQE have been completed.
2618 	 */
2619 	sq->single = false;
2620 done:
2621 	return rc;
2622 }
2623 
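/* Return a consumed SRQ entry to the free chain (the released tag becomes
 * the new tail) and advance the SRQ hardware consumer index.
 */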
2624 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2625 {
2626 	spin_lock(&srq->hwq.lock);
2627 	srq->swq[srq->last_idx].next_idx = (int)tag;
2628 	srq->last_idx = (int)tag;
2629 	srq->swq[srq->last_idx].next_idx = -1;
2630 	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
2631 				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
2632 	spin_unlock(&srq->hwq.lock);
2633 }
2634 
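/* Process an RC receive completion, resolving the wr_id through either
 * the SRQ or the RQ swq; a bad status moves the QP to the error state and
 * onto the CQ flush list.
 */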
2635 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2636 					struct cq_res_rc *hwcqe,
2637 					struct bnxt_qplib_cqe **pcqe,
2638 					int *budget)
2639 {
2640 	struct bnxt_qplib_srq *srq;
2641 	struct bnxt_qplib_cqe *cqe;
2642 	struct bnxt_qplib_qp *qp;
2643 	struct bnxt_qplib_q *rq;
2644 	u32 wr_id_idx;
2645 
2646 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2647 				      le64_to_cpu(hwcqe->qp_handle));
2648 	if (!qp) {
2649 		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2650 		return -EINVAL;
2651 	}
2652 	if (qp->rq.flushed) {
2653 		dev_dbg(&cq->hwq.pdev->dev,
2654 			"%s: QP in Flush QP = %p\n", __func__, qp);
2655 		return 0;
2656 	}
2657 
2658 	cqe = *pcqe;
2659 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2660 	cqe->length = le32_to_cpu(hwcqe->length);
2661 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2662 	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2663 	cqe->flags = le16_to_cpu(hwcqe->flags);
2664 	cqe->status = hwcqe->status;
2665 	cqe->qp_handle = (u64)(unsigned long)qp;
2666 
2667 	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2668 				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2669 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2670 		srq = qp->srq;
2671 		if (!srq)
2672 			return -EINVAL;
2673 		if (wr_id_idx >= srq->hwq.max_elements) {
2674 			dev_err(&cq->hwq.pdev->dev,
2675 				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2676 				wr_id_idx, srq->hwq.max_elements);
2677 			return -EINVAL;
2678 		}
2679 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2680 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2681 		cqe++;
2682 		(*budget)--;
2683 		*pcqe = cqe;
2684 	} else {
2685 		struct bnxt_qplib_swq *swq;
2686 
2687 		rq = &qp->rq;
2688 		if (wr_id_idx > (rq->max_wqe - 1)) {
2689 			dev_err(&cq->hwq.pdev->dev,
2690 				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2691 				wr_id_idx, rq->max_wqe);
2692 			return -EINVAL;
2693 		}
2694 		if (wr_id_idx != rq->swq_last)
2695 			return -EINVAL;
2696 		swq = &rq->swq[rq->swq_last];
2697 		cqe->wr_id = swq->wr_id;
2698 		cqe++;
2699 		(*budget)--;
2700 		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2701 					 swq->slots, &rq->dbinfo.flags);
2702 		rq->swq_last = swq->next_idx;
2703 		*pcqe = cqe;
2704 
2705 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2706 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2707 			/* Add qp to flush list of the CQ */
2708 			bnxt_qplib_add_flush_qp(qp);
2709 		}
2710 	}
2711 
2712 	return 0;
2713 }
2714 
2715 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2716 					struct cq_res_ud *hwcqe,
2717 					struct bnxt_qplib_cqe **pcqe,
2718 					int *budget)
2719 {
2720 	struct bnxt_qplib_srq *srq;
2721 	struct bnxt_qplib_cqe *cqe;
2722 	struct bnxt_qplib_qp *qp;
2723 	struct bnxt_qplib_q *rq;
2724 	u32 wr_id_idx;
2725 
2726 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2727 				      le64_to_cpu(hwcqe->qp_handle));
2728 	if (!qp) {
2729 		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2730 		return -EINVAL;
2731 	}
2732 	if (qp->rq.flushed) {
2733 		dev_dbg(&cq->hwq.pdev->dev,
2734 			"%s: QP in Flush QP = %p\n", __func__, qp);
2735 		return 0;
2736 	}
2737 	cqe = *pcqe;
2738 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2739 	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2740 	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2741 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2742 	cqe->flags = le16_to_cpu(hwcqe->flags);
2743 	cqe->status = hwcqe->status;
2744 	cqe->qp_handle = (u64)(unsigned long)qp;
2745 	/* FIXME: Endianness fix needed for smac */
2746 	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2747 	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2748 				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2749 	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2750 				  ((le32_to_cpu(
2751 				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
2752 				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2753 
2754 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2755 		srq = qp->srq;
2756 		if (!srq)
2757 			return -EINVAL;
2758 
2759 		if (wr_id_idx >= srq->hwq.max_elements) {
2760 			dev_err(&cq->hwq.pdev->dev,
2761 				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2762 				wr_id_idx, srq->hwq.max_elements);
2763 			return -EINVAL;
2764 		}
2765 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2766 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2767 		cqe++;
2768 		(*budget)--;
2769 		*pcqe = cqe;
2770 	} else {
2771 		struct bnxt_qplib_swq *swq;
2772 
2773 		rq = &qp->rq;
2774 		if (wr_id_idx > (rq->max_wqe - 1)) {
2775 			dev_err(&cq->hwq.pdev->dev,
2776 				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2777 				wr_id_idx, rq->max_wqe);
2778 			return -EINVAL;
2779 		}
2780 
2781 		if (rq->swq_last != wr_id_idx)
2782 			return -EINVAL;
2783 		swq = &rq->swq[rq->swq_last];
2784 		cqe->wr_id = swq->wr_id;
2785 		cqe++;
2786 		(*budget)--;
2787 		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2788 					 swq->slots, &rq->dbinfo.flags);
2789 		rq->swq_last = swq->next_idx;
2790 		*pcqe = cqe;
2791 
2792 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2793 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2794 			/* Add qp to flush list of the CQ */
2795 			bnxt_qplib_add_flush_qp(qp);
2796 		}
2797 	}
2798 
2799 	return 0;
2800 }
2801 
2802 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2803 {
2804 	struct cq_base *hw_cqe;
2805 	bool rc = true;
2806 
2807 	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
2808 	/* Check the valid bit. If the CQE is valid, return false. */
2809 	rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
2810 	return rc;
2811 }
2812 
2813 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2814 						struct cq_res_raweth_qp1 *hwcqe,
2815 						struct bnxt_qplib_cqe **pcqe,
2816 						int *budget)
2817 {
2818 	struct bnxt_qplib_qp *qp;
2819 	struct bnxt_qplib_q *rq;
2820 	struct bnxt_qplib_srq *srq;
2821 	struct bnxt_qplib_cqe *cqe;
2822 	u32 wr_id_idx;
2823 
2824 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2825 				      le64_to_cpu(hwcqe->qp_handle));
2826 	if (!qp) {
2827 		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2828 		return -EINVAL;
2829 	}
2830 	if (qp->rq.flushed) {
2831 		dev_dbg(&cq->hwq.pdev->dev,
2832 			"%s: QP in Flush QP = %p\n", __func__, qp);
2833 		return 0;
2834 	}
2835 	cqe = *pcqe;
2836 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2837 	cqe->flags = le16_to_cpu(hwcqe->flags);
2838 	cqe->qp_handle = (u64)(unsigned long)qp;
2839 
2840 	wr_id_idx =
2841 		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2842 				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2843 	cqe->src_qp = qp->id;
2844 	if (qp->id == 1 && !cqe->length) {
2845 		/* Add workaround for the length misdetection */
2846 		cqe->length = 296;
2847 	} else {
2848 		cqe->length = le16_to_cpu(hwcqe->length);
2849 	}
2850 	cqe->pkey_index = qp->pkey_index;
2851 	memcpy(cqe->smac, qp->smac, ETH_ALEN);
2852 
2853 	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2854 	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2855 	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2856 
2857 	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2858 		srq = qp->srq;
2859 		if (!srq) {
2860 			dev_err(&cq->hwq.pdev->dev,
2861 				"FP: SRQ used but not defined??\n");
2862 			return -EINVAL;
2863 		}
2864 		if (wr_id_idx >= srq->hwq.max_elements) {
2865 			dev_err(&cq->hwq.pdev->dev,
2866 				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2867 				wr_id_idx, srq->hwq.max_elements);
2868 			return -EINVAL;
2869 		}
2870 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2871 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2872 		cqe++;
2873 		(*budget)--;
2874 		*pcqe = cqe;
2875 	} else {
2876 		struct bnxt_qplib_swq *swq;
2877 
2878 		rq = &qp->rq;
2879 		if (wr_id_idx > (rq->max_wqe - 1)) {
2880 			dev_err(&cq->hwq.pdev->dev,
2881 				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2882 				wr_id_idx, rq->max_wqe);
2883 			return -EINVAL;
2884 		}
2885 		if (rq->swq_last != wr_id_idx)
2886 			return -EINVAL;
2887 		swq = &rq->swq[rq->swq_last];
2888 		cqe->wr_id = swq->wr_id;
2889 		cqe++;
2890 		(*budget)--;
2891 		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2892 					 swq->slots, &rq->dbinfo.flags);
2893 		rq->swq_last = swq->next_idx;
2894 		*pcqe = cqe;
2895 
2896 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2897 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2898 			/* Add qp to flush list of the CQ */
2899 			bnxt_qplib_add_flush_qp(qp);
2900 		}
2901 	}
2902 
2903 	return 0;
2904 }
2905 
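/* Handle a TERMINAL CQE: move the QP to the error state, complete any
 * signaled SQEs up to the reported consumer index with status OK, and put
 * the QP on the CQ flush list so the RQ is flushed with FLUSHED_ERR.
 */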
2906 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2907 					  struct cq_terminal *hwcqe,
2908 					  struct bnxt_qplib_cqe **pcqe,
2909 					  int *budget)
2910 {
2911 	struct bnxt_qplib_qp *qp;
2912 	struct bnxt_qplib_q *sq, *rq;
2913 	struct bnxt_qplib_cqe *cqe;
2914 	u32 swq_last = 0, cqe_cons;
2915 	int rc = 0;
2916 
2917 	/* Check the Status */
2918 	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2919 		dev_warn(&cq->hwq.pdev->dev,
2920 			 "FP: CQ Process Terminal Error status = 0x%x\n",
2921 			 hwcqe->status);
2922 
2923 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2924 				      le64_to_cpu(hwcqe->qp_handle));
2925 	if (!qp)
2926 		return -EINVAL;
2927 
2928 	/* Must block new posting of SQ and RQ */
2929 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2930 
2931 	sq = &qp->sq;
2932 	rq = &qp->rq;
2933 
2934 	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2935 	if (cqe_cons == 0xFFFF)
2936 		goto do_rq;
2937 	cqe_cons %= sq->max_sw_wqe;
2938 
2939 	if (qp->sq.flushed) {
2940 		dev_dbg(&cq->hwq.pdev->dev,
2941 			"%s: QP in Flush QP = %p\n", __func__, qp);
2942 		goto sq_done;
2943 	}
2944 
2945 	/* A terminal CQE can also subsume earlier aggregated successful CQEs,
2946 	 * so we must complete all CQEs from the current sq's cons up to the
2947 	 * cq_cons with status OK.
2948 	 */
2949 	cqe = *pcqe;
2950 	while (*budget) {
2951 		swq_last = sq->swq_last;
2952 		if (swq_last == cqe_cons)
2953 			break;
2954 		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2955 			memset(cqe, 0, sizeof(*cqe));
2956 			cqe->status = CQ_REQ_STATUS_OK;
2957 			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2958 			cqe->qp_handle = (u64)(unsigned long)qp;
2959 			cqe->src_qp = qp->id;
2960 			cqe->wr_id = sq->swq[swq_last].wr_id;
2961 			cqe->type = sq->swq[swq_last].type;
2962 			cqe++;
2963 			(*budget)--;
2964 		}
2965 		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2966 					 sq->swq[swq_last].slots, &sq->dbinfo.flags);
2967 		sq->swq_last = sq->swq[swq_last].next_idx;
2968 	}
2969 	*pcqe = cqe;
2970 	if (!(*budget) && swq_last != cqe_cons) {
2971 		/* Out of budget */
2972 		rc = -EAGAIN;
2973 		goto sq_done;
2974 	}
2975 sq_done:
2976 	if (rc)
2977 		return rc;
2978 do_rq:
2979 	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2980 	if (cqe_cons == 0xFFFF) {
2981 		goto done;
2982 	} else if (cqe_cons > rq->max_wqe - 1) {
2983 		dev_err(&cq->hwq.pdev->dev,
2984 			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2985 			cqe_cons, rq->max_wqe);
2986 		rc = -EINVAL;
2987 		goto done;
2988 	}
2989 
2990 	if (qp->rq.flushed) {
2991 		dev_dbg(&cq->hwq.pdev->dev,
2992 			"%s: QP in Flush QP = %p\n", __func__, qp);
2993 		rc = 0;
2994 		goto done;
2995 	}
2996 
2997 	/* A terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2998 	 * from the current rq->cons to the rq->prod, regardless of what
2999 	 * rq->cons the terminal CQE indicates.
3000 	 */
3001 
3002 	/* Add qp to flush list of the CQ */
3003 	bnxt_qplib_add_flush_qp(qp);
3004 done:
3005 	return rc;
3006 }
3007 
3008 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
3009 					struct cq_cutoff *hwcqe)
3010 {
3011 	/* Check the Status */
3012 	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
3013 		dev_err(&cq->hwq.pdev->dev,
3014 			"FP: CQ Process Cutoff Error status = 0x%x\n",
3015 			hwcqe->status);
3016 		return -EINVAL;
3017 	}
3018 	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
3019 	wake_up_interruptible(&cq->waitq);
3020 
3021 	return 0;
3022 }
3023 
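/* Drain the CQ's SQ and RQ flush lists into the caller's CQE array under
 * flush_lock; returns how many flush completions were generated.
 */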
3024 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
3025 				  struct bnxt_qplib_cqe *cqe,
3026 				  int num_cqes)
3027 {
3028 	struct bnxt_qplib_qp *qp = NULL;
3029 	u32 budget = num_cqes;
3030 	unsigned long flags;
3031 
3032 	spin_lock_irqsave(&cq->flush_lock, flags);
3033 	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
3034 		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
3035 		__flush_sq(&qp->sq, qp, &cqe, &budget);
3036 	}
3037 
3038 	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
3039 		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
3040 		__flush_rq(&qp->rq, qp, &cqe, &budget);
3041 	}
3042 	spin_unlock_irqrestore(&cq->flush_lock, flags);
3043 
3044 	return num_cqes - budget;
3045 }
3046 
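/* Poll up to num_cqes completions off the CQ, dispatching each valid
 * hardware CQE to its type-specific handler, and ring the CQ doorbell for
 * whatever was consumed. Returns the number of CQEs filled in.
 */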
3047 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
3048 		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
3049 {
3050 	struct cq_base *hw_cqe;
3051 	int budget, rc = 0;
3052 	u32 hw_polled = 0;
3053 	u8 type;
3054 
3055 	budget = num_cqes;
3056 
3057 	while (budget) {
3058 		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
3059 
3060 		/* Check for Valid bit */
3061 		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
3062 			break;
3063 
3064 		/*
3065 		 * The valid test of the entry must be done first before
3066 		 * reading any further.
3067 		 */
3068 		dma_rmb();
3069 		/* Convert from the device's CQE format to a qplib_wc */
3070 		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
3071 		switch (type) {
3072 		case CQ_BASE_CQE_TYPE_REQ:
3073 			rc = bnxt_qplib_cq_process_req(cq,
3074 						       (struct cq_req *)hw_cqe,
3075 						       &cqe, &budget,
3076 						       cq->hwq.cons, lib_qp);
3077 			break;
3078 		case CQ_BASE_CQE_TYPE_RES_RC:
3079 			rc = bnxt_qplib_cq_process_res_rc(cq,
3080 							  (struct cq_res_rc *)
3081 							  hw_cqe, &cqe,
3082 							  &budget);
3083 			break;
3084 		case CQ_BASE_CQE_TYPE_RES_UD:
3085 			rc = bnxt_qplib_cq_process_res_ud
3086 					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
3087 					 &budget);
3088 			break;
3089 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3090 			rc = bnxt_qplib_cq_process_res_raweth_qp1
3091 					(cq, (struct cq_res_raweth_qp1 *)
3092 					 hw_cqe, &cqe, &budget);
3093 			break;
3094 		case CQ_BASE_CQE_TYPE_TERMINAL:
3095 			rc = bnxt_qplib_cq_process_terminal
3096 					(cq, (struct cq_terminal *)hw_cqe,
3097 					 &cqe, &budget);
3098 			break;
3099 		case CQ_BASE_CQE_TYPE_CUT_OFF:
3100 			bnxt_qplib_cq_process_cutoff
3101 					(cq, (struct cq_cutoff *)hw_cqe);
3102 			/* Done processing this CQ */
3103 			goto exit;
3104 		default:
3105 			dev_err(&cq->hwq.pdev->dev,
3106 				"process_cq unknown type 0x%lx\n",
3107 				hw_cqe->cqe_type_toggle &
3108 				CQ_BASE_CQE_TYPE_MASK);
3109 			rc = -EINVAL;
3110 			break;
3111 		}
3112 		if (rc < 0) {
3113 			if (rc == -EAGAIN)
3114 				break;
3115 			/* Error while processing the CQE, just skip to the
3116 			 * next one
3117 			 */
3118 			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
3119 				dev_err(&cq->hwq.pdev->dev,
3120 					"process_cqe error rc = 0x%x\n", rc);
3121 		}
3122 		hw_polled++;
3123 		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
3124 					 1, &cq->dbinfo.flags);
3125 
3126 	}
3127 	if (hw_polled)
3128 		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
3129 exit:
3130 	return num_cqes - budget;
3131 }
3132 
3133 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
3134 {
3135 	cq->dbinfo.toggle = cq->toggle;
3136 	if (arm_type)
3137 		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3138 	/* Use cq->arm_state to track whether the cq handler should be invoked */
3139 	atomic_set(&cq->arm_state, 1);
3140 }
3141 
3142 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3143 {
3144 	flush_workqueue(qp->scq->nq->cqn_wq);
3145 	if (qp->scq != qp->rcq)
3146 		flush_workqueue(qp->rcq->nq->cqn_wq);
3147 }
3148