/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include <rdma/ib_addr.h>
#include "bnxt_ulp.h"
#include "bnxt_re.h"
#include "ib_verbs.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

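/* Reset the phantom-WQE detection state on the SQ. Called before a QP is
 * put on the flush list so no stale phantom tracking survives the flush.
 */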
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

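/* Take both CQ flush locks in a fixed order: SCQ first, then RCQ. When
 * the QP uses the same CQ for both directions, the lock is taken only
 * once and __acquire()/__release() keep sparse's context tracking
 * balanced.
 */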
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);
	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s: Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

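/* Free the QP1 header buffers. The sizes must mirror those used at
 * allocation time: max_wqe entries of hdr_buf_size bytes per queue.
 */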
static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

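/* Scrub notification entries that reference a CQ being destroyed. A CQ
 * notification whose handle matches the dying CQ has its handle cleared
 * and is counted in cq->cnq_events, so __wait_for_all_nqes() can tell
 * when every outstanding notification has been consumed.
 */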
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done before reading
		 * any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

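/* NQ tasklet handler: drain up to nq->budget notification entries and
 * dispatch CQ/SRQ events to the registered handlers. The NQ doorbell is
 * re-armed only if at least one entry was consumed.
 */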
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done before reading
		 * any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;
			struct bnxt_re_cq *cq_p;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			cq->toggle = (le16_to_cpu(nqe->info10_type) &
					NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
			cq->dbinfo.toggle = cq->toggle;
			cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq);
			if (cq_p->uctx_cq_page)
				*((u32 *)cq_p->uctx_cq_page) = cq->toggle;

			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, (cq)))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct bnxt_re_srq *srq_p;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
				      >> NQ_CN_TOGGLE_SFT;
			srq->dbinfo.toggle = srq->toggle;
			srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
			if (srq_p->uctx_srq_page)
				*((u32 *)srq_p->uctx_srq_page) = srq->toggle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}

/* bnxt_re_synchronize_nq - self-polling notification queue
 * @nq: notification queue pointer
 *
 * Poll all pending entries of the given notification queue.
 * This is useful to synchronize notification entries while resources
 * are going away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

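/* Quiesce the NQ interrupt: mask the doorbell, wait out any running
 * handler, then release the vector. The tasklet is killed only when the
 * caller is tearing the NQ down for good (kill == true).
 */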
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "NQ BAR region %d resource start is 0!\n",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "NQ BAR region %d mapping failed\n",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;
	nq->load = 0;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

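/* Expected NQ lifecycle, as suggested by the helpers in this file:
 * bnxt_qplib_alloc_nq() -> bnxt_qplib_enable_nq() -> ... ->
 * bnxt_qplib_disable_nq() -> bnxt_qplib_free_nq().
 */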
int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed\n");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

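/* Create an SRQ: allocate the hardware queue, issue CREATE_SRQ over the
 * RCFW channel and, for kernel consumers, build the software queue whose
 * free slots are linked through swq[].next_idx.
 */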
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	if (!srq->hwq.is_user) {
		srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
				   GFP_KERNEL);
		if (!srq->swq) {
			rc = -ENOMEM;
			goto fail;
		}
		for (idx = 0; idx < srq->hwq.max_elements; idx++)
			srq->swq[idx].next_idx = idx + 1;
		srq->swq[srq->last_idx].next_idx = -1;
	}

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}

int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 count;

	count = __bnxt_qplib_get_avail(srq_hwq);
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}

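/* Post a receive WQE to the SRQ: pop a slot from the software free list,
 * build the hardware WQE, ring the producer doorbell and, if arming was
 * deferred, arm the SRQ once enough slots are available again.
 */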
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 count = 0;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	spin_lock(&srq_hwq->lock);
	count = __bnxt_qplib_get_avail(srq_hwq);
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}

	return 0;
}

/* QP */

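/* Allocate the shadow work-queue array and link every entry into a
 * circular free list through next_idx.
 */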
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_sw_wqe - 1;
	for (indx = 0; indx < que->max_sw_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf to be passed in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

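/* Record where the PSN/MSN search area starts. The area lives in the
 * padding pages placed right after the SQ slots (hence the lookup at
 * hwq->depth); its page offset and stride are cached for later indexing.
 */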
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (qp->is_host_msn_tbl) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
				    : 0;
	/* Update msn tbl size */
	if (qp->is_host_msn_tbl && psn_sz) {
		if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
			hwq_attr.aux_depth =
				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		else
			hwq_attr.aux_depth =
				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	if (!sq->hwq.is_user) {
		rc = bnxt_qplib_alloc_init_swq(sq);
		if (rc)
			goto fail_sq;

		if (psn_sz)
			bnxt_qplib_init_psn_ptr(qp, psn_sz);
	}
	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		if (!rq->hwq.is_user) {
			rc = bnxt_qplib_alloc_init_swq(rq);
			if (rc)
				goto fail_rq;
		}

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (bnxt_ext_stats_supported(res->cctx, res->dattr->dev_cap_flags, res->is_vf))
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
	spin_unlock_bh(&rcfw->tbl_lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

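/* Drop modify-QP attributes that the firmware does not accept for the
 * current state transition.
 */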
static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
					  struct bnxt_qplib_qp *qp,
					  struct cmdq_modify_qp *req)
{
	u32 mandatory_flags = 0;

	if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;

	if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
	    qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) {
		if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC && qp->srq)
			req->flags = cpu_to_le16(CMDQ_MODIFY_QP_FLAGS_SRQ_USED);
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
	}

	if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
	    (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
		if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
			mandatory_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
	}

	if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
	    qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;

	qp->modify_flags |= mandatory_flags;
	req->qp_type = qp->type;
}

static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
{
	if ((qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) ||
	    (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS))
		return true;

	return false;
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &res->sgid_tbl;
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u16 vlan_pcp_vlan_dei_vlan_id;
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	if (qp->modify_flags & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		/* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
		if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
		    is_optimized_state_transition(qp))
			bnxt_set_mandatory_attributes(res, qp, &req);
	}
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) {
		if (qp->type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
			req.sgid_index =
				cpu_to_le16(sgid_tbl->hw_id[qp->ugid_index]);
		else
			req.sgid_index =
				cpu_to_le16(sgid_tbl->hw_id[qp->ah.sgid_index]);
	}

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID) {
		vlan_pcp_vlan_dei_vlan_id =
			((res->sgid_tbl.tbl[qp->ah.sgid_index].vlan_id <<
			  CMDQ_MODIFY_QP_VLAN_ID_SFT) &
			 CMDQ_MODIFY_QP_VLAN_ID_MASK);
		vlan_pcp_vlan_dei_vlan_id |=
			((qp->ah.sl << CMDQ_MODIFY_QP_VLAN_PCP_SFT) &
			 CMDQ_MODIFY_QP_VLAN_PCP_MASK);
		req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(vlan_pcp_vlan_dei_vlan_id);
	}

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);
	qp->udp_sport = le16_to_cpu(sb->udp_src_port);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
	qp->port_id = le16_to_cpu(sb->port_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

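/* Walk the CQ ring and zero the qp_handle of every valid CQE that still
 * points at the given QP, so stale completions are ignored once the QP
 * is gone. Only a peek cursor is used; the consumer index is untouched.
 */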
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/*
		 * The valid test of the entry must be done before reading
		 * any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
	spin_unlock_bh(&rcfw->tbl_lock);

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		spin_lock_bh(&rcfw->tbl_lock);
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		spin_unlock_bh(&rcfw->tbl_lock);
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

1702 /* Fil the MSN table into the next psn row */
1703 static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
1704 				       struct bnxt_qplib_swqe *wqe,
1705 				       struct bnxt_qplib_swq *swq)
1706 {
1707 	struct sq_msn_search *msns;
1708 	u32 start_psn, next_psn;
1709 	u16 start_idx;
1710 
1711 	msns = (struct sq_msn_search *)swq->psn_search;
1712 	msns->start_idx_next_psn_start_psn = 0;
1713 
1714 	start_psn = swq->start_psn;
1715 	next_psn = swq->next_psn;
1716 	start_idx = swq->slot_idx;
1717 	msns->start_idx_next_psn_start_psn |=
1718 		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
1719 	qp->msn++;
1720 	qp->msn %= qp->msn_tbl_sz;
1721 }
1722 
1723 static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
1724 				       struct bnxt_qplib_swqe *wqe,
1725 				       struct bnxt_qplib_swq *swq)
1726 {
1727 	struct sq_psn_search_ext *psns_ext;
1728 	struct sq_psn_search *psns;
1729 	u32 flg_npsn;
1730 	u32 op_spsn;
1731 
1732 	if (!swq->psn_search)
1733 		return;
1734 	/* Handle MSN differently on cap flags  */
1735 	if (qp->is_host_msn_tbl) {
1736 		bnxt_qplib_fill_msn_search(qp, wqe, swq);
1737 		return;
1738 	}
1739 	psns = (struct sq_psn_search *)swq->psn_search;
1740 	psns = swq->psn_search;
1741 	psns_ext = swq->psn_ext;
1742 
1743 	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1744 		    SQ_PSN_SEARCH_START_PSN_MASK);
1745 	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1746 		     SQ_PSN_SEARCH_OPCODE_MASK);
1747 	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1748 		     SQ_PSN_SEARCH_NEXT_PSN_MASK);
1749 
1750 	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
1751 		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
1752 		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
1753 		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
1754 	} else {
1755 		psns->opcode_start_psn = cpu_to_le32(op_spsn);
1756 		psns->flags_next_psn = cpu_to_le32(flg_npsn);
1757 	}
1758 }
1759 
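/* Inline data is copied directly into the 16-byte SQE slots that would
 * otherwise hold SGEs, packing across source SG entries.  Illustrative
 * example: 40 bytes of inline data fill two full slots plus 8 bytes of a
 * third, so ALIGN(40, 16) = 48 bytes (3 slots) are consumed.
 */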
1760 static unsigned int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
1761 					  struct bnxt_qplib_swqe *wqe,
1762 					  u32 *idx)
1763 {
1764 	struct bnxt_qplib_hwq *hwq;
1765 	int len, t_len, offt;
1766 	bool pull_dst = true;
1767 	void *il_dst = NULL;
1768 	void *il_src = NULL;
1769 	int t_cplen, cplen;
1770 	int indx;
1771 
1772 	hwq = &qp->sq.hwq;
1773 	t_len = 0;
1774 	for (indx = 0; indx < wqe->num_sge; indx++) {
1775 		len = wqe->sg_list[indx].size;
1776 		il_src = (void *)wqe->sg_list[indx].addr;
1777 		t_len += len;
1778 		if (t_len > qp->max_inline_data)
1779 			return BNXT_RE_INVAL_MSG_SIZE;
1780 		while (len) {
1781 			if (pull_dst) {
1782 				pull_dst = false;
1783 				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
1784 				(*idx)++;
1785 				t_cplen = 0;
1786 				offt = 0;
1787 			}
1788 			cplen = min_t(int, len, sizeof(struct sq_sge));
1789 			cplen = min_t(int, cplen,
1790 					(sizeof(struct sq_sge) - offt));
1791 			memcpy(il_dst, il_src, cplen);
1792 			t_cplen += cplen;
1793 			il_src += cplen;
1794 			il_dst += cplen;
1795 			offt += cplen;
1796 			len -= cplen;
1797 			if (t_cplen == sizeof(struct sq_sge))
1798 				pull_dst = true;
1799 		}
1800 	}
1801 
1802 	return t_len;
1803 }
1804 
1805 static unsigned int bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
1806 					struct bnxt_qplib_sge *ssge,
1807 					u32 nsge, u32 *idx)
1808 {
1809 	struct sq_sge *dsge;
1810 	int indx, len = 0;
1811 
1812 	for (indx = 0; indx < nsge; indx++, (*idx)++) {
1813 		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
1814 		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
1815 		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
1816 		dsge->size = cpu_to_le32(ssge[indx].size);
1817 		len += ssge[indx].size;
1818 	}
1819 
1820 	return len;
1821 }
1822 
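/* Slots are 16-byte units (sizeof(struct sq_sge)).  Illustrative example,
 * assuming the usual 32-byte send header (base + extended slots, as consumed
 * in bnxt_qplib_post_send()): a 2-SGE send needs 32 + 2 * 16 = 64 bytes,
 * i.e. 4 slots.  In static WQE mode every WQE is charged a fixed 8 slots
 * (128 bytes) regardless of its actual size.
 */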
1823 static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
1824 				     struct bnxt_qplib_swqe *wqe,
1825 				     u16 *wqe_sz, u16 *qdf, u8 mode)
1826 {
1827 	u32 ilsize, bytes;
1828 	u16 nsge;
1829 	u16 slot;
1830 
1831 	nsge = wqe->num_sge;
1832 	/* sq_send_hdr is a slight misnomer here; the RQ header is the same size. */
1833 	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
1834 	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1835 		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
1836 		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
1837 		bytes += sizeof(struct sq_send_hdr);
1838 	}
1839 
1840 	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
1841 	slot = bytes >> 4;
1842 	*wqe_sz = slot;
1843 	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
1844 		slot = 8;
1845 	return slot;
1846 }
1847 
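/* The PSN/MSN "pad" pages hold one fixed-stride entry per WQE (or per MSN
 * index in host-MSN mode).  Illustrative math, assuming a 16-byte stride and
 * 4K pages: 256 entries fit per page, so tail 300 with a zero page offset
 * maps to pad page 1, entry 44.
 */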
1848 static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
1849 				     struct bnxt_qplib_swq *swq, bool hw_retx)
1850 {
1851 	struct bnxt_qplib_hwq *hwq;
1852 	u32 pg_num, pg_indx;
1853 	void *buff;
1854 	u32 tail;
1855 
1856 	hwq = &sq->hwq;
1857 	if (!hwq->pad_pg)
1858 		return;
1859 	tail = swq->slot_idx / sq->dbinfo.max_slot;
1860 	if (hw_retx) {
1861 		/* For HW retx use qp msn index */
1862 		tail = qp->msn;
1863 		tail %= qp->msn_tbl_sz;
1864 	}
1865 	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1866 	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1867 	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1868 	swq->psn_ext = buff;
1869 	swq->psn_search = buff;
1870 }
1871 
1872 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1873 {
1874 	struct bnxt_qplib_q *sq = &qp->sq;
1875 
1876 	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1877 }
1878 
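/* Post one SWQE to the SQ: validate QP state, reserve hwq slots (including
 * the queue-full delta), build the two 16-byte header slots, copy the
 * payload as inline data or SGEs, fill the opcode-specific fields, and
 * record PSN/MSN bookkeeping for RC retransmission.  WQEs posted while the
 * QP is in the error state are still queued and later completed through the
 * NQ worker, so the consumer sees flush completions.
 */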
1879 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1880 			 struct bnxt_qplib_swqe *wqe)
1881 {
1882 	struct bnxt_qplib_nq_work *nq_work = NULL;
1883 	int i, rc = 0, data_len = 0, pkt_num = 0;
1884 	struct bnxt_qplib_q *sq = &qp->sq;
1885 	struct bnxt_qplib_hwq *hwq;
1886 	struct bnxt_qplib_swq *swq;
1887 	bool sch_handler = false;
1888 	u32 wqe_idx, slots, idx;
1889 	u16 wqe_sz, qdf = 0;
1890 	bool msn_update;
1891 	void *base_hdr;
1892 	void *ext_hdr;
1893 	__le32 temp32;
1894 
1895 	hwq = &sq->hwq;
1896 	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1897 	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1898 		dev_err(&hwq->pdev->dev,
1899 			"FP: QP (0x%x) is in the 0x%x state\n",
1900 			qp->id, qp->state);
1901 		rc = -EINVAL;
1902 		goto done;
1903 	}
1904 
1905 	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1906 	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1907 		dev_err(&hwq->pdev->dev,
1908 			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1909 			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1910 		rc = -ENOMEM;
1911 		goto done;
1912 	}
1913 
1914 	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1915 	bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
1916 
1917 	idx = 0;
1918 	swq->slot_idx = hwq->prod;
1919 	swq->slots = slots;
1920 	swq->wr_id = wqe->wr_id;
1921 	swq->type = wqe->type;
1922 	swq->flags = wqe->flags;
1923 	swq->start_psn = sq->psn & BTH_PSN_MASK;
1924 	if (qp->sig_type)
1925 		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1926 
1927 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1928 		sch_handler = true;
1929 		dev_dbg(&hwq->pdev->dev,
1930 			"%s Error QP. Scheduling for poll_cq\n", __func__);
1931 		goto queue_err;
1932 	}
1933 
1934 	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1935 	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1936 	memset(base_hdr, 0, sizeof(struct sq_sge));
1937 	memset(ext_hdr, 0, sizeof(struct sq_sge));
1938 
1939 	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1940 		/* Copy the inline data */
1941 		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1942 	else
1943 		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1944 					       &idx);
1945 	if (data_len > BNXT_RE_MAX_MSG_SIZE) {
1946 		rc = -EINVAL;
1947 		goto done;
1948 	}
1949 	/* Update the MSN table only for WQEs that go out on the wire */
1950 	msn_update = true;
1951 	/* Specifics */
1952 	switch (wqe->type) {
1953 	case BNXT_QPLIB_SWQE_TYPE_SEND:
1954 		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1955 			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1956 			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1957 			/* Assemble info for Raw Ethertype QPs */
1958 
1959 			sqe->wqe_type = wqe->type;
1960 			sqe->flags = wqe->flags;
1961 			sqe->wqe_size = wqe_sz;
1962 			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1963 			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1964 			sqe->length = cpu_to_le32(data_len);
1965 			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1966 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1967 				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1968 
1969 			break;
1970 		}
1971 		fallthrough;
1972 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1973 	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1974 	{
1975 		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1976 		struct sq_send_hdr *sqe = base_hdr;
1977 
1978 		sqe->wqe_type = wqe->type;
1979 		sqe->flags = wqe->flags;
1980 		sqe->wqe_size = wqe_sz;
1981 		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1982 		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1983 		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1984 			sqe->q_key = cpu_to_le32(wqe->send.q_key);
1985 			sqe->length = cpu_to_le32(data_len);
1986 			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1987 			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1988 						      SQ_SEND_DST_QP_MASK);
1989 			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1990 						    SQ_SEND_AVID_MASK);
1991 			msn_update = false;
1992 		} else {
1993 			sqe->length = cpu_to_le32(data_len);
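			/* RC consumes one PSN per MTU-sized packet:
			 * DIV_ROUND_UP(data_len, mtu), minimum 1 so that
			 * zero-length messages still advance the PSN.
			 */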
1994 			if (qp->mtu)
1995 				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1996 			if (!pkt_num)
1997 				pkt_num = 1;
1998 			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1999 		}
2000 		break;
2001 	}
2002 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2003 	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2004 	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2005 	{
2006 		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
2007 		struct sq_rdma_hdr *sqe = base_hdr;
2008 
2009 		sqe->wqe_type = wqe->type;
2010 		sqe->flags = wqe->flags;
2011 		sqe->wqe_size = wqe_sz;
2012 		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
2013 		sqe->length = cpu_to_le32((u32)data_len);
2014 		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
2015 		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
2016 		if (qp->mtu)
2017 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
2018 		if (!pkt_num)
2019 			pkt_num = 1;
2020 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
2021 		break;
2022 	}
2023 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2024 	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2025 	{
2026 		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
2027 		struct sq_atomic_hdr *sqe = base_hdr;
2028 
2029 		sqe->wqe_type = wqe->type;
2030 		sqe->flags = wqe->flags;
2031 		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
2032 		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
2033 		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
2034 		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
2035 		if (qp->mtu)
2036 			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
2037 		if (!pkt_num)
2038 			pkt_num = 1;
2039 		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
2040 		break;
2041 	}
2042 	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2043 	{
2044 		struct sq_localinvalidate *sqe = base_hdr;
2045 
2046 		sqe->wqe_type = wqe->type;
2047 		sqe->flags = wqe->flags;
2048 		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
2049 		msn_update = false;
2050 		break;
2051 	}
2052 	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
2053 	{
2054 		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
2055 		struct sq_fr_pmr_hdr *sqe = base_hdr;
2056 
2057 		sqe->wqe_type = wqe->type;
2058 		sqe->flags = wqe->flags;
2059 		sqe->access_cntl = wqe->frmr.access_cntl |
2060 				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2061 		sqe->zero_based_page_size_log =
2062 			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
2063 			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
2064 			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
2065 		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
2066 		temp32 = cpu_to_le32(wqe->frmr.length);
2067 		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
2068 		sqe->numlevels_pbl_page_size_log =
2069 			((wqe->frmr.pbl_pg_sz_log <<
2070 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
2071 					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
2072 			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
2073 					SQ_FR_PMR_NUMLEVELS_MASK);
2074 
2075 		for (i = 0; i < wqe->frmr.page_list_len; i++)
2076 			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
2077 						wqe->frmr.page_list[i] |
2078 						PTU_PTE_VALID);
2079 		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
2080 		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
2081 		msn_update = false;
2082 
2083 		break;
2084 	}
2085 	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
2086 	{
2087 		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
2088 		struct sq_bind_hdr *sqe = base_hdr;
2089 
2090 		sqe->wqe_type = wqe->type;
2091 		sqe->flags = wqe->flags;
2092 		sqe->access_cntl = wqe->bind.access_cntl;
2093 		sqe->mw_type_zero_based = wqe->bind.mw_type |
2094 			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
2095 		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
2096 		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
2097 		ext_sqe->va = cpu_to_le64(wqe->bind.va);
2098 		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
2099 		msn_update = false;
2100 		break;
2101 	}
2102 	default:
2103 		/* Bad wqe, return error */
2104 		rc = -EINVAL;
2105 		goto done;
2106 	}
2107 	if (!qp->is_host_msn_tbl || msn_update) {
2108 		swq->next_psn = sq->psn & BTH_PSN_MASK;
2109 		bnxt_qplib_fill_psn_search(qp, wqe, swq);
2110 	}
2111 queue_err:
2112 	bnxt_qplib_swq_mod_start(sq, wqe_idx);
2113 	bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
2114 	qp->wqe_cnt++;
2115 done:
2116 	if (sch_handler) {
2117 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2118 		if (nq_work) {
2119 			nq_work->cq = qp->scq;
2120 			nq_work->nq = qp->scq->nq;
2121 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2122 			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
2123 		} else {
2124 			dev_err(&hwq->pdev->dev,
2125 				"FP: Failed to allocate SQ nq_work!\n");
2126 			rc = -ENOMEM;
2127 		}
2128 	}
2129 	return rc;
2130 }
2131 
2132 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2133 {
2134 	struct bnxt_qplib_q *rq = &qp->rq;
2135 
2136 	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
2137 }
2138 
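/* Post one SWQE to the RQ.  RQ WQEs are fixed-size (rq->dbinfo.max_slot
 * slots), and a zero-SGE post still produces one zero-length SGE so the
 * hardware sees a well-formed WQE.  The swq index is stashed in the WQE
 * header's wr_id field and echoed back in the completion.
 */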
2139 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
2140 			 struct bnxt_qplib_swqe *wqe)
2141 {
2142 	struct bnxt_qplib_nq_work *nq_work = NULL;
2143 	struct bnxt_qplib_q *rq = &qp->rq;
2144 	struct rq_wqe_hdr *base_hdr;
2145 	struct rq_ext_hdr *ext_hdr;
2146 	struct bnxt_qplib_hwq *hwq;
2147 	struct bnxt_qplib_swq *swq;
2148 	bool sch_handler = false;
2149 	u32 wqe_idx, idx;
2150 	u16 wqe_sz;
2151 	int rc = 0;
2152 
2153 	hwq = &rq->hwq;
2154 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2155 		dev_err(&hwq->pdev->dev,
2156 			"FP: QP (0x%x) is in the 0x%x state\n",
2157 			qp->id, qp->state);
2158 		rc = -EINVAL;
2159 		goto done;
2160 	}
2161 
2162 	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
2163 		dev_err(&hwq->pdev->dev,
2164 			"FP: QP (0x%x) RQ is full!\n", qp->id);
2165 		rc = -EINVAL;
2166 		goto done;
2167 	}
2168 
2169 	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2170 	swq->wr_id = wqe->wr_id;
2171 	swq->slots = rq->dbinfo.max_slot;
2172 
2173 	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2174 		sch_handler = true;
2175 		dev_dbg(&hwq->pdev->dev,
2176 			"%s: Error QP. Scheduling for poll_cq\n", __func__);
2177 		goto queue_err;
2178 	}
2179 
2180 	idx = 0;
2181 	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2182 	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2183 	memset(base_hdr, 0, sizeof(struct sq_sge));
2184 	memset(ext_hdr, 0, sizeof(struct sq_sge));
2185 	wqe_sz = (sizeof(struct rq_wqe_hdr) +
2186 		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2187 	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2188 	if (!wqe->num_sge) {
2189 		struct sq_sge *sge;
2190 
2191 		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2192 		sge->size = 0;
2193 		wqe_sz++;
2194 	}
2195 	base_hdr->wqe_type = wqe->type;
2196 	base_hdr->flags = wqe->flags;
2197 	base_hdr->wqe_size = wqe_sz;
2198 	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2199 queue_err:
2200 	bnxt_qplib_swq_mod_start(rq, wqe_idx);
2201 	bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
2202 done:
2203 	if (sch_handler) {
2204 		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2205 		if (nq_work) {
2206 			nq_work->cq = qp->rcq;
2207 			nq_work->nq = qp->rcq->nq;
2208 			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2209 			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2210 		} else {
2211 			dev_err(&hwq->pdev->dev,
2212 				"FP: Failed to allocate RQ nq_work!\n");
2213 			rc = -ENOMEM;
2214 		}
2215 	}
2216 
2217 	return rc;
2218 }
2219 
2220 /* CQ */
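/* Create a CQ: allocate the hardware queue, then issue CREATE_CQ over the
 * RCFW channel.  CQ coalescing parameters are programmed only when the
 * device advertises support; the doorbell is armed (ARMENA) once the CQ
 * context is set up, so the NQ can start reporting events.
 */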
2221 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2222 {
2223 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2224 	struct bnxt_qplib_hwq_attr hwq_attr = {};
2225 	struct creq_create_cq_resp resp = {};
2226 	struct bnxt_qplib_cmdqmsg msg = {};
2227 	struct cmdq_create_cq req = {};
2228 	struct bnxt_qplib_pbl *pbl;
2229 	u32 coalescing = 0;
2230 	u32 pg_sz_lvl;
2231 	int rc;
2232 
2233 	if (!cq->dpi) {
2234 		dev_err(&rcfw->pdev->dev,
2235 			"FP: CREATE_CQ failed due to NULL DPI\n");
2236 		return -EINVAL;
2237 	}
2238 
2239 	cq->dbinfo.flags = 0;
2240 	hwq_attr.res = res;
2241 	hwq_attr.depth = cq->max_wqe;
2242 	hwq_attr.stride = sizeof(struct cq_base);
2243 	hwq_attr.type = HWQ_TYPE_QUEUE;
2244 	hwq_attr.sginfo = &cq->sg_info;
2245 	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2246 	if (rc)
2247 		return rc;
2248 
2249 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2250 				 CMDQ_BASE_OPCODE_CREATE_CQ,
2251 				 sizeof(req));
2252 
2253 	req.dpi = cpu_to_le32(cq->dpi->dpi);
2254 	req.cq_handle = cpu_to_le64(cq->cq_handle);
2255 	req.cq_size = cpu_to_le32(cq->max_wqe);
2256 
2257 	if (_is_cq_coalescing_supported(res->dattr->dev_cap_flags2)) {
2258 		req.flags |= cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID);
2259 		coalescing |= ((cq->coalescing->buf_maxtime <<
2260 				CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) &
2261 			       CMDQ_CREATE_CQ_BUF_MAXTIME_MASK);
2262 		coalescing |= ((cq->coalescing->normal_maxbuf <<
2263 				CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT) &
2264 			       CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK);
2265 		coalescing |= ((cq->coalescing->during_maxbuf <<
2266 				CMDQ_CREATE_CQ_DURING_MAXBUF_SFT) &
2267 			       CMDQ_CREATE_CQ_DURING_MAXBUF_MASK);
2268 		if (cq->coalescing->en_ring_idle_mode)
2269 			coalescing |= CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
2270 		else
2271 			coalescing &= ~CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
2272 		req.coalescing = cpu_to_le32(coalescing);
2273 	}
2274 
2275 	pbl = &cq->hwq.pbl[PBL_LVL_0];
2276 	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2277 		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
2278 	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2279 	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2280 	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2281 	req.cq_fco_cnq_id = cpu_to_le32(
2282 			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2283 			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2284 	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2285 				sizeof(resp), 0);
2286 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2287 	if (rc)
2288 		goto fail;
2289 
2290 	cq->id = le32_to_cpu(resp.xid);
2291 	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2292 	init_waitqueue_head(&cq->waitq);
2293 	INIT_LIST_HEAD(&cq->sqf_head);
2294 	INIT_LIST_HEAD(&cq->rqf_head);
2295 	spin_lock_init(&cq->compl_lock);
2296 	spin_lock_init(&cq->flush_lock);
2297 
2298 	cq->dbinfo.hwq = &cq->hwq;
2299 	cq->dbinfo.xid = cq->id;
2300 	cq->dbinfo.db = cq->dpi->dbr;
2301 	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2302 	cq->dbinfo.flags = 0;
2303 	cq->dbinfo.toggle = 0;
2304 
2305 	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2306 
2307 	return 0;
2308 
2309 fail:
2310 	bnxt_qplib_free_hwq(res, &cq->hwq);
2311 	return rc;
2312 }
2313 
2314 void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2315 				   struct bnxt_qplib_cq *cq)
2316 {
2317 	bnxt_qplib_free_hwq(res, &cq->hwq);
2318 	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2319 	/* Reset only the cons bit in the flags */
2320 	cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
2321 }
2322 
2323 int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2324 			 int new_cqes)
2325 {
2326 	struct bnxt_qplib_hwq_attr hwq_attr = {};
2327 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2328 	struct creq_resize_cq_resp resp = {};
2329 	struct bnxt_qplib_cmdqmsg msg = {};
2330 	struct cmdq_resize_cq req = {};
2331 	struct bnxt_qplib_pbl *pbl;
2332 	u32 pg_sz, lvl, new_sz;
2333 	int rc;
2334 
2335 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2336 				 CMDQ_BASE_OPCODE_RESIZE_CQ,
2337 				 sizeof(req));
2338 	hwq_attr.sginfo = &cq->sg_info;
2339 	hwq_attr.res = res;
2340 	hwq_attr.depth = new_cqes;
2341 	hwq_attr.stride = sizeof(struct cq_base);
2342 	hwq_attr.type = HWQ_TYPE_QUEUE;
2343 	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2344 	if (rc)
2345 		return rc;
2346 
2347 	req.cq_cid = cpu_to_le32(cq->id);
2348 	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2349 	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2350 	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2351 				       CMDQ_RESIZE_CQ_LVL_MASK;
2352 	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2353 		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2354 	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2355 	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2356 
2357 	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2358 				sizeof(resp), 0);
2359 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2360 	return rc;
2361 }
2362 
2363 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2364 {
2365 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2366 	struct creq_destroy_cq_resp resp = {};
2367 	struct bnxt_qplib_cmdqmsg msg = {};
2368 	struct cmdq_destroy_cq req = {};
2369 	u16 total_cnq_events;
2370 	int rc;
2371 
2372 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2373 				 CMDQ_BASE_OPCODE_DESTROY_CQ,
2374 				 sizeof(req));
2375 
2376 	req.cq_cid = cpu_to_le32(cq->id);
2377 	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2378 				sizeof(resp), 0);
2379 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2380 	if (rc)
2381 		return rc;
2382 	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2383 	__wait_for_all_nqes(cq, total_cnq_events);
2384 	bnxt_qplib_free_hwq(res, &cq->hwq);
2385 	return 0;
2386 }
2387 
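/* Generate flush completions (FLUSHED_ERR) for every SQE still outstanding
 * between swq_start and swq_last, bounded by the caller's budget.  FENCE
 * WQEs never produce a user-visible completion and are skipped.
 */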
2388 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2389 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2390 {
2391 	struct bnxt_qplib_cqe *cqe;
2392 	u32 start, last;
2393 	int rc = 0;
2394 
2395 	/* Now complete all outstanding SQEs with FLUSHED_ERR */
2396 	start = sq->swq_start;
2397 	cqe = *pcqe;
2398 	while (*budget) {
2399 		last = sq->swq_last;
2400 		if (start == last)
2401 			break;
2402 		/* Skip the FENCE WQE completions */
2403 		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2404 			bnxt_qplib_cancel_phantom_processing(qp);
2405 			goto skip_compl;
2406 		}
2407 		memset(cqe, 0, sizeof(*cqe));
2408 		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2409 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2410 		cqe->qp_handle = (u64)(unsigned long)qp;
2411 		cqe->wr_id = sq->swq[last].wr_id;
2412 		cqe->src_qp = qp->id;
2413 		cqe->type = sq->swq[last].type;
2414 		cqe++;
2415 		(*budget)--;
2416 skip_compl:
2417 		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2418 					 sq->swq[last].slots, &sq->dbinfo.flags);
2419 		sq->swq_last = sq->swq[last].next_idx;
2420 	}
2421 	*pcqe = cqe;
2422 	if (!(*budget) && sq->swq_last != start)
2423 		/* Out of budget */
2424 		rc = -EAGAIN;
2425 
2426 	return rc;
2427 }
2428 
2429 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2430 		      struct bnxt_qplib_cqe **pcqe, int *budget)
2431 {
2432 	struct bnxt_qplib_cqe *cqe;
2433 	u32 start, last;
2434 	int opcode = 0;
2435 	int rc = 0;
2436 
2437 	switch (qp->type) {
2438 	case CMDQ_CREATE_QP1_TYPE_GSI:
2439 		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2440 		break;
2441 	case CMDQ_CREATE_QP_TYPE_RC:
2442 		opcode = CQ_BASE_CQE_TYPE_RES_RC;
2443 		break;
2444 	case CMDQ_CREATE_QP_TYPE_UD:
2445 	case CMDQ_CREATE_QP_TYPE_GSI:
2446 		opcode = CQ_BASE_CQE_TYPE_RES_UD;
2447 		break;
2448 	}
2449 
2450 	/* Flush the rest of the RQ */
2451 	start = rq->swq_start;
2452 	cqe = *pcqe;
2453 	while (*budget) {
2454 		last = rq->swq_last;
2455 		if (last == start)
2456 			break;
2457 		memset(cqe, 0, sizeof(*cqe));
2458 		cqe->status =
2459 		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2460 		cqe->opcode = opcode;
2461 		cqe->qp_handle = (unsigned long)qp;
2462 		cqe->wr_id = rq->swq[last].wr_id;
2463 		cqe++;
2464 		(*budget)--;
2465 		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2466 					 rq->swq[last].slots, &rq->dbinfo.flags);
2467 		rq->swq_last = rq->swq[last].next_idx;
2468 	}
2469 	*pcqe = cqe;
2470 	if (!*budget && rq->swq_last != start)
2471 		/* Out of budget */
2472 		rc = -EAGAIN;
2473 
2474 	return rc;
2475 }
2476 
2477 void bnxt_qplib_mark_qp_error(void *qp_handle)
2478 {
2479 	struct bnxt_qplib_qp *qp = qp_handle;
2480 
2481 	if (!qp)
2482 		return;
2483 
2484 	/* Must block new posting of SQ and RQ */
2485 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2486 	bnxt_qplib_cancel_phantom_processing(qp);
2487 }
2488 
2489 /* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive).
2490  *       CQEs are tracked from sw_cq_cons to max_elements, valid only if VALID=1.
2491  */
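/* WA 9060: the hardware may fold a fence ("phantom") WQE's completion into a
 * later CQE.  A marked psn_search entry (bit 31 of flags_next_psn) tells us
 * to stop completing, arm the CQ for all entries, and peek ahead for the
 * phantom's own REQ CQE; once it shows up, completion resumes one WQE at a
 * time (sq->single).  Inferred from the logic below; the erratum itself is
 * not documented here.
 */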
2492 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2493 		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2494 {
2495 	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
2496 	struct bnxt_qplib_q *sq = &qp->sq;
2497 	struct cq_req *peek_req_hwcqe;
2498 	struct bnxt_qplib_qp *peek_qp;
2499 	struct bnxt_qplib_q *peek_sq;
2500 	struct bnxt_qplib_swq *swq;
2501 	struct cq_base *peek_hwcqe;
2502 	int i, rc = 0;
2503 
2504 	/* Normal mode */
2505 	/* Check for the psn_search marking before completing */
2506 	swq = &sq->swq[swq_last];
2507 	if (swq->psn_search &&
2508 	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2509 		/* Unmark */
2510 		swq->psn_search->flags_next_psn = cpu_to_le32
2511 			(le32_to_cpu(swq->psn_search->flags_next_psn)
2512 				     & ~0x80000000);
2513 		dev_dbg(&cq->hwq.pdev->dev,
2514 			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2515 			cq_cons, qp->id, swq_last, cqe_sq_cons);
2516 		sq->condition = true;
2517 		sq->send_phantom = true;
2518 
2519 		/* TODO: Only ARM if the previous SQE is ARMALL */
2520 		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2521 		rc = -EAGAIN;
2522 		goto out;
2523 	}
2524 	if (sq->condition) {
2525 		/* Peek at the completions */
2526 		peek_flags = cq->dbinfo.flags;
2527 		peek_sw_cq_cons = cq_cons;
2528 		i = cq->hwq.max_elements;
2529 		while (i--) {
2530 			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2531 						       peek_sw_cq_cons, NULL);
2532 			/* If the next hwcqe is VALID */
2533 			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
2534 				/*
2535 				 * The valid test of the entry must be done
2536 				 * first before reading any further.
2537 				 */
2538 				dma_rmb();
2539 				/* If the next hwcqe is a REQ */
2540 				if ((peek_hwcqe->cqe_type_toggle &
2541 				    CQ_BASE_CQE_TYPE_MASK) ==
2542 				    CQ_BASE_CQE_TYPE_REQ) {
2543 					peek_req_hwcqe = (struct cq_req *)
2544 							 peek_hwcqe;
2545 					peek_qp = (struct bnxt_qplib_qp *)
2546 						((unsigned long)
2547 						 le64_to_cpu
2548 						 (peek_req_hwcqe->qp_handle));
2549 					peek_sq = &peek_qp->sq;
2550 					peek_sq_cons_idx =
2551 						((le16_to_cpu(
2552 						  peek_req_hwcqe->sq_cons_idx)
2553 						  - 1) % sq->max_wqe);
2554 					/* If the hwcqe's sq's wr_id matches */
2555 					if (peek_sq == sq &&
2556 					    sq->swq[peek_sq_cons_idx].wr_id ==
2557 					    BNXT_QPLIB_FENCE_WRID) {
2558 						/*
2559 						 *  Unbreak only if the phantom
2560 						 *  comes back
2561 						 */
2562 						dev_dbg(&cq->hwq.pdev->dev,
2563 							"FP: Got Phantom CQE\n");
2564 						sq->condition = false;
2565 						sq->single = true;
2566 						rc = 0;
2567 						goto out;
2568 					}
2569 				}
2570 				/* Valid but not the phantom, so keep looping */
2571 			} else {
2572 				/* Not valid yet, just exit and wait */
2573 				rc = -EINVAL;
2574 				goto out;
2575 			}
2576 			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
2577 						 &peek_sw_cq_cons,
2578 						 1, &peek_flags);
2579 		}
2580 		dev_err(&cq->hwq.pdev->dev,
2581 			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2582 			cq_cons, qp->id, swq_last, cqe_sq_cons);
2583 		rc = -EINVAL;
2584 	}
2585 out:
2586 	return rc;
2587 }
2588 
2589 static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
2590 {
2591 	struct bnxt_qplib_hwq *sq_hwq;
2592 	struct bnxt_qplib_swq *swq;
2593 	int cqe_sq_cons = -1;
2594 	u32 start, last;
2595 
2596 	sq_hwq = &sq->hwq;
2597 
2598 	start = sq->swq_start;
2599 	last = sq->swq_last;
2600 
2601 	while (last != start) {
2602 		swq = &sq->swq[last];
2603 		if (swq->slot_idx == cqe_slot) {
2604 			cqe_sq_cons = swq->next_idx;
2605 			dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
2606 				__func__, cqe_sq_cons, cqe_slot);
2607 			break;
2608 		}
2609 
2610 		last = swq->next_idx;
2611 	}
2612 	return cqe_sq_cons;
2613 }
2614 
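/* A single REQ CQE can acknowledge several aggregated WQEs.  Walk the swq
 * from the current consumer up to the CQE's sq_cons_idx, fabricating OK
 * completions for the signaled ones; an error status always completes the
 * final WQE regardless of signaling, and moves the QP to the flush list.
 */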
2615 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2616 				     struct cq_req *hwcqe,
2617 				     struct bnxt_qplib_cqe **pcqe, int *budget,
2618 				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2619 {
2620 	struct bnxt_qplib_swq *swq;
2621 	struct bnxt_qplib_cqe *cqe;
2622 	u32 cqe_sq_cons, slot_num;
2623 	struct bnxt_qplib_qp *qp;
2624 	struct bnxt_qplib_q *sq;
2625 	int cqe_cons;
2626 	int rc = 0;
2627 
2628 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2629 				      le64_to_cpu(hwcqe->qp_handle));
2630 	if (!qp) {
2631 		dev_err(&cq->hwq.pdev->dev,
2632 			"FP: Process Req qp is NULL\n");
2633 		return -EINVAL;
2634 	}
2635 	sq = &qp->sq;
2636 
2637 	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
2638 	if (qp->sq.flushed) {
2639 		dev_dbg(&cq->hwq.pdev->dev,
2640 			"%s: QP in Flush QP = %p\n", __func__, qp);
2641 		goto done;
2642 	}
2643 
2644 	if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
2645 		slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
2646 		cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
2647 		if (cqe_cons < 0) {
2648 			dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
2649 				__func__, slot_num);
2650 			goto done;
2651 		}
2652 		cqe_sq_cons = cqe_cons;
2653 		dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
2654 			__func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
2655 	}
2656 
2657 	/* Require to walk the sq's swq to fabricate CQEs for all previously
2658 	 * signaled SWQEs due to CQE aggregation from the current sq cons
2659 	 * to the cqe_sq_cons
2660 	 */
2661 	cqe = *pcqe;
2662 	while (*budget) {
2663 		if (sq->swq_last == cqe_sq_cons)
2664 			/* Done */
2665 			break;
2666 
2667 		swq = &sq->swq[sq->swq_last];
2668 		memset(cqe, 0, sizeof(*cqe));
2669 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2670 		cqe->qp_handle = (u64)(unsigned long)qp;
2671 		cqe->src_qp = qp->id;
2672 		cqe->wr_id = swq->wr_id;
2673 		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2674 			goto skip;
2675 		cqe->type = swq->type;
2676 
2677 		/* For the last CQE, check for status.  For errors, regardless
2678 		 * of the request being signaled or not, it must complete with
2679 		 * the hwcqe error status
2680 		 */
2681 		if (swq->next_idx == cqe_sq_cons &&
2682 		    hwcqe->status != CQ_REQ_STATUS_OK) {
2683 			cqe->status = hwcqe->status;
2684 			dev_err(&cq->hwq.pdev->dev,
2685 				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2686 				sq->swq_last, cqe->wr_id, cqe->status);
2687 			cqe++;
2688 			(*budget)--;
2689 			bnxt_qplib_mark_qp_error(qp);
2690 			/* Add qp to flush list of the CQ */
2691 			bnxt_qplib_add_flush_qp(qp);
2692 		} else {
2693 			/* Before we complete, do WA 9060 */
2694 			if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
2695 				if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2696 					      cqe_sq_cons)) {
2697 					*lib_qp = qp;
2698 					goto out;
2699 				}
2700 			}
2701 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2702 				cqe->status = CQ_REQ_STATUS_OK;
2703 				cqe++;
2704 				(*budget)--;
2705 			}
2706 		}
2707 skip:
2708 		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2709 					 swq->slots, &sq->dbinfo.flags);
2710 		sq->swq_last = swq->next_idx;
2711 		if (sq->single)
2712 			break;
2713 	}
2714 out:
2715 	*pcqe = cqe;
2716 	if (sq->swq_last != cqe_sq_cons) {
2717 		/* Out of budget */
2718 		rc = -EAGAIN;
2719 		goto done;
2720 	}
2721 	/*
2722 	 * Back to normal completion mode only after it has completed all of
2723 	 * the WC for this CQE
2724 	 */
2725 	sq->single = false;
2726 done:
2727 	return rc;
2728 }
2729 
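/* Return a consumed SRQE tag to the tail of the SRQ free list (linked via
 * the swq next_idx chain) and advance the SRQ consumer, under the hwq lock.
 */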
2730 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2731 {
2732 	spin_lock(&srq->hwq.lock);
2733 	srq->swq[srq->last_idx].next_idx = (int)tag;
2734 	srq->last_idx = (int)tag;
2735 	srq->swq[srq->last_idx].next_idx = -1;
2736 	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
2737 				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
2738 	spin_unlock(&srq->hwq.lock);
2739 }
2740 
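/* Responder-side RC completion: the wr_id index in the CQE refers either to
 * an SRQ entry (released back to the free list) or to the next RQ swq entry,
 * which must match rq->swq_last since RQ WQEs complete in order.
 */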
2741 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2742 					struct cq_res_rc *hwcqe,
2743 					struct bnxt_qplib_cqe **pcqe,
2744 					int *budget)
2745 {
2746 	struct bnxt_qplib_srq *srq;
2747 	struct bnxt_qplib_cqe *cqe;
2748 	struct bnxt_qplib_qp *qp;
2749 	struct bnxt_qplib_q *rq;
2750 	u32 wr_id_idx;
2751 
2752 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2753 				      le64_to_cpu(hwcqe->qp_handle));
2754 	if (!qp) {
2755 		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2756 		return -EINVAL;
2757 	}
2758 	if (qp->rq.flushed) {
2759 		dev_dbg(&cq->hwq.pdev->dev,
2760 			"%s: QP in Flush QP = %p\n", __func__, qp);
2761 		return 0;
2762 	}
2763 
2764 	cqe = *pcqe;
2765 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2766 	cqe->length = le32_to_cpu(hwcqe->length);
2767 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2768 	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2769 	cqe->flags = le16_to_cpu(hwcqe->flags);
2770 	cqe->status = hwcqe->status;
2771 	cqe->qp_handle = (u64)(unsigned long)qp;
2772 
2773 	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2774 				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2775 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2776 		srq = qp->srq;
2777 		if (!srq)
2778 			return -EINVAL;
2779 		if (wr_id_idx >= srq->hwq.max_elements) {
2780 			dev_err(&cq->hwq.pdev->dev,
2781 				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2782 				wr_id_idx, srq->hwq.max_elements);
2783 			return -EINVAL;
2784 		}
2785 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2786 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2787 		cqe++;
2788 		(*budget)--;
2789 		*pcqe = cqe;
2790 	} else {
2791 		struct bnxt_qplib_swq *swq;
2792 
2793 		rq = &qp->rq;
2794 		if (wr_id_idx > (rq->max_wqe - 1)) {
2795 			dev_err(&cq->hwq.pdev->dev,
2796 				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2797 				wr_id_idx, rq->max_wqe);
2798 			return -EINVAL;
2799 		}
2800 		if (wr_id_idx != rq->swq_last)
2801 			return -EINVAL;
2802 		swq = &rq->swq[rq->swq_last];
2803 		cqe->wr_id = swq->wr_id;
2804 		cqe++;
2805 		(*budget)--;
2806 		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2807 					 swq->slots, &rq->dbinfo.flags);
2808 		rq->swq_last = swq->next_idx;
2809 		*pcqe = cqe;
2810 
2811 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2812 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2813 			/* Add qp to flush list of the CQ */
2814 			bnxt_qplib_add_flush_qp(qp);
2815 		}
2816 	}
2817 
2818 	return 0;
2819 }
2820 
2821 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2822 					struct cq_res_ud *hwcqe,
2823 					struct bnxt_qplib_cqe **pcqe,
2824 					int *budget)
2825 {
2826 	struct bnxt_qplib_srq *srq;
2827 	struct bnxt_qplib_cqe *cqe;
2828 	struct bnxt_qplib_qp *qp;
2829 	struct bnxt_qplib_q *rq;
2830 	u32 wr_id_idx;
2831 
2832 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2833 				      le64_to_cpu(hwcqe->qp_handle));
2834 	if (!qp) {
2835 		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2836 		return -EINVAL;
2837 	}
2838 	if (qp->rq.flushed) {
2839 		dev_dbg(&cq->hwq.pdev->dev,
2840 			"%s: QP in Flush QP = %p\n", __func__, qp);
2841 		return 0;
2842 	}
2843 	cqe = *pcqe;
2844 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2845 	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2846 	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2847 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2848 	cqe->flags = le16_to_cpu(hwcqe->flags);
2849 	cqe->status = hwcqe->status;
2850 	cqe->qp_handle = (u64)(unsigned long)qp;
2851 	/* FIXME: Endianness fix needed for smac */
2852 	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2853 	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2854 				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2855 	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2856 				  ((le32_to_cpu(
2857 				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
2858 				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2859 
2860 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2861 		srq = qp->srq;
2862 		if (!srq)
2863 			return -EINVAL;
2864 
2865 		if (wr_id_idx >= srq->hwq.max_elements) {
2866 			dev_err(&cq->hwq.pdev->dev,
2867 				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2868 				wr_id_idx, srq->hwq.max_elements);
2869 			return -EINVAL;
2870 		}
2871 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2872 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2873 		cqe++;
2874 		(*budget)--;
2875 		*pcqe = cqe;
2876 	} else {
2877 		struct bnxt_qplib_swq *swq;
2878 
2879 		rq = &qp->rq;
2880 		if (wr_id_idx > (rq->max_wqe - 1)) {
2881 			dev_err(&cq->hwq.pdev->dev,
2882 				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2883 				wr_id_idx, rq->max_wqe);
2884 			return -EINVAL;
2885 		}
2886 
2887 		if (rq->swq_last != wr_id_idx)
2888 			return -EINVAL;
2889 		swq = &rq->swq[rq->swq_last];
2890 		cqe->wr_id = swq->wr_id;
2891 		cqe++;
2892 		(*budget)--;
2893 		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2894 					 swq->slots, &rq->dbinfo.flags);
2895 		rq->swq_last = swq->next_idx;
2896 		*pcqe = cqe;
2897 
2898 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2899 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2900 			/* Add qp to flush list of the CQ */
2901 			bnxt_qplib_add_flush_qp(qp);
2902 		}
2903 	}
2904 
2905 	return 0;
2906 }
2907 
2908 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2909 {
2910 	struct cq_base *hw_cqe;
2911 	bool rc = true;
2912 
2913 	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
2914 	/* Check for Valid bit. If the CQE is valid, return false */
2915 	rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
2916 	return rc;
2917 }
2918 
2919 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2920 						struct cq_res_raweth_qp1 *hwcqe,
2921 						struct bnxt_qplib_cqe **pcqe,
2922 						int *budget)
2923 {
2924 	struct bnxt_qplib_qp *qp;
2925 	struct bnxt_qplib_q *rq;
2926 	struct bnxt_qplib_srq *srq;
2927 	struct bnxt_qplib_cqe *cqe;
2928 	u32 wr_id_idx;
2929 
2930 	qp = (struct bnxt_qplib_qp *)((unsigned long)
2931 				      le64_to_cpu(hwcqe->qp_handle));
2932 	if (!qp) {
2933 		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2934 		return -EINVAL;
2935 	}
2936 	if (qp->rq.flushed) {
2937 		dev_dbg(&cq->hwq.pdev->dev,
2938 			"%s: QP in Flush QP = %p\n", __func__, qp);
2939 		return 0;
2940 	}
2941 	cqe = *pcqe;
2942 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2943 	cqe->flags = le16_to_cpu(hwcqe->flags);
2944 	cqe->qp_handle = (u64)(unsigned long)qp;
2945 
2946 	wr_id_idx =
2947 		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2948 				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2949 	cqe->src_qp = qp->id;
2950 	if (qp->id == 1 && !cqe->length) {
2951 		/* Work around a hardware length misdetection on QP1 */
2952 		cqe->length = 296;
2953 	} else {
2954 		cqe->length = le16_to_cpu(hwcqe->length);
2955 	}
2956 	cqe->pkey_index = qp->pkey_index;
2957 	memcpy(cqe->smac, qp->smac, ETH_ALEN);
2958 
2959 	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2960 	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2961 	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2962 
2963 	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2964 		srq = qp->srq;
2965 		if (!srq) {
2966 			dev_err(&cq->hwq.pdev->dev,
2967 				"FP: SRQ used but not defined??\n");
2968 			return -EINVAL;
2969 		}
2970 		if (wr_id_idx >= srq->hwq.max_elements) {
2971 			dev_err(&cq->hwq.pdev->dev,
2972 				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2973 				wr_id_idx, srq->hwq.max_elements);
2974 			return -EINVAL;
2975 		}
2976 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2977 		bnxt_qplib_release_srqe(srq, wr_id_idx);
2978 		cqe++;
2979 		(*budget)--;
2980 		*pcqe = cqe;
2981 	} else {
2982 		struct bnxt_qplib_swq *swq;
2983 
2984 		rq = &qp->rq;
2985 		if (wr_id_idx > (rq->max_wqe - 1)) {
2986 			dev_err(&cq->hwq.pdev->dev,
2987 				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2988 				wr_id_idx, rq->max_wqe);
2989 			return -EINVAL;
2990 		}
2991 		if (rq->swq_last != wr_id_idx)
2992 			return -EINVAL;
2993 		swq = &rq->swq[rq->swq_last];
2994 		cqe->wr_id = swq->wr_id;
2995 		cqe++;
2996 		(*budget)--;
2997 		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2998 					 swq->slots, &rq->dbinfo.flags);
2999 		rq->swq_last = swq->next_idx;
3000 		*pcqe = cqe;
3001 
3002 		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
3003 			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
3004 			/* Add qp to flush list of the CQ */
3005 			bnxt_qplib_add_flush_qp(qp);
3006 		}
3007 	}
3008 
3009 	return 0;
3010 }
3011 
3012 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
3013 					  struct cq_terminal *hwcqe,
3014 					  struct bnxt_qplib_cqe **pcqe,
3015 					  int *budget)
3016 {
3017 	struct bnxt_qplib_qp *qp;
3018 	struct bnxt_qplib_q *sq, *rq;
3019 	struct bnxt_qplib_cqe *cqe;
3020 	u32 swq_last = 0, cqe_cons;
3021 	int rc = 0;
3022 
3023 	/* Check the Status */
3024 	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
3025 		dev_warn(&cq->hwq.pdev->dev,
3026 			 "FP: CQ Process Terminal Error status = 0x%x\n",
3027 			 hwcqe->status);
3028 
3029 	qp = (struct bnxt_qplib_qp *)((unsigned long)
3030 				      le64_to_cpu(hwcqe->qp_handle));
3031 	if (!qp)
3032 		return -EINVAL;
3033 
3034 	/* Must block new posting of SQ and RQ */
3035 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
3036 
3037 	sq = &qp->sq;
3038 	rq = &qp->rq;
3039 
3040 	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
3041 	if (cqe_cons == 0xFFFF)
3042 		goto do_rq;
3043 	cqe_cons %= sq->max_sw_wqe;
3044 
3045 	if (qp->sq.flushed) {
3046 		dev_dbg(&cq->hwq.pdev->dev,
3047 			"%s: QP in Flush QP = %p\n", __func__, qp);
3048 		goto sq_done;
3049 	}
3050 
3051 	/* Terminal CQE can also include aggregated successful CQEs prior.
3052 	 * So we must complete all CQEs from the current sq's cons to the
3053 	 * cq_cons with status OK
3054 	 */
3055 	cqe = *pcqe;
3056 	while (*budget) {
3057 		swq_last = sq->swq_last;
3058 		if (swq_last == cqe_cons)
3059 			break;
3060 		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
3061 			memset(cqe, 0, sizeof(*cqe));
3062 			cqe->status = CQ_REQ_STATUS_OK;
3063 			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
3064 			cqe->qp_handle = (u64)(unsigned long)qp;
3065 			cqe->src_qp = qp->id;
3066 			cqe->wr_id = sq->swq[swq_last].wr_id;
3067 			cqe->type = sq->swq[swq_last].type;
3068 			cqe++;
3069 			(*budget)--;
3070 		}
3071 		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
3072 					 sq->swq[swq_last].slots, &sq->dbinfo.flags);
3073 		sq->swq_last = sq->swq[swq_last].next_idx;
3074 	}
3075 	*pcqe = cqe;
3076 	if (!(*budget) && swq_last != cqe_cons) {
3077 		/* Out of budget */
3078 		rc = -EAGAIN;
3079 		goto sq_done;
3080 	}
3081 sq_done:
3082 	if (rc)
3083 		return rc;
3084 do_rq:
3085 	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
3086 	if (cqe_cons == 0xFFFF) {
3087 		goto done;
3088 	} else if (cqe_cons > rq->max_wqe - 1) {
3089 		dev_err(&cq->hwq.pdev->dev,
3090 			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
3091 			cqe_cons, rq->max_wqe);
3092 		rc = -EINVAL;
3093 		goto done;
3094 	}
3095 
3096 	if (qp->rq.flushed) {
3097 		dev_dbg(&cq->hwq.pdev->dev,
3098 			"%s: QP in Flush QP = %p\n", __func__, qp);
3099 		rc = 0;
3100 		goto done;
3101 	}
3102 
3103 	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
3104 	 * from the current rq->cons to the rq->prod regardless what the
3105 	 * rq->cons the terminal CQE indicates
3106 	 */
3107 
3108 	/* Add qp to flush list of the CQ */
3109 	bnxt_qplib_add_flush_qp(qp);
3110 done:
3111 	return rc;
3112 }
3113 
3114 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
3115 					struct cq_cutoff *hwcqe)
3116 {
3117 	/* Check the Status */
3118 	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
3119 		dev_err(&cq->hwq.pdev->dev,
3120 			"FP: CQ Process Cutoff Error status = 0x%x\n",
3121 			hwcqe->status);
3122 		return -EINVAL;
3123 	}
3124 	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
3125 	wake_up_interruptible(&cq->waitq);
3126 
3127 	return 0;
3128 }
3129 
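/* Drain the CQ's SQ and RQ flush lists under flush_lock, fabricating flush
 * completions into the caller's CQE array, bounded by num_cqes.  Returns the
 * number of CQEs produced.
 */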
3130 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
3131 				  struct bnxt_qplib_cqe *cqe,
3132 				  int num_cqes)
3133 {
3134 	struct bnxt_qplib_qp *qp = NULL;
3135 	u32 budget = num_cqes;
3136 	unsigned long flags;
3137 
3138 	spin_lock_irqsave(&cq->flush_lock, flags);
3139 	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
3140 		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
3141 		__flush_sq(&qp->sq, qp, &cqe, &budget);
3142 	}
3143 
3144 	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
3145 		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
3146 		__flush_rq(&qp->rq, qp, &cqe, &budget);
3147 	}
3148 	spin_unlock_irqrestore(&cq->flush_lock, flags);
3149 
3150 	return num_cqes - budget;
3151 }
3152 
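/* Poll up to num_cqes completions.  Each hardware CQE is validated against
 * the epoch flag before dma_rmb() allows reading its body, then dispatched
 * by type.  The consumer doorbell is rung once for the whole batch; -EAGAIN
 * from a handler (budget exhausted, or the WA 9060 peek still pending) stops
 * the loop early.
 */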
3153 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
3154 		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
3155 {
3156 	struct cq_base *hw_cqe;
3157 	int budget, rc = 0;
3158 	u32 hw_polled = 0;
3159 	u8 type;
3160 
3161 	budget = num_cqes;
3162 
3163 	while (budget) {
3164 		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
3165 
3166 		/* Check for Valid bit */
3167 		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
3168 			break;
3169 
3170 		/*
3171 		 * The valid test of the entry must be done first before
3172 		 * reading any further.
3173 		 */
3174 		dma_rmb();
3175 		/* From the device's respective CQE format to qplib_wc */
3176 		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
3177 		switch (type) {
3178 		case CQ_BASE_CQE_TYPE_REQ:
3179 			rc = bnxt_qplib_cq_process_req(cq,
3180 						       (struct cq_req *)hw_cqe,
3181 						       &cqe, &budget,
3182 						       cq->hwq.cons, lib_qp);
3183 			break;
3184 		case CQ_BASE_CQE_TYPE_RES_RC:
3185 			rc = bnxt_qplib_cq_process_res_rc(cq,
3186 							  (struct cq_res_rc *)
3187 							  hw_cqe, &cqe,
3188 							  &budget);
3189 			break;
3190 		case CQ_BASE_CQE_TYPE_RES_UD:
3191 			rc = bnxt_qplib_cq_process_res_ud
3192 					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
3193 					 &budget);
3194 			break;
3195 		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3196 			rc = bnxt_qplib_cq_process_res_raweth_qp1
3197 					(cq, (struct cq_res_raweth_qp1 *)
3198 					 hw_cqe, &cqe, &budget);
3199 			break;
3200 		case CQ_BASE_CQE_TYPE_TERMINAL:
3201 			rc = bnxt_qplib_cq_process_terminal
3202 					(cq, (struct cq_terminal *)hw_cqe,
3203 					 &cqe, &budget);
3204 			break;
3205 		case CQ_BASE_CQE_TYPE_CUT_OFF:
3206 			bnxt_qplib_cq_process_cutoff
3207 					(cq, (struct cq_cutoff *)hw_cqe);
3208 			/* Done processing this CQ */
3209 			goto exit;
3210 		default:
3211 			dev_err(&cq->hwq.pdev->dev,
3212 				"process_cq unknown type 0x%x\n",
3213 				hw_cqe->cqe_type_toggle &
3214 				CQ_BASE_CQE_TYPE_MASK);
3215 			rc = -EINVAL;
3216 			break;
3217 		}
3218 		if (rc < 0) {
3219 			if (rc == -EAGAIN)
3220 				break;
3221 			/* Error while processing the CQE, just skip to the
3222 			 * next one
3223 			 */
3224 			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
3225 				dev_err(&cq->hwq.pdev->dev,
3226 					"process_cqe error rc = 0x%x\n", rc);
3227 		}
3228 		hw_polled++;
3229 		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
3230 					 1, &cq->dbinfo.flags);
3231 
3232 	}
3233 	if (hw_polled)
3234 		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
3235 exit:
3236 	return num_cqes - budget;
3237 }
3238 
3239 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
3240 {
3241 	cq->dbinfo.toggle = cq->toggle;
3242 	if (arm_type)
3243 		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3244 	/* Use cq->arm_state to track whether the CQ handler should run */
3245 	atomic_set(&cq->arm_state, 1);
3246 }
3247 
3248 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3249 {
3250 	flush_workqueue(qp->scq->nq->cqn_wq);
3251 	if (qp->scq != qp->rcq)
3252 		flush_workqueue(qp->rcq->nq->cqn_wq);
3253 }
3254