1 /*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: Fast Path Operators
37 */
38
39 #define dev_fmt(fmt) "QPLIB: " fmt
40
41 #include <linux/interrupt.h>
42 #include <linux/spinlock.h>
43 #include <linux/sched.h>
44 #include <linux/slab.h>
45 #include <linux/pci.h>
46 #include <linux/delay.h>
47 #include <linux/prefetch.h>
48 #include <linux/if_ether.h>
49 #include <rdma/ib_mad.h>
50
51 #include "roce_hsi.h"
52
53 #include "qplib_res.h"
54 #include "qplib_rcfw.h"
55 #include "qplib_sp.h"
56 #include "qplib_fp.h"
57 #include <rdma/ib_addr.h>
58 #include "bnxt_ulp.h"
59 #include "bnxt_re.h"
60 #include "ib_verbs.h"
61
62 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
63
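/* Reset the SQ phantom-completion tracking state so that any in-progress
 * phantom CQE search on the send queue is abandoned before the QP is added
 * to the flush lists (see __bnxt_qplib_add_flush_qp() below).
 */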
64 static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
65 {
66 qp->sq.condition = false;
67 qp->sq.send_phantom = false;
68 qp->sq.single = false;
69 }
70
71 /* Flush list */
72 static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
73 {
74 struct bnxt_qplib_cq *scq, *rcq;
75
76 scq = qp->scq;
77 rcq = qp->rcq;
78
79 if (!qp->sq.flushed) {
80 dev_dbg(&scq->hwq.pdev->dev,
81 "FP: Adding to SQ Flush list = %p\n", qp);
82 bnxt_qplib_cancel_phantom_processing(qp);
83 list_add_tail(&qp->sq_flush, &scq->sqf_head);
84 qp->sq.flushed = true;
85 }
86 if (!qp->srq) {
87 if (!qp->rq.flushed) {
88 dev_dbg(&rcq->hwq.pdev->dev,
89 "FP: Adding to RQ Flush list = %p\n", qp);
90 list_add_tail(&qp->rq_flush, &rcq->rqf_head);
91 qp->rq.flushed = true;
92 }
93 }
94 }
95
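/* Both CQ flush locks are taken with the send CQ lock first; when the QP
 * uses the same CQ for send and receive only one lock is actually taken,
 * and __acquire()/__release() keep the sparse annotations balanced.
 */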
96 static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
97 unsigned long *flags)
98 __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
99 {
100 spin_lock_irqsave(&qp->scq->flush_lock, *flags);
101 if (qp->scq == qp->rcq)
102 __acquire(&qp->rcq->flush_lock);
103 else
104 spin_lock(&qp->rcq->flush_lock);
105 }
106
107 static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
108 unsigned long *flags)
109 __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
110 {
111 if (qp->scq == qp->rcq)
112 __release(&qp->rcq->flush_lock);
113 else
114 spin_unlock(&qp->rcq->flush_lock);
115 spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
116 }
117
118 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
119 {
120 unsigned long flags;
121
122 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
123 __bnxt_qplib_add_flush_qp(qp);
124 bnxt_qplib_release_cq_flush_locks(qp, &flags);
125 }
126
127 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
128 {
129 if (qp->sq.flushed) {
130 qp->sq.flushed = false;
131 list_del(&qp->sq_flush);
132 }
133 if (!qp->srq) {
134 if (qp->rq.flushed) {
135 qp->rq.flushed = false;
136 list_del(&qp->rq_flush);
137 }
138 }
139 }
140
141 void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
142 {
143 unsigned long flags;
144
145 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
146 __clean_cq(qp->scq, (u64)(unsigned long)qp);
147 qp->sq.hwq.prod = 0;
148 qp->sq.hwq.cons = 0;
149 __clean_cq(qp->rcq, (u64)(unsigned long)qp);
150 qp->rq.hwq.prod = 0;
151 qp->rq.hwq.cons = 0;
152
153 __bnxt_qplib_del_flush_qp(qp);
154 bnxt_qplib_release_cq_flush_locks(qp, &flags);
155 }
156
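/* Deferred CQ notification: runs from the nq_work workqueue item and calls
 * the registered cqn_handler under compl_lock if the CQ is still armed.
 */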
157 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
158 {
159 struct bnxt_qplib_nq_work *nq_work =
160 container_of(work, struct bnxt_qplib_nq_work, work);
161
162 struct bnxt_qplib_cq *cq = nq_work->cq;
163 struct bnxt_qplib_nq *nq = nq_work->nq;
164
165 if (cq && nq) {
166 spin_lock_bh(&cq->compl_lock);
167 if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
168 dev_dbg(&nq->pdev->dev,
169 "%s:Trigger cq = %p event nq = %p\n",
170 __func__, cq, nq);
171 nq->cqn_handler(nq, cq);
172 }
173 spin_unlock_bh(&cq->compl_lock);
174 }
175 kfree(nq_work);
176 }
177
178 static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
179 struct bnxt_qplib_qp *qp)
180 {
181 struct bnxt_qplib_q *rq = &qp->rq;
182 struct bnxt_qplib_q *sq = &qp->sq;
183
184 if (qp->rq_hdr_buf)
185 dma_free_coherent(&res->pdev->dev,
186 rq->max_wqe * qp->rq_hdr_buf_size,
187 qp->rq_hdr_buf, qp->rq_hdr_buf_map);
188 if (qp->sq_hdr_buf)
189 dma_free_coherent(&res->pdev->dev,
190 sq->max_wqe * qp->sq_hdr_buf_size,
191 qp->sq_hdr_buf, qp->sq_hdr_buf_map);
192 qp->rq_hdr_buf = NULL;
193 qp->sq_hdr_buf = NULL;
194 qp->rq_hdr_buf_map = 0;
195 qp->sq_hdr_buf_map = 0;
196 qp->sq_hdr_buf_size = 0;
197 qp->rq_hdr_buf_size = 0;
198 }
199
200 static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
201 struct bnxt_qplib_qp *qp)
202 {
203 struct bnxt_qplib_q *rq = &qp->rq;
204 struct bnxt_qplib_q *sq = &qp->sq;
205 int rc = 0;
206
207 if (qp->sq_hdr_buf_size && sq->max_wqe) {
208 qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
209 sq->max_wqe * qp->sq_hdr_buf_size,
210 &qp->sq_hdr_buf_map, GFP_KERNEL);
211 if (!qp->sq_hdr_buf) {
212 rc = -ENOMEM;
213 dev_err(&res->pdev->dev,
214 "Failed to create sq_hdr_buf\n");
215 goto fail;
216 }
217 }
218
219 if (qp->rq_hdr_buf_size && rq->max_wqe) {
220 qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
221 rq->max_wqe *
222 qp->rq_hdr_buf_size,
223 &qp->rq_hdr_buf_map,
224 GFP_KERNEL);
225 if (!qp->rq_hdr_buf) {
226 rc = -ENOMEM;
227 dev_err(&res->pdev->dev,
228 "Failed to create rq_hdr_buf\n");
229 goto fail;
230 }
231 }
232 return 0;
233
234 fail:
235 bnxt_qplib_free_qp_hdr_buf(res, qp);
236 return rc;
237 }
238
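/* Walk up to 'budget' entries of the NQ ring and clear the cq_handle of any
 * CQ notification entries that still reference the CQ being torn down,
 * counting them in cq->cnq_events so __wait_for_all_nqes() can converge.
 */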
239 static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
240 {
241 struct bnxt_qplib_hwq *hwq = &nq->hwq;
242 struct nq_base *nqe, **nq_ptr;
243 int budget = nq->budget;
244 uintptr_t q_handle;
245 u16 type;
246
247 spin_lock_bh(&hwq->lock);
248 /* Service the NQ until empty */
249 while (budget--) {
250 nq_ptr = (struct nq_base **)hwq->pbl_ptr;
251 nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
252 if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
253 break;
254
255 /*
256 * The validity of the entry must be checked before reading
257 * any further.
258 */
259 dma_rmb();
260
261 type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
262 switch (type) {
263 case NQ_BASE_TYPE_CQ_NOTIFICATION:
264 {
265 struct nq_cn *nqcne = (struct nq_cn *)nqe;
266
267 q_handle = le32_to_cpu(nqcne->cq_handle_low);
268 q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
269 << 32;
270 if ((unsigned long)cq == q_handle) {
271 nqcne->cq_handle_low = 0;
272 nqcne->cq_handle_high = 0;
273 cq->cnq_events++;
274 }
275 break;
276 }
277 default:
278 break;
279 }
280 bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
281 1, &nq->nq_db.dbinfo.flags);
282 }
283 spin_unlock_bh(&hwq->lock);
284 }
285
286 /* Wait until all NQEs for this CQ have been received, cleaning up any NQEs
287 * still associated with this CQ.
288 */
289 static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
290 {
291 u32 retry_cnt = 100;
292
293 while (retry_cnt--) {
294 if (cnq_events == cq->cnq_events)
295 return;
296 usleep_range(50, 100);
297 clean_nq(cq->nq, cq);
298 }
299 }
300
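/* Tasklet handler: drain up to 'budget' notification queue entries,
 * dispatching CQ and SRQ notifications to the registered handlers, and ring
 * the NQ doorbell once at the end if anything was polled.
 */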
301 static void bnxt_qplib_service_nq(struct tasklet_struct *t)
302 {
303 struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
304 struct bnxt_qplib_hwq *hwq = &nq->hwq;
305 struct bnxt_qplib_cq *cq;
306 int budget = nq->budget;
307 struct nq_base *nqe;
308 uintptr_t q_handle;
309 u32 hw_polled = 0;
310 u16 type;
311
312 spin_lock_bh(&hwq->lock);
313 /* Service the NQ until empty */
314 while (budget--) {
315 nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
316 if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
317 break;
318
319 /*
320 * The validity of the entry must be checked before reading
321 * any further.
322 */
323 dma_rmb();
324
325 type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
326 switch (type) {
327 case NQ_BASE_TYPE_CQ_NOTIFICATION:
328 {
329 struct nq_cn *nqcne = (struct nq_cn *)nqe;
330 struct bnxt_re_cq *cq_p;
331
332 q_handle = le32_to_cpu(nqcne->cq_handle_low);
333 q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
334 << 32;
335 cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
336 if (!cq)
337 break;
338 cq->toggle = (le16_to_cpu(nqe->info10_type) &
339 NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
340 cq->dbinfo.toggle = cq->toggle;
341 cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq);
342 if (cq_p->uctx_cq_page)
343 *((u32 *)cq_p->uctx_cq_page) = cq->toggle;
344
345 bnxt_qplib_armen_db(&cq->dbinfo,
346 DBC_DBC_TYPE_CQ_ARMENA);
347 spin_lock_bh(&cq->compl_lock);
348 atomic_set(&cq->arm_state, 0);
349 if (nq->cqn_handler(nq, (cq)))
350 dev_warn(&nq->pdev->dev,
351 "cqn - type 0x%x not handled\n", type);
352 cq->cnq_events++;
353 spin_unlock_bh(&cq->compl_lock);
354 break;
355 }
356 case NQ_BASE_TYPE_SRQ_EVENT:
357 {
358 struct bnxt_qplib_srq *srq;
359 struct bnxt_re_srq *srq_p;
360 struct nq_srq_event *nqsrqe =
361 (struct nq_srq_event *)nqe;
362
363 q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
364 q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
365 << 32;
366 srq = (struct bnxt_qplib_srq *)q_handle;
367 srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
368 >> NQ_CN_TOGGLE_SFT;
369 srq->dbinfo.toggle = srq->toggle;
370 srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
371 if (srq_p->uctx_srq_page)
372 *((u32 *)srq_p->uctx_srq_page) = srq->toggle;
373 bnxt_qplib_armen_db(&srq->dbinfo,
374 DBC_DBC_TYPE_SRQ_ARMENA);
375 if (nq->srqn_handler(nq,
376 (struct bnxt_qplib_srq *)q_handle,
377 nqsrqe->event))
378 dev_warn(&nq->pdev->dev,
379 "SRQ event 0x%x not handled\n",
380 nqsrqe->event);
381 break;
382 }
383 case NQ_BASE_TYPE_DBQ_EVENT:
384 break;
385 default:
386 dev_warn(&nq->pdev->dev,
387 "nqe with type = 0x%x not handled\n", type);
388 break;
389 }
390 hw_polled++;
391 bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
392 1, &nq->nq_db.dbinfo.flags);
393 }
394 if (hw_polled)
395 bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
396 spin_unlock_bh(&hwq->lock);
397 }
398
399 /* bnxt_re_synchronize_nq - poll a notification queue from the caller's context.
400 * @nq - notification queue pointer
401 *
402 * Polls the given notification queue until all pending entries have been
403 * processed. This is useful to synchronize notification entries while
404 * resources are going away.
405 */
407
408 void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
409 {
410 int budget = nq->budget;
411
412 nq->budget = nq->hwq.max_elements;
413 bnxt_qplib_service_nq(&nq->nq_tasklet);
414 nq->budget = budget;
415 }
416
417 static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
418 {
419 struct bnxt_qplib_nq *nq = dev_instance;
420 struct bnxt_qplib_hwq *hwq = &nq->hwq;
421 u32 sw_cons;
422
423 /* Prefetch the NQ element */
424 sw_cons = HWQ_CMP(hwq->cons, hwq);
425 prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
426
427 /* Fan out to CPU affinitized kthreads? */
428 tasklet_schedule(&nq->nq_tasklet);
429
430 return IRQ_HANDLED;
431 }
432
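/* Quiesce the NQ interrupt: mask the NQ doorbell, wait for any running
 * handler to finish, release the MSI-X vector and, when 'kill' is set, kill
 * the tasklet before disabling it.
 */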
433 void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
434 {
435 if (!nq->requested)
436 return;
437
438 nq->requested = false;
439 /* Mask h/w interrupt */
440 bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
441 /* Sync with last running IRQ handler */
442 synchronize_irq(nq->msix_vec);
443 irq_set_affinity_hint(nq->msix_vec, NULL);
444 free_irq(nq->msix_vec, nq);
445 kfree(nq->name);
446 nq->name = NULL;
447
448 if (kill)
449 tasklet_kill(&nq->nq_tasklet);
450 tasklet_disable(&nq->nq_tasklet);
451 }
452
453 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
454 {
455 if (nq->cqn_wq) {
456 destroy_workqueue(nq->cqn_wq);
457 nq->cqn_wq = NULL;
458 }
459
460 /* Make sure the HW is stopped! */
461 bnxt_qplib_nq_stop_irq(nq, true);
462
463 if (nq->nq_db.reg.bar_reg) {
464 iounmap(nq->nq_db.reg.bar_reg);
465 nq->nq_db.reg.bar_reg = NULL;
466 }
467
468 nq->cqn_handler = NULL;
469 nq->srqn_handler = NULL;
470 nq->msix_vec = 0;
471 }
472
473 int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
474 int msix_vector, bool need_init)
475 {
476 struct bnxt_qplib_res *res = nq->res;
477 int rc;
478
479 if (nq->requested)
480 return -EFAULT;
481
482 nq->msix_vec = msix_vector;
483 if (need_init)
484 tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
485 else
486 tasklet_enable(&nq->nq_tasklet);
487
488 nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
489 nq_indx, pci_name(res->pdev));
490 if (!nq->name)
491 return -ENOMEM;
492 rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
493 if (rc) {
494 kfree(nq->name);
495 nq->name = NULL;
496 tasklet_disable(&nq->nq_tasklet);
497 return rc;
498 }
499
500 cpumask_clear(&nq->mask);
501 cpumask_set_cpu(nq_indx, &nq->mask);
502 rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
503 if (rc) {
504 dev_warn(&nq->pdev->dev,
505 "set affinity failed; vector: %d nq_idx: %d\n",
506 nq->msix_vec, nq_indx);
507 }
508 nq->requested = true;
509 bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
510
511 return rc;
512 }
513
514 static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
515 {
516 resource_size_t reg_base;
517 struct bnxt_qplib_nq_db *nq_db;
518 struct pci_dev *pdev;
519
520 pdev = nq->pdev;
521 nq_db = &nq->nq_db;
522
523 nq_db->dbinfo.flags = 0;
524 nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
525 nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
526 if (!nq_db->reg.bar_base) {
527 dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
528 nq_db->reg.bar_id);
529 return -ENOMEM;
530 }
531
532 reg_base = nq_db->reg.bar_base + reg_offt;
533 /* Unconditionally map 8 bytes to support 57500 series */
534 nq_db->reg.len = 8;
535 nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
536 if (!nq_db->reg.bar_reg) {
537 dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
538 nq_db->reg.bar_id);
539 return -ENOMEM;
540 }
541
542 nq_db->dbinfo.db = nq_db->reg.bar_reg;
543 nq_db->dbinfo.hwq = &nq->hwq;
544 nq_db->dbinfo.xid = nq->ring_id;
545
546 return 0;
547 }
548
549 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
550 int nq_idx, int msix_vector, int bar_reg_offset,
551 cqn_handler_t cqn_handler,
552 srqn_handler_t srqn_handler)
553 {
554 int rc;
555
556 nq->pdev = pdev;
557 nq->cqn_handler = cqn_handler;
558 nq->srqn_handler = srqn_handler;
559
560 /* Create a workqueue used to schedule CQ notifiers for the post-send case */
561 nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
562 if (!nq->cqn_wq)
563 return -ENOMEM;
564
565 rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
566 if (rc)
567 goto fail;
568
569 rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
570 if (rc) {
571 dev_err(&nq->pdev->dev,
572 "Failed to request irq for nq-idx %d\n", nq_idx);
573 goto fail;
574 }
575
576 return 0;
577 fail:
578 bnxt_qplib_disable_nq(nq);
579 return rc;
580 }
581
582 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
583 {
584 if (nq->hwq.max_elements) {
585 bnxt_qplib_free_hwq(nq->res, &nq->hwq);
586 nq->hwq.max_elements = 0;
587 }
588 }
589
590 int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
591 {
592 struct bnxt_qplib_hwq_attr hwq_attr = {};
593 struct bnxt_qplib_sg_info sginfo = {};
594
595 nq->pdev = res->pdev;
596 nq->res = res;
597 if (!nq->hwq.max_elements ||
598 nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
599 nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
600
601 sginfo.pgsize = PAGE_SIZE;
602 sginfo.pgshft = PAGE_SHIFT;
603 hwq_attr.res = res;
604 hwq_attr.sginfo = &sginfo;
605 hwq_attr.depth = nq->hwq.max_elements;
606 hwq_attr.stride = sizeof(struct nq_base);
607 hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
608 if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
609 dev_err(&nq->pdev->dev, "FP NQ allocation failed");
610 return -ENOMEM;
611 }
612 nq->budget = 8;
613 return 0;
614 }
615
616 /* SRQ */
617 void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
618 struct bnxt_qplib_srq *srq)
619 {
620 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
621 struct creq_destroy_srq_resp resp = {};
622 struct bnxt_qplib_cmdqmsg msg = {};
623 struct cmdq_destroy_srq req = {};
624 int rc;
625
626 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
627 CMDQ_BASE_OPCODE_DESTROY_SRQ,
628 sizeof(req));
629
630 /* Configure the request */
631 req.srq_cid = cpu_to_le32(srq->id);
632
633 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
634 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
635 kfree(srq->swq);
636 if (rc)
637 return;
638 bnxt_qplib_free_hwq(res, &srq->hwq);
639 }
640
641 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
642 struct bnxt_qplib_srq *srq)
643 {
644 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
645 struct bnxt_qplib_hwq_attr hwq_attr = {};
646 struct creq_create_srq_resp resp = {};
647 struct bnxt_qplib_cmdqmsg msg = {};
648 struct cmdq_create_srq req = {};
649 struct bnxt_qplib_pbl *pbl;
650 u16 pg_sz_lvl;
651 int rc, idx;
652
653 hwq_attr.res = res;
654 hwq_attr.sginfo = &srq->sg_info;
655 hwq_attr.depth = srq->max_wqe;
656 hwq_attr.stride = srq->wqe_size;
657 hwq_attr.type = HWQ_TYPE_QUEUE;
658 rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
659 if (rc)
660 return rc;
661
662 srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
663 GFP_KERNEL);
664 if (!srq->swq) {
665 rc = -ENOMEM;
666 goto fail;
667 }
668 srq->dbinfo.flags = 0;
669 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
670 CMDQ_BASE_OPCODE_CREATE_SRQ,
671 sizeof(req));
672
673 /* Configure the request */
674 req.dpi = cpu_to_le32(srq->dpi->dpi);
675 req.srq_handle = cpu_to_le64((uintptr_t)srq);
676
677 req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
678 pbl = &srq->hwq.pbl[PBL_LVL_0];
679 pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
680 CMDQ_CREATE_SRQ_PG_SIZE_SFT);
681 pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
682 CMDQ_CREATE_SRQ_LVL_SFT;
683 req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
684 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
685 req.pd_id = cpu_to_le32(srq->pd->id);
686 req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
687
688 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
689 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
690 if (rc)
691 goto fail;
692
693 spin_lock_init(&srq->lock);
694 srq->start_idx = 0;
695 srq->last_idx = srq->hwq.max_elements - 1;
696 for (idx = 0; idx < srq->hwq.max_elements; idx++)
697 srq->swq[idx].next_idx = idx + 1;
698 srq->swq[srq->last_idx].next_idx = -1;
699
700 srq->id = le32_to_cpu(resp.xid);
701 srq->dbinfo.hwq = &srq->hwq;
702 srq->dbinfo.xid = srq->id;
703 srq->dbinfo.db = srq->dpi->dbr;
704 srq->dbinfo.max_slot = 1;
705 srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
706 if (srq->threshold)
707 bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
708 srq->arm_req = false;
709
710 return 0;
711 fail:
712 bnxt_qplib_free_hwq(res, &srq->hwq);
713 kfree(srq->swq);
714
715 return rc;
716 }
717
718 int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
719 struct bnxt_qplib_srq *srq)
720 {
721 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
722 u32 count;
723
724 count = __bnxt_qplib_get_avail(srq_hwq);
725 if (count > srq->threshold) {
726 srq->arm_req = false;
727 bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
728 } else {
729 /* Deferred arming */
730 srq->arm_req = true;
731 }
732
733 return 0;
734 }
735
736 int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
737 struct bnxt_qplib_srq *srq)
738 {
739 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
740 struct creq_query_srq_resp resp = {};
741 struct bnxt_qplib_cmdqmsg msg = {};
742 struct bnxt_qplib_rcfw_sbuf sbuf;
743 struct creq_query_srq_resp_sb *sb;
744 struct cmdq_query_srq req = {};
745 int rc;
746
747 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
748 CMDQ_BASE_OPCODE_QUERY_SRQ,
749 sizeof(req));
750
751 /* Configure the request */
752 sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
753 sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
754 &sbuf.dma_addr, GFP_KERNEL);
755 if (!sbuf.sb)
756 return -ENOMEM;
757 req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
758 req.srq_cid = cpu_to_le32(srq->id);
759 sb = sbuf.sb;
760 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
761 sizeof(resp), 0);
762 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
763 if (!rc)
764 srq->threshold = le16_to_cpu(sb->srq_limit);
765 dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
766 sbuf.sb, sbuf.dma_addr);
767
768 return rc;
769 }
770
771 int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
772 struct bnxt_qplib_swqe *wqe)
773 {
774 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
775 struct rq_wqe *srqe;
776 struct sq_sge *hw_sge;
777 u32 count = 0;
778 int i, next;
779
780 spin_lock(&srq_hwq->lock);
781 if (srq->start_idx == srq->last_idx) {
782 dev_err(&srq_hwq->pdev->dev,
783 "FP: SRQ (0x%x) is full!\n", srq->id);
784 spin_unlock(&srq_hwq->lock);
785 return -EINVAL;
786 }
787 next = srq->start_idx;
788 srq->start_idx = srq->swq[next].next_idx;
789 spin_unlock(&srq_hwq->lock);
790
791 srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
792 memset(srqe, 0, srq->wqe_size);
793 /* Calculate wqe_size16 and data_len */
794 for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
795 i < wqe->num_sge; i++, hw_sge++) {
796 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
797 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
798 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
799 }
800 srqe->wqe_type = wqe->type;
801 srqe->flags = wqe->flags;
802 srqe->wqe_size = wqe->num_sge +
803 ((offsetof(typeof(*srqe), data) + 15) >> 4);
804 srqe->wr_id[0] = cpu_to_le32((u32)next);
805 srq->swq[next].wr_id = wqe->wr_id;
806
807 bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
808
809 spin_lock(&srq_hwq->lock);
810 count = __bnxt_qplib_get_avail(srq_hwq);
811 spin_unlock(&srq_hwq->lock);
812 /* Ring DB */
813 bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
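/* If an arm request was deferred and enough entries are now available,
 * arm the SRQ at the stored threshold.
 */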
814 if (srq->arm_req == true && count > srq->threshold) {
815 srq->arm_req = false;
816 bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
817 }
818
819 return 0;
820 }
821
822 /* QP */
823
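/* The software queue entries are linked into a free list through next_idx,
 * with swq_start/swq_last acting as the head and tail cursors; the last
 * entry points back to 0 to make the list circular.
 */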
824 static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
825 {
826 int indx;
827
828 que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
829 if (!que->swq)
830 return -ENOMEM;
831
832 que->swq_start = 0;
833 que->swq_last = que->max_sw_wqe - 1;
834 for (indx = 0; indx < que->max_sw_wqe; indx++)
835 que->swq[indx].next_idx = indx + 1;
836 que->swq[que->swq_last].next_idx = 0; /* Make it circular */
837 que->swq_last = 0;
838
839 return 0;
840 }
841
842 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
843 {
844 struct bnxt_qplib_hwq_attr hwq_attr = {};
845 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
846 struct creq_create_qp1_resp resp = {};
847 struct bnxt_qplib_cmdqmsg msg = {};
848 struct bnxt_qplib_q *sq = &qp->sq;
849 struct bnxt_qplib_q *rq = &qp->rq;
850 struct cmdq_create_qp1 req = {};
851 struct bnxt_qplib_pbl *pbl;
852 u32 qp_flags = 0;
853 u8 pg_sz_lvl;
854 u32 tbl_indx;
855 int rc;
856
857 sq->dbinfo.flags = 0;
858 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
859 CMDQ_BASE_OPCODE_CREATE_QP1,
860 sizeof(req));
861 /* General */
862 req.type = qp->type;
863 req.dpi = cpu_to_le32(qp->dpi->dpi);
864 req.qp_handle = cpu_to_le64(qp->qp_handle);
865
866 /* SQ */
867 hwq_attr.res = res;
868 hwq_attr.sginfo = &sq->sg_info;
869 hwq_attr.stride = sizeof(struct sq_sge);
870 hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
871 hwq_attr.type = HWQ_TYPE_QUEUE;
872 rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
873 if (rc)
874 return rc;
875
876 rc = bnxt_qplib_alloc_init_swq(sq);
877 if (rc)
878 goto fail_sq;
879
880 req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
881 pbl = &sq->hwq.pbl[PBL_LVL_0];
882 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
883 pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
884 CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
885 pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
886 req.sq_pg_size_sq_lvl = pg_sz_lvl;
887 req.sq_fwo_sq_sge =
888 cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
889 CMDQ_CREATE_QP1_SQ_SGE_SFT);
890 req.scq_cid = cpu_to_le32(qp->scq->id);
891
892 /* RQ */
893 if (rq->max_wqe) {
894 rq->dbinfo.flags = 0;
895 hwq_attr.res = res;
896 hwq_attr.sginfo = &rq->sg_info;
897 hwq_attr.stride = sizeof(struct sq_sge);
898 hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
899 hwq_attr.type = HWQ_TYPE_QUEUE;
900 rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
901 if (rc)
902 goto sq_swq;
903 rc = bnxt_qplib_alloc_init_swq(rq);
904 if (rc)
905 goto fail_rq;
906 req.rq_size = cpu_to_le32(rq->max_wqe);
907 pbl = &rq->hwq.pbl[PBL_LVL_0];
908 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
909 pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
910 CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
911 pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
912 req.rq_pg_size_rq_lvl = pg_sz_lvl;
913 req.rq_fwo_rq_sge =
914 cpu_to_le16((rq->max_sge &
915 CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
916 CMDQ_CREATE_QP1_RQ_SGE_SFT);
917 }
918 req.rcq_cid = cpu_to_le32(qp->rcq->id);
919 /* Header buffer - allow hdr_buf to be passed in */
920 rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
921 if (rc) {
922 rc = -ENOMEM;
923 goto rq_rwq;
924 }
925 qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
926 req.qp_flags = cpu_to_le32(qp_flags);
927 req.pd_id = cpu_to_le32(qp->pd->id);
928
929 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
930 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
931 if (rc)
932 goto fail;
933
934 qp->id = le32_to_cpu(resp.xid);
935 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
936 qp->cctx = res->cctx;
937 sq->dbinfo.hwq = &sq->hwq;
938 sq->dbinfo.xid = qp->id;
939 sq->dbinfo.db = qp->dpi->dbr;
940 sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
941 if (rq->max_wqe) {
942 rq->dbinfo.hwq = &rq->hwq;
943 rq->dbinfo.xid = qp->id;
944 rq->dbinfo.db = qp->dpi->dbr;
945 rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
946 }
947 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
948 rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
949 rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
950
951 return 0;
952
953 fail:
954 bnxt_qplib_free_qp_hdr_buf(res, qp);
955 rq_rwq:
956 kfree(rq->swq);
957 fail_rq:
958 bnxt_qplib_free_hwq(res, &rq->hwq);
959 sq_swq:
960 kfree(sq->swq);
961 fail_sq:
962 bnxt_qplib_free_hwq(res, &sq->hwq);
963 return rc;
964 }
965
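/* The PSN/MSN search area lives in the aux pages allocated just past the SQ
 * depth. Record its page table, per-entry stride and the index of the first
 * entry so bnxt_qplib_pull_psn_buff() can locate the entry for each WQE.
 */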
966 static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
967 {
968 struct bnxt_qplib_hwq *hwq;
969 struct bnxt_qplib_q *sq;
970 u64 fpsne, psn_pg;
971 u16 indx_pad = 0;
972
973 sq = &qp->sq;
974 hwq = &sq->hwq;
975 /* First psn entry */
976 fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
977 if (!IS_ALIGNED(fpsne, PAGE_SIZE))
978 indx_pad = (fpsne & ~PAGE_MASK) / size;
979 hwq->pad_pgofft = indx_pad;
980 hwq->pad_pg = (u64 *)psn_pg;
981 hwq->pad_stride = size;
982 }
983
984 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
985 {
986 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
987 struct bnxt_qplib_hwq_attr hwq_attr = {};
988 struct bnxt_qplib_sg_info sginfo = {};
989 struct creq_create_qp_resp resp = {};
990 struct bnxt_qplib_cmdqmsg msg = {};
991 struct bnxt_qplib_q *sq = &qp->sq;
992 struct bnxt_qplib_q *rq = &qp->rq;
993 struct cmdq_create_qp req = {};
994 int rc, req_size, psn_sz = 0;
995 struct bnxt_qplib_hwq *xrrq;
996 struct bnxt_qplib_pbl *pbl;
997 u32 qp_flags = 0;
998 u8 pg_sz_lvl;
999 u32 tbl_indx;
1000 u16 nsge;
1001
1002 if (res->dattr)
1003 qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
1004
1005 sq->dbinfo.flags = 0;
1006 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1007 CMDQ_BASE_OPCODE_CREATE_QP,
1008 sizeof(req));
1009
1010 /* General */
1011 req.type = qp->type;
1012 req.dpi = cpu_to_le32(qp->dpi->dpi);
1013 req.qp_handle = cpu_to_le64(qp->qp_handle);
1014
1015 /* SQ */
1016 if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1017 psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
1018 sizeof(struct sq_psn_search_ext) :
1019 sizeof(struct sq_psn_search);
1020
1021 if (qp->is_host_msn_tbl) {
1022 psn_sz = sizeof(struct sq_msn_search);
1023 qp->msn = 0;
1024 }
1025 }
1026
1027 hwq_attr.res = res;
1028 hwq_attr.sginfo = &sq->sg_info;
1029 hwq_attr.stride = sizeof(struct sq_sge);
1030 hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
1031 hwq_attr.aux_stride = psn_sz;
1032 hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
1033 : 0;
1034 /* Update msn tbl size */
1035 if (qp->is_host_msn_tbl && psn_sz) {
1036 hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1037 qp->msn_tbl_sz = hwq_attr.aux_depth;
1038 qp->msn = 0;
1039 }
1040
1041 hwq_attr.type = HWQ_TYPE_QUEUE;
1042 rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
1043 if (rc)
1044 return rc;
1045
1046 rc = bnxt_qplib_alloc_init_swq(sq);
1047 if (rc)
1048 goto fail_sq;
1049
1050 if (psn_sz)
1051 bnxt_qplib_init_psn_ptr(qp, psn_sz);
1052
1053 req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1054 pbl = &sq->hwq.pbl[PBL_LVL_0];
1055 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1056 pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
1057 CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
1058 pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
1059 req.sq_pg_size_sq_lvl = pg_sz_lvl;
1060 req.sq_fwo_sq_sge =
1061 cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
1062 CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
1063 req.scq_cid = cpu_to_le32(qp->scq->id);
1064
1065 /* RQ */
1066 if (!qp->srq) {
1067 rq->dbinfo.flags = 0;
1068 hwq_attr.res = res;
1069 hwq_attr.sginfo = &rq->sg_info;
1070 hwq_attr.stride = sizeof(struct sq_sge);
1071 hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
1072 hwq_attr.aux_stride = 0;
1073 hwq_attr.aux_depth = 0;
1074 hwq_attr.type = HWQ_TYPE_QUEUE;
1075 rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
1076 if (rc)
1077 goto sq_swq;
1078 rc = bnxt_qplib_alloc_init_swq(rq);
1079 if (rc)
1080 goto fail_rq;
1081
1082 req.rq_size = cpu_to_le32(rq->max_wqe);
1083 pbl = &rq->hwq.pbl[PBL_LVL_0];
1084 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1085 pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
1086 CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
1087 pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
1088 req.rq_pg_size_rq_lvl = pg_sz_lvl;
1089 nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1090 6 : rq->max_sge;
1091 req.rq_fwo_rq_sge =
1092 cpu_to_le16(((nsge &
1093 CMDQ_CREATE_QP_RQ_SGE_MASK) <<
1094 CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
1095 } else {
1096 /* SRQ */
1097 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
1098 req.srq_cid = cpu_to_le32(qp->srq->id);
1099 }
1100 req.rcq_cid = cpu_to_le32(qp->rcq->id);
1101
1102 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
1103 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
1104 if (qp->sig_type)
1105 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
1106 if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
1107 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
1108 if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
1109 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
1110
1111 req.qp_flags = cpu_to_le32(qp_flags);
1112
1113 /* ORRQ and IRRQ */
1114 if (psn_sz) {
1115 xrrq = &qp->orrq;
1116 xrrq->max_elements =
1117 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1118 req_size = xrrq->max_elements *
1119 BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1120 req_size &= ~(PAGE_SIZE - 1);
1121 sginfo.pgsize = req_size;
1122 sginfo.pgshft = PAGE_SHIFT;
1123
1124 hwq_attr.res = res;
1125 hwq_attr.sginfo = &sginfo;
1126 hwq_attr.depth = xrrq->max_elements;
1127 hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
1128 hwq_attr.aux_stride = 0;
1129 hwq_attr.aux_depth = 0;
1130 hwq_attr.type = HWQ_TYPE_CTX;
1131 rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1132 if (rc)
1133 goto rq_swq;
1134 pbl = &xrrq->pbl[PBL_LVL_0];
1135 req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1136
1137 xrrq = &qp->irrq;
1138 xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
1139 qp->max_dest_rd_atomic);
1140 req_size = xrrq->max_elements *
1141 BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1142 req_size &= ~(PAGE_SIZE - 1);
1143 sginfo.pgsize = req_size;
1144 hwq_attr.depth = xrrq->max_elements;
1145 hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
1146 rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1147 if (rc)
1148 goto fail_orrq;
1149
1150 pbl = &xrrq->pbl[PBL_LVL_0];
1151 req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1152 }
1153 req.pd_id = cpu_to_le32(qp->pd->id);
1154
1155 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1156 sizeof(resp), 0);
1157 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1158 if (rc)
1159 goto fail;
1160
1161 qp->id = le32_to_cpu(resp.xid);
1162 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1163 INIT_LIST_HEAD(&qp->sq_flush);
1164 INIT_LIST_HEAD(&qp->rq_flush);
1165 qp->cctx = res->cctx;
1166 sq->dbinfo.hwq = &sq->hwq;
1167 sq->dbinfo.xid = qp->id;
1168 sq->dbinfo.db = qp->dpi->dbr;
1169 sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
1170 if (rq->max_wqe) {
1171 rq->dbinfo.hwq = &rq->hwq;
1172 rq->dbinfo.xid = qp->id;
1173 rq->dbinfo.db = qp->dpi->dbr;
1174 rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
1175 }
1176 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1177 rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1178 rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
1179
1180 return 0;
1181 fail:
1182 bnxt_qplib_free_hwq(res, &qp->irrq);
1183 fail_orrq:
1184 bnxt_qplib_free_hwq(res, &qp->orrq);
1185 rq_swq:
1186 kfree(rq->swq);
1187 fail_rq:
1188 bnxt_qplib_free_hwq(res, &rq->hwq);
1189 sq_swq:
1190 kfree(sq->swq);
1191 fail_sq:
1192 bnxt_qplib_free_hwq(res, &sq->hwq);
1193 return rc;
1194 }
1195
1196 static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
1197 {
1198 switch (qp->state) {
1199 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1200 /* INIT->RTR, configure the path_mtu to the default
1201 * 2048 if not being requested
1202 */
1203 if (!(qp->modify_flags &
1204 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
1205 qp->modify_flags |=
1206 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1207 qp->path_mtu =
1208 CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1209 }
1210 qp->modify_flags &=
1211 ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1212 /* Bono FW requires the max_dest_rd_atomic to be >= 1 */
1213 if (qp->max_dest_rd_atomic < 1)
1214 qp->max_dest_rd_atomic = 1;
1215 qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1216 /* Bono FW 20.6.5 requires SGID_INDEX configuration */
1217 if (!(qp->modify_flags &
1218 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
1219 qp->modify_flags |=
1220 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
1221 qp->ah.sgid_index = 0;
1222 }
1223 break;
1224 default:
1225 break;
1226 }
1227 }
1228
1229 static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
1230 {
1231 switch (qp->state) {
1232 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1233 /* Bono FW requires the max_rd_atomic to be >= 1 */
1234 if (qp->max_rd_atomic < 1)
1235 qp->max_rd_atomic = 1;
1236 /* Bono FW does not allow PKEY_INDEX,
1237 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
1238 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
1239 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
1240 * modification
1241 */
1242 qp->modify_flags &=
1243 ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
1244 CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1245 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1246 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1247 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1248 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1249 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1250 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
1251 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
1252 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
1253 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
1254 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
1255 break;
1256 default:
1257 break;
1258 }
1259 }
1260
1261 static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1262 {
1263 switch (qp->cur_qp_state) {
1264 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1265 break;
1266 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1267 __modify_flags_from_init_state(qp);
1268 break;
1269 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1270 __modify_flags_from_rtr_state(qp);
1271 break;
1272 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1273 break;
1274 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1275 break;
1276 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1277 break;
1278 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1279 break;
1280 default:
1281 break;
1282 }
1283 }
1284
1285 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1286 {
1287 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1288 struct creq_modify_qp_resp resp = {};
1289 struct bnxt_qplib_cmdqmsg msg = {};
1290 struct cmdq_modify_qp req = {};
1291 u32 temp32[4];
1292 u32 bmask;
1293 int rc;
1294
1295 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1296 CMDQ_BASE_OPCODE_MODIFY_QP,
1297 sizeof(req));
1298
1299 /* Filter out the qp_attr_mask based on the state->new transition */
1300 __filter_modify_flags(qp);
1301 bmask = qp->modify_flags;
1302 req.modify_mask = cpu_to_le32(qp->modify_flags);
1303 req.qp_cid = cpu_to_le32(qp->id);
1304 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1305 req.network_type_en_sqd_async_notify_new_state =
1306 (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1307 (qp->en_sqd_async_notify ?
1308 CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1309 }
1310 req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1311
1312 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
1313 req.access = qp->access;
1314
1315 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
1316 req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);
1317
1318 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
1319 req.qkey = cpu_to_le32(qp->qkey);
1320
1321 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1322 memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1323 req.dgid[0] = cpu_to_le32(temp32[0]);
1324 req.dgid[1] = cpu_to_le32(temp32[1]);
1325 req.dgid[2] = cpu_to_le32(temp32[2]);
1326 req.dgid[3] = cpu_to_le32(temp32[3]);
1327 }
1328 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
1329 req.flow_label = cpu_to_le32(qp->ah.flow_label);
1330
1331 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
1332 req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
1333 [qp->ah.sgid_index]);
1334
1335 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
1336 req.hop_limit = qp->ah.hop_limit;
1337
1338 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
1339 req.traffic_class = qp->ah.traffic_class;
1340
1341 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
1342 memcpy(req.dest_mac, qp->ah.dmac, 6);
1343
1344 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
1345 req.path_mtu_pingpong_push_enable |= qp->path_mtu;
1346
1347 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
1348 req.timeout = qp->timeout;
1349
1350 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
1351 req.retry_cnt = qp->retry_cnt;
1352
1353 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
1354 req.rnr_retry = qp->rnr_retry;
1355
1356 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
1357 req.min_rnr_timer = qp->min_rnr_timer;
1358
1359 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
1360 req.rq_psn = cpu_to_le32(qp->rq.psn);
1361
1362 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
1363 req.sq_psn = cpu_to_le32(qp->sq.psn);
1364
1365 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1366 req.max_rd_atomic =
1367 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1368
1369 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1370 req.max_dest_rd_atomic =
1371 IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1372
1373 req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1374 req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1375 req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1376 req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1377 req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1378 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1379 req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1380
1381 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1382
1383 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
1384 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1385 if (rc)
1386 return rc;
1387 qp->cur_qp_state = qp->state;
1388 return 0;
1389 }
1390
1391 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1392 {
1393 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1394 struct creq_query_qp_resp resp = {};
1395 struct bnxt_qplib_cmdqmsg msg = {};
1396 struct bnxt_qplib_rcfw_sbuf sbuf;
1397 struct creq_query_qp_resp_sb *sb;
1398 struct cmdq_query_qp req = {};
1399 u32 temp32[4];
1400 int i, rc;
1401
1402 sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
1403 sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
1404 &sbuf.dma_addr, GFP_KERNEL);
1405 if (!sbuf.sb)
1406 return -ENOMEM;
1407 sb = sbuf.sb;
1408
1409 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1410 CMDQ_BASE_OPCODE_QUERY_QP,
1411 sizeof(req));
1412
1413 req.qp_cid = cpu_to_le32(qp->id);
1414 req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
1415 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
1416 sizeof(resp), 0);
1417 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1418 if (rc)
1419 goto bail;
1420 /* Extract the context from the side buffer */
1421 qp->state = sb->en_sqd_async_notify_state &
1422 CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1423 qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1424 CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
1425 qp->access = sb->access;
1426 qp->pkey_index = le16_to_cpu(sb->pkey);
1427 qp->qkey = le32_to_cpu(sb->qkey);
1428
1429 temp32[0] = le32_to_cpu(sb->dgid[0]);
1430 temp32[1] = le32_to_cpu(sb->dgid[1]);
1431 temp32[2] = le32_to_cpu(sb->dgid[2]);
1432 temp32[3] = le32_to_cpu(sb->dgid[3]);
1433 memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1434
1435 qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1436
1437 qp->ah.sgid_index = 0;
1438 for (i = 0; i < res->sgid_tbl.max; i++) {
1439 if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1440 qp->ah.sgid_index = i;
1441 break;
1442 }
1443 }
1444 if (i == res->sgid_tbl.max)
1445 dev_warn(&res->pdev->dev, "SGID not found??\n");
1446
1447 qp->ah.hop_limit = sb->hop_limit;
1448 qp->ah.traffic_class = sb->traffic_class;
1449 memcpy(qp->ah.dmac, sb->dest_mac, 6);
1450 qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1451 CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1452 CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1453 qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1454 CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1455 CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1456 qp->timeout = sb->timeout;
1457 qp->retry_cnt = sb->retry_cnt;
1458 qp->rnr_retry = sb->rnr_retry;
1459 qp->min_rnr_timer = sb->min_rnr_timer;
1460 qp->rq.psn = le32_to_cpu(sb->rq_psn);
1461 qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1462 qp->sq.psn = le32_to_cpu(sb->sq_psn);
1463 qp->max_dest_rd_atomic =
1464 IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1465 qp->sq.max_wqe = qp->sq.hwq.max_elements;
1466 qp->rq.max_wqe = qp->rq.hwq.max_elements;
1467 qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1468 qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1469 qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1470 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1471 memcpy(qp->smac, sb->src_mac, 6);
1472 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1473 bail:
1474 dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
1475 sbuf.sb, sbuf.dma_addr);
1476 return rc;
1477 }
1478
1479 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1480 {
1481 struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1482 u32 peek_flags, peek_cons;
1483 struct cq_base *hw_cqe;
1484 int i;
1485
1486 peek_flags = cq->dbinfo.flags;
1487 peek_cons = cq_hwq->cons;
1488 for (i = 0; i < cq_hwq->max_elements; i++) {
1489 hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
1490 if (!CQE_CMP_VALID(hw_cqe, peek_flags))
1491 continue;
1492 /*
1493 * The validity of the entry must be checked before reading
1494 * any further.
1495 */
1496 dma_rmb();
1497 switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1498 case CQ_BASE_CQE_TYPE_REQ:
1499 case CQ_BASE_CQE_TYPE_TERMINAL:
1500 {
1501 struct cq_req *cqe = (struct cq_req *)hw_cqe;
1502
1503 if (qp == le64_to_cpu(cqe->qp_handle))
1504 cqe->qp_handle = 0;
1505 break;
1506 }
1507 case CQ_BASE_CQE_TYPE_RES_RC:
1508 case CQ_BASE_CQE_TYPE_RES_UD:
1509 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1510 {
1511 struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1512
1513 if (qp == le64_to_cpu(cqe->qp_handle))
1514 cqe->qp_handle = 0;
1515 break;
1516 }
1517 default:
1518 break;
1519 }
1520 bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
1521 1, &peek_flags);
1522 }
1523 }
1524
1525 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1526 struct bnxt_qplib_qp *qp)
1527 {
1528 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1529 struct creq_destroy_qp_resp resp = {};
1530 struct bnxt_qplib_cmdqmsg msg = {};
1531 struct cmdq_destroy_qp req = {};
1532 u32 tbl_indx;
1533 int rc;
1534
1535 spin_lock_bh(&rcfw->tbl_lock);
1536 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1537 rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1538 rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
1539 spin_unlock_bh(&rcfw->tbl_lock);
1540
1541 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1542 CMDQ_BASE_OPCODE_DESTROY_QP,
1543 sizeof(req));
1544
1545 req.qp_cid = cpu_to_le32(qp->id);
1546 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1547 sizeof(resp), 0);
1548 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1549 if (rc) {
1550 spin_lock_bh(&rcfw->tbl_lock);
1551 rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1552 rcfw->qp_tbl[tbl_indx].qp_handle = qp;
1553 spin_unlock_bh(&rcfw->tbl_lock);
1554 return rc;
1555 }
1556
1557 return 0;
1558 }
1559
1560 void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1561 struct bnxt_qplib_qp *qp)
1562 {
1563 bnxt_qplib_free_qp_hdr_buf(res, qp);
1564 bnxt_qplib_free_hwq(res, &qp->sq.hwq);
1565 kfree(qp->sq.swq);
1566
1567 bnxt_qplib_free_hwq(res, &qp->rq.hwq);
1568 kfree(qp->rq.swq);
1569
1570 if (qp->irrq.max_elements)
1571 bnxt_qplib_free_hwq(res, &qp->irrq);
1572 if (qp->orrq.max_elements)
1573 bnxt_qplib_free_hwq(res, &qp->orrq);
1574
1575 }
1576
1577 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1578 struct bnxt_qplib_sge *sge)
1579 {
1580 struct bnxt_qplib_q *sq = &qp->sq;
1581 u32 sw_prod;
1582
1583 memset(sge, 0, sizeof(*sge));
1584
1585 if (qp->sq_hdr_buf) {
1586 sw_prod = sq->swq_start;
1587 sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1588 sw_prod * qp->sq_hdr_buf_size);
1589 sge->lkey = 0xFFFFFFFF;
1590 sge->size = qp->sq_hdr_buf_size;
1591 return qp->sq_hdr_buf + sw_prod * sge->size;
1592 }
1593 return NULL;
1594 }
1595
1596 u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1597 {
1598 struct bnxt_qplib_q *rq = &qp->rq;
1599
1600 return rq->swq_start;
1601 }
1602
1603 dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1604 {
1605 return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1606 }
1607
1608 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1609 struct bnxt_qplib_sge *sge)
1610 {
1611 struct bnxt_qplib_q *rq = &qp->rq;
1612 u32 sw_prod;
1613
1614 memset(sge, 0, sizeof(*sge));
1615
1616 if (qp->rq_hdr_buf) {
1617 sw_prod = rq->swq_start;
1618 sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1619 sw_prod * qp->rq_hdr_buf_size);
1620 sge->lkey = 0xFFFFFFFF;
1621 sge->size = qp->rq_hdr_buf_size;
1622 return qp->rq_hdr_buf + sw_prod * sge->size;
1623 }
1624 return NULL;
1625 }
1626
1627 /* Fill the MSN table entry in the next psn row */
1628 static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
1629 struct bnxt_qplib_swqe *wqe,
1630 struct bnxt_qplib_swq *swq)
1631 {
1632 struct sq_msn_search *msns;
1633 u32 start_psn, next_psn;
1634 u16 start_idx;
1635
1636 msns = (struct sq_msn_search *)swq->psn_search;
1637 msns->start_idx_next_psn_start_psn = 0;
1638
1639 start_psn = swq->start_psn;
1640 next_psn = swq->next_psn;
1641 start_idx = swq->slot_idx;
1642 msns->start_idx_next_psn_start_psn |=
1643 bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
1644 qp->msn++;
1645 qp->msn %= qp->msn_tbl_sz;
1646 }
1647
1648 static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
1649 struct bnxt_qplib_swqe *wqe,
1650 struct bnxt_qplib_swq *swq)
1651 {
1652 struct sq_psn_search_ext *psns_ext;
1653 struct sq_psn_search *psns;
1654 u32 flg_npsn;
1655 u32 op_spsn;
1656
1657 if (!swq->psn_search)
1658 return;
1659 /* Handle MSN differently depending on the capability flags */
1660 if (qp->is_host_msn_tbl) {
1661 bnxt_qplib_fill_msn_search(qp, wqe, swq);
1662 return;
1663 }
1664 psns = swq->psn_search;
1666 psns_ext = swq->psn_ext;
1667
1668 op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1669 SQ_PSN_SEARCH_START_PSN_MASK);
1670 op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1671 SQ_PSN_SEARCH_OPCODE_MASK);
1672 flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1673 SQ_PSN_SEARCH_NEXT_PSN_MASK);
1674
1675 if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
1676 psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
1677 psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
1678 psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
1679 } else {
1680 psns->opcode_start_psn = cpu_to_le32(op_spsn);
1681 psns->flags_next_psn = cpu_to_le32(flg_npsn);
1682 }
1683 }
1684
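/* Copy inline payload from the SGE list directly into consecutive 16-byte
 * WQE slots, bounded by max_inline_data; returns the total length copied or
 * -ENOMEM if the payload would not fit.
 */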
1685 static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
1686 struct bnxt_qplib_swqe *wqe,
1687 u16 *idx)
1688 {
1689 struct bnxt_qplib_hwq *hwq;
1690 int len, t_len, offt;
1691 bool pull_dst = true;
1692 void *il_dst = NULL;
1693 void *il_src = NULL;
1694 int t_cplen, cplen;
1695 int indx;
1696
1697 hwq = &qp->sq.hwq;
1698 t_len = 0;
1699 for (indx = 0; indx < wqe->num_sge; indx++) {
1700 len = wqe->sg_list[indx].size;
1701 il_src = (void *)wqe->sg_list[indx].addr;
1702 t_len += len;
1703 if (t_len > qp->max_inline_data)
1704 return -ENOMEM;
1705 while (len) {
1706 if (pull_dst) {
1707 pull_dst = false;
1708 il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
1709 (*idx)++;
1710 t_cplen = 0;
1711 offt = 0;
1712 }
1713 cplen = min_t(int, len, sizeof(struct sq_sge));
1714 cplen = min_t(int, cplen,
1715 (sizeof(struct sq_sge) - offt));
1716 memcpy(il_dst, il_src, cplen);
1717 t_cplen += cplen;
1718 il_src += cplen;
1719 il_dst += cplen;
1720 offt += cplen;
1721 len -= cplen;
1722 if (t_cplen == sizeof(struct sq_sge))
1723 pull_dst = true;
1724 }
1725 }
1726
1727 return t_len;
1728 }
1729
1730 static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
1731 struct bnxt_qplib_sge *ssge,
1732 u16 nsge, u16 *idx)
1733 {
1734 struct sq_sge *dsge;
1735 int indx, len = 0;
1736
1737 for (indx = 0; indx < nsge; indx++, (*idx)++) {
1738 dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
1739 dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
1740 dsge->l_key = cpu_to_le32(ssge[indx].lkey);
1741 dsge->size = cpu_to_le32(ssge[indx].size);
1742 len += ssge[indx].size;
1743 }
1744
1745 return len;
1746 }
1747
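/* Work out how many 16-byte slots a WQE will consume: the send header plus
 * either the SGE list or the aligned inline payload. In static WQE mode
 * every WQE occupies a full 128-byte (8-slot) stride regardless of size.
 */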
1748 static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
1749 struct bnxt_qplib_swqe *wqe,
1750 u16 *wqe_sz, u16 *qdf, u8 mode)
1751 {
1752 u32 ilsize, bytes;
1753 u16 nsge;
1754 u16 slot;
1755
1756 nsge = wqe->num_sge;
1757 /* Using sq_send_hdr here is a slight misnomer; the RQ header has the same size. */
1758 bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
1759 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1760 ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
1761 bytes = ALIGN(ilsize, sizeof(struct sq_sge));
1762 bytes += sizeof(struct sq_send_hdr);
1763 }
1764
1765 *qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
1766 slot = bytes >> 4;
1767 *wqe_sz = slot;
1768 if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
1769 slot = 8;
1770 return slot;
1771 }
1772
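/* Point swq->psn_search/psn_ext at this WQE's entry in the pad pages backing
 * the PSN/MSN search area. With HW retransmission the entry is indexed by the
 * QP's MSN (modulo msn_tbl_sz) instead of the WQE's slot index.
 */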
1773 static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
1774 struct bnxt_qplib_swq *swq, bool hw_retx)
1775 {
1776 struct bnxt_qplib_hwq *hwq;
1777 u32 pg_num, pg_indx;
1778 void *buff;
1779 u32 tail;
1780
1781 hwq = &sq->hwq;
1782 if (!hwq->pad_pg)
1783 return;
1784 tail = swq->slot_idx / sq->dbinfo.max_slot;
1785 if (hw_retx) {
1786 /* For HW retx use qp msn index */
1787 tail = qp->msn;
1788 tail %= qp->msn_tbl_sz;
1789 }
1790 pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1791 pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1792 buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1793 swq->psn_ext = buff;
1794 swq->psn_search = buff;
1795 }
1796
1797 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1798 {
1799 struct bnxt_qplib_q *sq = &qp->sq;
1800
1801 bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1802 }
1803
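/* Post a single send work request on the QP's SQ. Returns 0 on success,
 * -EINVAL for a bad QP state or WQE type and -ENOMEM when the queue is full.
 * Hypothetical caller sketch (names illustrative, not from this file); the
 * doorbell is rung separately once the WQE is queued:
 *
 *	rc = bnxt_qplib_post_send(qplib_qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qplib_qp);
 */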
1804 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1805 struct bnxt_qplib_swqe *wqe)
1806 {
1807 struct bnxt_qplib_nq_work *nq_work = NULL;
1808 int i, rc = 0, data_len = 0, pkt_num = 0;
1809 struct bnxt_qplib_q *sq = &qp->sq;
1810 struct bnxt_qplib_hwq *hwq;
1811 struct bnxt_qplib_swq *swq;
1812 bool sch_handler = false;
1813 u16 wqe_sz, qdf = 0;
1814 bool msn_update;
1815 void *base_hdr;
1816 void *ext_hdr;
1817 __le32 temp32;
1818 u32 wqe_idx;
1819 u32 slots;
1820 u16 idx;
1821
1822 hwq = &sq->hwq;
1823 if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1824 qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1825 dev_err(&hwq->pdev->dev,
1826 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1827 qp->id, qp->state);
1828 rc = -EINVAL;
1829 goto done;
1830 }
1831
1832 slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1833 if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1834 dev_err(&hwq->pdev->dev,
1835 "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1836 hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1837 rc = -ENOMEM;
1838 goto done;
1839 }
1840
1841 swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1842 bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
1843
1844 idx = 0;
1845 swq->slot_idx = hwq->prod;
1846 swq->slots = slots;
1847 swq->wr_id = wqe->wr_id;
1848 swq->type = wqe->type;
1849 swq->flags = wqe->flags;
1850 swq->start_psn = sq->psn & BTH_PSN_MASK;
1851 if (qp->sig_type)
1852 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1853
1854 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1855 sch_handler = true;
1856 dev_dbg(&hwq->pdev->dev,
1857 "%s Error QP. Scheduling for poll_cq\n", __func__);
1858 goto queue_err;
1859 }
1860
1861 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1862 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1863 memset(base_hdr, 0, sizeof(struct sq_sge));
1864 memset(ext_hdr, 0, sizeof(struct sq_sge));
1865
1866 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1867 /* Copy the inline data */
1868 data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1869 else
1870 data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1871 &idx);
1872 if (data_len < 0)
1873 goto queue_err;
1874 /* Update the MSN table only for WQEs that actually go out on the wire */
1875 msn_update = true;
1876 /* Specifics */
1877 switch (wqe->type) {
1878 case BNXT_QPLIB_SWQE_TYPE_SEND:
1879 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1880 struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1881 struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1882 /* Assemble info for Raw Ethertype QPs */
1883
1884 sqe->wqe_type = wqe->type;
1885 sqe->flags = wqe->flags;
1886 sqe->wqe_size = wqe_sz;
1887 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1888 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1889 sqe->length = cpu_to_le32(data_len);
1890 ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1891 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1892 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1893
1894 break;
1895 }
1896 fallthrough;
1897 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1898 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1899 {
1900 struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1901 struct sq_send_hdr *sqe = base_hdr;
1902
1903 sqe->wqe_type = wqe->type;
1904 sqe->flags = wqe->flags;
1905 sqe->wqe_size = wqe_sz;
1906 sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1907 if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1908 qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1909 sqe->q_key = cpu_to_le32(wqe->send.q_key);
1910 sqe->length = cpu_to_le32(data_len);
1911 sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1912 ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1913 SQ_SEND_DST_QP_MASK);
1914 ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1915 SQ_SEND_AVID_MASK);
1916 msn_update = false;
1917 } else {
1918 sqe->length = cpu_to_le32(data_len);
1919 if (qp->mtu)
1920 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1921 if (!pkt_num)
1922 pkt_num = 1;
1923 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1924 }
1925 break;
1926 }
1927 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1928 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1929 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1930 {
1931 struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1932 struct sq_rdma_hdr *sqe = base_hdr;
1933
1934 sqe->wqe_type = wqe->type;
1935 sqe->flags = wqe->flags;
1936 sqe->wqe_size = wqe_sz;
1937 sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1938 sqe->length = cpu_to_le32((u32)data_len);
1939 ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1940 ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1941 if (qp->mtu)
1942 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1943 if (!pkt_num)
1944 pkt_num = 1;
1945 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1946 break;
1947 }
1948 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1949 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1950 {
1951 struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1952 struct sq_atomic_hdr *sqe = base_hdr;
1953
1954 sqe->wqe_type = wqe->type;
1955 sqe->flags = wqe->flags;
1956 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1957 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1958 ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1959 ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1960 if (qp->mtu)
1961 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1962 if (!pkt_num)
1963 pkt_num = 1;
1964 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1965 break;
1966 }
1967 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1968 {
1969 struct sq_localinvalidate *sqe = base_hdr;
1970
1971 sqe->wqe_type = wqe->type;
1972 sqe->flags = wqe->flags;
1973 sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1974 msn_update = false;
1975 break;
1976 }
1977 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1978 {
1979 struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1980 struct sq_fr_pmr_hdr *sqe = base_hdr;
1981
1982 sqe->wqe_type = wqe->type;
1983 sqe->flags = wqe->flags;
1984 sqe->access_cntl = wqe->frmr.access_cntl |
1985 SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1986 sqe->zero_based_page_size_log =
1987 (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1988 SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1989 (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1990 sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1991 temp32 = cpu_to_le32(wqe->frmr.length);
1992 memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1993 sqe->numlevels_pbl_page_size_log =
1994 ((wqe->frmr.pbl_pg_sz_log <<
1995 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1996 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1997 ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1998 SQ_FR_PMR_NUMLEVELS_MASK);
1999
2000 for (i = 0; i < wqe->frmr.page_list_len; i++)
2001 wqe->frmr.pbl_ptr[i] = cpu_to_le64(
2002 wqe->frmr.page_list[i] |
2003 PTU_PTE_VALID);
2004 ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
2005 ext_sqe->va = cpu_to_le64(wqe->frmr.va);
2006 msn_update = false;
2007
2008 break;
2009 }
2010 case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
2011 {
2012 struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
2013 struct sq_bind_hdr *sqe = base_hdr;
2014
2015 sqe->wqe_type = wqe->type;
2016 sqe->flags = wqe->flags;
2017 sqe->access_cntl = wqe->bind.access_cntl;
2018 sqe->mw_type_zero_based = wqe->bind.mw_type |
2019 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
2020 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
2021 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
2022 ext_sqe->va = cpu_to_le64(wqe->bind.va);
2023 ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
2024 msn_update = false;
2025 break;
2026 }
2027 default:
2028 /* Bad wqe, return error */
2029 rc = -EINVAL;
2030 goto done;
2031 }
2032 if (!qp->is_host_msn_tbl || msn_update) {
2033 swq->next_psn = sq->psn & BTH_PSN_MASK;
2034 bnxt_qplib_fill_psn_search(qp, wqe, swq);
2035 }
2036 queue_err:
2037 bnxt_qplib_swq_mod_start(sq, wqe_idx);
2038 bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
2039 qp->wqe_cnt++;
2040 done:
2041 if (sch_handler) {
2042 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2043 if (nq_work) {
2044 nq_work->cq = qp->scq;
2045 nq_work->nq = qp->scq->nq;
2046 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2047 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
2048 } else {
2049 dev_err(&hwq->pdev->dev,
2050 "FP: Failed to allocate SQ nq_work!\n");
2051 rc = -ENOMEM;
2052 }
2053 }
2054 return rc;
2055 }
2056
2057 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2058 {
2059 struct bnxt_qplib_q *rq = &qp->rq;
2060
2061 bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
2062 }
2063
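/* Post a single receive work request on the QP's RQ. Returns -EINVAL if the
 * QP is in RESET or the RQ is full; for a QP in ERR the request is only
 * recorded and a worker is scheduled so it completes through poll_cq as
 * flushed.
 */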
2064 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
2065 struct bnxt_qplib_swqe *wqe)
2066 {
2067 struct bnxt_qplib_nq_work *nq_work = NULL;
2068 struct bnxt_qplib_q *rq = &qp->rq;
2069 struct rq_wqe_hdr *base_hdr;
2070 struct rq_ext_hdr *ext_hdr;
2071 struct bnxt_qplib_hwq *hwq;
2072 struct bnxt_qplib_swq *swq;
2073 bool sch_handler = false;
2074 u16 wqe_sz, idx;
2075 u32 wqe_idx;
2076 int rc = 0;
2077
2078 hwq = &rq->hwq;
2079 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2080 dev_err(&hwq->pdev->dev,
2081 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
2082 qp->id, qp->state);
2083 rc = -EINVAL;
2084 goto done;
2085 }
2086
2087 if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
2088 dev_err(&hwq->pdev->dev,
2089 "FP: QP (0x%x) RQ is full!\n", qp->id);
2090 rc = -EINVAL;
2091 goto done;
2092 }
2093
2094 swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2095 swq->wr_id = wqe->wr_id;
2096 swq->slots = rq->dbinfo.max_slot;
2097
2098 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2099 sch_handler = true;
2100 dev_dbg(&hwq->pdev->dev,
2101 "%s: Error QP. Scheduling for poll_cq\n", __func__);
2102 goto queue_err;
2103 }
2104
2105 idx = 0;
2106 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2107 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2108 memset(base_hdr, 0, sizeof(struct sq_sge));
2109 memset(ext_hdr, 0, sizeof(struct sq_sge));
2110 wqe_sz = (sizeof(struct rq_wqe_hdr) +
2111 wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2112 bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2113 if (!wqe->num_sge) {
2114 struct sq_sge *sge;
2115
2116 sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2117 sge->size = 0;
2118 wqe_sz++;
2119 }
2120 base_hdr->wqe_type = wqe->type;
2121 base_hdr->flags = wqe->flags;
2122 base_hdr->wqe_size = wqe_sz;
2123 base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2124 queue_err:
2125 bnxt_qplib_swq_mod_start(rq, wqe_idx);
2126 bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
2127 done:
2128 if (sch_handler) {
2129 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2130 if (nq_work) {
2131 nq_work->cq = qp->rcq;
2132 nq_work->nq = qp->rcq->nq;
2133 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2134 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2135 } else {
2136 dev_err(&hwq->pdev->dev,
2137 "FP: Failed to allocate RQ nq_work!\n");
2138 rc = -ENOMEM;
2139 }
2140 }
2141
2142 return rc;
2143 }
2144
2145 /* CQ */
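/* Allocate the CQ ring, issue CREATE_CQ to firmware, set up the doorbell
 * info and enable CQ notifications via DBC_DBC_TYPE_CQ_ARMENA. The HWQ is
 * freed again if the firmware command fails.
 */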
2146 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2147 {
2148 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2149 struct bnxt_qplib_hwq_attr hwq_attr = {};
2150 struct creq_create_cq_resp resp = {};
2151 struct bnxt_qplib_cmdqmsg msg = {};
2152 struct cmdq_create_cq req = {};
2153 struct bnxt_qplib_pbl *pbl;
2154 u32 pg_sz_lvl;
2155 int rc;
2156
2157 if (!cq->dpi) {
2158 dev_err(&rcfw->pdev->dev,
2159 "FP: CREATE_CQ failed due to NULL DPI\n");
2160 return -EINVAL;
2161 }
2162
2163 cq->dbinfo.flags = 0;
2164 hwq_attr.res = res;
2165 hwq_attr.depth = cq->max_wqe;
2166 hwq_attr.stride = sizeof(struct cq_base);
2167 hwq_attr.type = HWQ_TYPE_QUEUE;
2168 hwq_attr.sginfo = &cq->sg_info;
2169 rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2170 if (rc)
2171 return rc;
2172
2173 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2174 CMDQ_BASE_OPCODE_CREATE_CQ,
2175 sizeof(req));
2176
2177 req.dpi = cpu_to_le32(cq->dpi->dpi);
2178 req.cq_handle = cpu_to_le64(cq->cq_handle);
2179 req.cq_size = cpu_to_le32(cq->max_wqe);
2180 pbl = &cq->hwq.pbl[PBL_LVL_0];
2181 pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2182 CMDQ_CREATE_CQ_PG_SIZE_SFT);
2183 pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2184 req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2185 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2186 req.cq_fco_cnq_id = cpu_to_le32(
2187 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2188 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2189 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2190 sizeof(resp), 0);
2191 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2192 if (rc)
2193 goto fail;
2194
2195 cq->id = le32_to_cpu(resp.xid);
2196 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2197 init_waitqueue_head(&cq->waitq);
2198 INIT_LIST_HEAD(&cq->sqf_head);
2199 INIT_LIST_HEAD(&cq->rqf_head);
2200 spin_lock_init(&cq->compl_lock);
2201 spin_lock_init(&cq->flush_lock);
2202
2203 cq->dbinfo.hwq = &cq->hwq;
2204 cq->dbinfo.xid = cq->id;
2205 cq->dbinfo.db = cq->dpi->dbr;
2206 cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2207 cq->dbinfo.flags = 0;
2208 cq->dbinfo.toggle = 0;
2209
2210 bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2211
2212 return 0;
2213
2214 fail:
2215 bnxt_qplib_free_hwq(res, &cq->hwq);
2216 return rc;
2217 }
2218
2219 void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2220 struct bnxt_qplib_cq *cq)
2221 {
2222 bnxt_qplib_free_hwq(res, &cq->hwq);
2223 memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2224 /* Reset only the cons bit in the flags */
2225 cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
2226 }
2227
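/* Allocate a new ring of new_cqes entries and issue RESIZE_CQ; the switch to
 * the new ring happens in bnxt_qplib_resize_cq_complete() after the cutoff
 * CQE has been observed.
 */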
2228 int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2229 int new_cqes)
2230 {
2231 struct bnxt_qplib_hwq_attr hwq_attr = {};
2232 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2233 struct creq_resize_cq_resp resp = {};
2234 struct bnxt_qplib_cmdqmsg msg = {};
2235 struct cmdq_resize_cq req = {};
2236 struct bnxt_qplib_pbl *pbl;
2237 u32 pg_sz, lvl, new_sz;
2238 int rc;
2239
2240 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2241 CMDQ_BASE_OPCODE_RESIZE_CQ,
2242 sizeof(req));
2243 hwq_attr.sginfo = &cq->sg_info;
2244 hwq_attr.res = res;
2245 hwq_attr.depth = new_cqes;
2246 hwq_attr.stride = sizeof(struct cq_base);
2247 hwq_attr.type = HWQ_TYPE_QUEUE;
2248 rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2249 if (rc)
2250 return rc;
2251
2252 req.cq_cid = cpu_to_le32(cq->id);
2253 pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2254 pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2255 lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2256 CMDQ_RESIZE_CQ_LVL_MASK;
2257 new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2258 CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2259 req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2260 req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2261
2262 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2263 sizeof(resp), 0);
2264 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2265 return rc;
2266 }
2267
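/* Issue DESTROY_CQ, then wait for all CNQ events already generated for this
 * CQ to be consumed before freeing the ring memory.
 */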
2268 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2269 {
2270 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2271 struct creq_destroy_cq_resp resp = {};
2272 struct bnxt_qplib_cmdqmsg msg = {};
2273 struct cmdq_destroy_cq req = {};
2274 u16 total_cnq_events;
2275 int rc;
2276
2277 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2278 CMDQ_BASE_OPCODE_DESTROY_CQ,
2279 sizeof(req));
2280
2281 req.cq_cid = cpu_to_le32(cq->id);
2282 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2283 sizeof(resp), 0);
2284 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2285 if (rc)
2286 return rc;
2287 total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2288 __wait_for_all_nqes(cq, total_cnq_events);
2289 bnxt_qplib_free_hwq(res, &cq->hwq);
2290 return 0;
2291 }
2292
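/* Fabricate WORK_REQUEST_FLUSHED_ERR completions for every outstanding SQ
 * WQE of a flushed QP, skipping internal FENCE WQEs, until the queue drains
 * or the budget runs out (-EAGAIN).
 */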
2293 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2294 struct bnxt_qplib_cqe **pcqe, int *budget)
2295 {
2296 struct bnxt_qplib_cqe *cqe;
2297 u32 start, last;
2298 int rc = 0;
2299
2300 /* Now complete all outstanding SQEs with FLUSHED_ERR */
2301 start = sq->swq_start;
2302 cqe = *pcqe;
2303 while (*budget) {
2304 last = sq->swq_last;
2305 if (start == last)
2306 break;
2307 /* Skip the FENCE WQE completions */
2308 if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2309 bnxt_qplib_cancel_phantom_processing(qp);
2310 goto skip_compl;
2311 }
2312 memset(cqe, 0, sizeof(*cqe));
2313 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2314 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2315 cqe->qp_handle = (u64)(unsigned long)qp;
2316 cqe->wr_id = sq->swq[last].wr_id;
2317 cqe->src_qp = qp->id;
2318 cqe->type = sq->swq[last].type;
2319 cqe++;
2320 (*budget)--;
2321 skip_compl:
2322 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2323 sq->swq[last].slots, &sq->dbinfo.flags);
2324 sq->swq_last = sq->swq[last].next_idx;
2325 }
2326 *pcqe = cqe;
2327 if (!(*budget) && sq->swq_last != start)
2328 /* Out of budget */
2329 rc = -EAGAIN;
2330
2331 return rc;
2332 }
2333
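/* Fabricate flush-error completions for all outstanding RQ WQEs, using the
 * responder CQE opcode that matches the QP type (raw QP1, RC or UD/GSI).
 */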
2334 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2335 struct bnxt_qplib_cqe **pcqe, int *budget)
2336 {
2337 struct bnxt_qplib_cqe *cqe;
2338 u32 start, last;
2339 int opcode = 0;
2340 int rc = 0;
2341
2342 switch (qp->type) {
2343 case CMDQ_CREATE_QP1_TYPE_GSI:
2344 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2345 break;
2346 case CMDQ_CREATE_QP_TYPE_RC:
2347 opcode = CQ_BASE_CQE_TYPE_RES_RC;
2348 break;
2349 case CMDQ_CREATE_QP_TYPE_UD:
2350 case CMDQ_CREATE_QP_TYPE_GSI:
2351 opcode = CQ_BASE_CQE_TYPE_RES_UD;
2352 break;
2353 }
2354
2355 /* Flush the rest of the RQ */
2356 start = rq->swq_start;
2357 cqe = *pcqe;
2358 while (*budget) {
2359 last = rq->swq_last;
2360 if (last == start)
2361 break;
2362 memset(cqe, 0, sizeof(*cqe));
2363 cqe->status =
2364 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2365 cqe->opcode = opcode;
2366 cqe->qp_handle = (unsigned long)qp;
2367 cqe->wr_id = rq->swq[last].wr_id;
2368 cqe++;
2369 (*budget)--;
2370 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2371 rq->swq[last].slots, &rq->dbinfo.flags);
2372 rq->swq_last = rq->swq[last].next_idx;
2373 }
2374 *pcqe = cqe;
2375 if (!*budget && rq->swq_last != start)
2376 /* Out of budget */
2377 rc = -EAGAIN;
2378
2379 return rc;
2380 }
2381
2382 void bnxt_qplib_mark_qp_error(void *qp_handle)
2383 {
2384 struct bnxt_qplib_qp *qp = qp_handle;
2385
2386 if (!qp)
2387 return;
2388
2389 /* Must block new posting of SQ and RQ */
2390 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2391 bnxt_qplib_cancel_phantom_processing(qp);
2392 }
2393
2394 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2395 * CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
2396 */
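/* Hardware workaround 9060: if this WQE's psn_search entry is still marked
 * (bit 31 of flags_next_psn), a phantom CQE for an earlier FENCE WQE may
 * still be in flight. Re-arm the CQ, defer the completion with -EAGAIN and
 * keep peeking until the phantom REQ CQE arrives.
 */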
2397 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2398 u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2399 {
2400 u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
2401 struct bnxt_qplib_q *sq = &qp->sq;
2402 struct cq_req *peek_req_hwcqe;
2403 struct bnxt_qplib_qp *peek_qp;
2404 struct bnxt_qplib_q *peek_sq;
2405 struct bnxt_qplib_swq *swq;
2406 struct cq_base *peek_hwcqe;
2407 int i, rc = 0;
2408
2409 /* Normal mode */
2410 /* Check for the psn_search marking before completing */
2411 swq = &sq->swq[swq_last];
2412 if (swq->psn_search &&
2413 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2414 /* Unmark */
2415 swq->psn_search->flags_next_psn = cpu_to_le32
2416 (le32_to_cpu(swq->psn_search->flags_next_psn)
2417 & ~0x80000000);
2418 dev_dbg(&cq->hwq.pdev->dev,
2419 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2420 cq_cons, qp->id, swq_last, cqe_sq_cons);
2421 sq->condition = true;
2422 sq->send_phantom = true;
2423
2424 /* TODO: Only ARM if the previous SQE is ARMALL */
2425 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2426 rc = -EAGAIN;
2427 goto out;
2428 }
2429 if (sq->condition) {
2430 /* Peek at the completions */
2431 peek_flags = cq->dbinfo.flags;
2432 peek_sw_cq_cons = cq_cons;
2433 i = cq->hwq.max_elements;
2434 while (i--) {
2435 peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2436 peek_sw_cq_cons, NULL);
2437 /* If the next hwcqe is VALID */
2438 if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
2439 /*
2440 * The valid test of the entry must be done first before
2441 * reading any further.
2442 */
2443 dma_rmb();
2444 /* If the next hwcqe is a REQ */
2445 if ((peek_hwcqe->cqe_type_toggle &
2446 CQ_BASE_CQE_TYPE_MASK) ==
2447 CQ_BASE_CQE_TYPE_REQ) {
2448 peek_req_hwcqe = (struct cq_req *)
2449 peek_hwcqe;
2450 peek_qp = (struct bnxt_qplib_qp *)
2451 ((unsigned long)
2452 le64_to_cpu
2453 (peek_req_hwcqe->qp_handle));
2454 peek_sq = &peek_qp->sq;
2455 peek_sq_cons_idx =
2456 ((le16_to_cpu(
2457 peek_req_hwcqe->sq_cons_idx)
2458 - 1) % sq->max_wqe);
2459 /* If the hwcqe's sq's wr_id matches */
2460 if (peek_sq == sq &&
2461 sq->swq[peek_sq_cons_idx].wr_id ==
2462 BNXT_QPLIB_FENCE_WRID) {
2463 /*
2464 * Unbreak only if the phantom
2465 * comes back
2466 */
2467 dev_dbg(&cq->hwq.pdev->dev,
2468 "FP: Got Phantom CQE\n");
2469 sq->condition = false;
2470 sq->single = true;
2471 rc = 0;
2472 goto out;
2473 }
2474 }
2475 /* Valid but not the phantom, so keep looping */
2476 } else {
2477 /* Not valid yet, just exit and wait */
2478 rc = -EINVAL;
2479 goto out;
2480 }
2481 bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
2482 &peek_sw_cq_cons,
2483 1, &peek_flags);
2484 }
2485 dev_err(&cq->hwq.pdev->dev,
2486 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2487 cq_cons, qp->id, swq_last, cqe_sq_cons);
2488 rc = -EINVAL;
2489 }
2490 out:
2491 return rc;
2492 }
2493
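/* For variable-size WQEs, map the slot index reported in an error CQE back
 * to a software queue consumer index by walking the in-flight SWQ entries;
 * returns -1 if no entry owns that slot.
 */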
2494 static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
2495 {
2496 struct bnxt_qplib_hwq *sq_hwq;
2497 struct bnxt_qplib_swq *swq;
2498 int cqe_sq_cons = -1;
2499 u32 start, last;
2500
2501 sq_hwq = &sq->hwq;
2502
2503 start = sq->swq_start;
2504 last = sq->swq_last;
2505
2506 while (last != start) {
2507 swq = &sq->swq[last];
2508 if (swq->slot_idx == cqe_slot) {
2509 cqe_sq_cons = swq->next_idx;
2510 dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
2511 __func__, cqe_sq_cons, cqe_slot);
2512 break;
2513 }
2514
2515 last = swq->next_idx;
2516 }
2517 return cqe_sq_cons;
2518 }
2519
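/* Process a REQ (send-side) CQE. The hardware may aggregate several signaled
 * WQEs into one CQE, so completions are generated for every SWQ entry from
 * the current consumer up to the reported sq_cons_idx; an error status moves
 * the QP to ERR and puts it on the CQ's flush list.
 */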
2520 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2521 struct cq_req *hwcqe,
2522 struct bnxt_qplib_cqe **pcqe, int *budget,
2523 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2524 {
2525 struct bnxt_qplib_swq *swq;
2526 struct bnxt_qplib_cqe *cqe;
2527 u32 cqe_sq_cons, slot_num;
2528 struct bnxt_qplib_qp *qp;
2529 struct bnxt_qplib_q *sq;
2530 int cqe_cons;
2531 int rc = 0;
2532
2533 qp = (struct bnxt_qplib_qp *)((unsigned long)
2534 le64_to_cpu(hwcqe->qp_handle));
2535 if (!qp) {
2536 dev_err(&cq->hwq.pdev->dev,
2537 "FP: Process Req qp is NULL\n");
2538 return -EINVAL;
2539 }
2540 sq = &qp->sq;
2541
2542 cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
2543 if (qp->sq.flushed) {
2544 dev_dbg(&cq->hwq.pdev->dev,
2545 "%s: QP in Flush QP = %p\n", __func__, qp);
2546 goto done;
2547 }
2548
2549 if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
2550 slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
2551 cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
2552 if (cqe_cons < 0) {
2553 dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
2554 __func__, slot_num);
2555 goto done;
2556 }
2557 cqe_sq_cons = cqe_cons;
2558 dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
2559 __func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
2560 }
2561
2562 /* The SQ's swq must be walked to fabricate CQEs for all previously
2563 * signaled SWQEs that the hardware aggregated, from the current sq cons
2564 * to the cqe_sq_cons
2565 */
2566 cqe = *pcqe;
2567 while (*budget) {
2568 if (sq->swq_last == cqe_sq_cons)
2569 /* Done */
2570 break;
2571
2572 swq = &sq->swq[sq->swq_last];
2573 memset(cqe, 0, sizeof(*cqe));
2574 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2575 cqe->qp_handle = (u64)(unsigned long)qp;
2576 cqe->src_qp = qp->id;
2577 cqe->wr_id = swq->wr_id;
2578 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2579 goto skip;
2580 cqe->type = swq->type;
2581
2582 /* For the last CQE, check for status. For errors, regardless
2583 * of the request being signaled or not, it must complete with
2584 * the hwcqe error status
2585 */
2586 if (swq->next_idx == cqe_sq_cons &&
2587 hwcqe->status != CQ_REQ_STATUS_OK) {
2588 cqe->status = hwcqe->status;
2589 dev_err(&cq->hwq.pdev->dev,
2590 "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2591 sq->swq_last, cqe->wr_id, cqe->status);
2592 cqe++;
2593 (*budget)--;
2594 bnxt_qplib_mark_qp_error(qp);
2595 /* Add qp to flush list of the CQ */
2596 bnxt_qplib_add_flush_qp(qp);
2597 } else {
2598 /* Before we complete, do WA 9060 */
2599 if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2600 cqe_sq_cons)) {
2601 *lib_qp = qp;
2602 goto out;
2603 }
2604 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2605 cqe->status = CQ_REQ_STATUS_OK;
2606 cqe++;
2607 (*budget)--;
2608 }
2609 }
2610 skip:
2611 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2612 swq->slots, &sq->dbinfo.flags);
2613 sq->swq_last = swq->next_idx;
2614 if (sq->single)
2615 break;
2616 }
2617 out:
2618 *pcqe = cqe;
2619 if (sq->swq_last != cqe_sq_cons) {
2620 /* Out of budget */
2621 rc = -EAGAIN;
2622 goto done;
2623 }
2624 /*
2625 * Back to normal completion mode only after it has completed all of
2626 * the WC for this CQE
2627 */
2628 sq->single = false;
2629 done:
2630 return rc;
2631 }
2632
2633 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2634 {
2635 spin_lock(&srq->hwq.lock);
2636 srq->swq[srq->last_idx].next_idx = (int)tag;
2637 srq->last_idx = (int)tag;
2638 srq->swq[srq->last_idx].next_idx = -1;
2639 bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
2640 srq->dbinfo.max_slot, &srq->dbinfo.flags);
2641 spin_unlock(&srq->hwq.lock);
2642 }
2643
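/* Process an RC responder CQE: build one bnxt_qplib_cqe for the matching SRQ
 * or RQ entry, advance the consumer, and on a bad status move the QP to ERR
 * and add it to the flush list.
 */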
2644 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2645 struct cq_res_rc *hwcqe,
2646 struct bnxt_qplib_cqe **pcqe,
2647 int *budget)
2648 {
2649 struct bnxt_qplib_srq *srq;
2650 struct bnxt_qplib_cqe *cqe;
2651 struct bnxt_qplib_qp *qp;
2652 struct bnxt_qplib_q *rq;
2653 u32 wr_id_idx;
2654
2655 qp = (struct bnxt_qplib_qp *)((unsigned long)
2656 le64_to_cpu(hwcqe->qp_handle));
2657 if (!qp) {
2658 dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2659 return -EINVAL;
2660 }
2661 if (qp->rq.flushed) {
2662 dev_dbg(&cq->hwq.pdev->dev,
2663 "%s: QP in Flush QP = %p\n", __func__, qp);
2664 return 0;
2665 }
2666
2667 cqe = *pcqe;
2668 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2669 cqe->length = le32_to_cpu(hwcqe->length);
2670 cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2671 cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2672 cqe->flags = le16_to_cpu(hwcqe->flags);
2673 cqe->status = hwcqe->status;
2674 cqe->qp_handle = (u64)(unsigned long)qp;
2675
2676 wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2677 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2678 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2679 srq = qp->srq;
2680 if (!srq)
2681 return -EINVAL;
2682 if (wr_id_idx >= srq->hwq.max_elements) {
2683 dev_err(&cq->hwq.pdev->dev,
2684 "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2685 wr_id_idx, srq->hwq.max_elements);
2686 return -EINVAL;
2687 }
2688 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2689 bnxt_qplib_release_srqe(srq, wr_id_idx);
2690 cqe++;
2691 (*budget)--;
2692 *pcqe = cqe;
2693 } else {
2694 struct bnxt_qplib_swq *swq;
2695
2696 rq = &qp->rq;
2697 if (wr_id_idx > (rq->max_wqe - 1)) {
2698 dev_err(&cq->hwq.pdev->dev,
2699 "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2700 wr_id_idx, rq->max_wqe);
2701 return -EINVAL;
2702 }
2703 if (wr_id_idx != rq->swq_last)
2704 return -EINVAL;
2705 swq = &rq->swq[rq->swq_last];
2706 cqe->wr_id = swq->wr_id;
2707 cqe++;
2708 (*budget)--;
2709 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2710 swq->slots, &rq->dbinfo.flags);
2711 rq->swq_last = swq->next_idx;
2712 *pcqe = cqe;
2713
2714 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2715 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2716 /* Add qp to flush list of the CQ */
2717 bnxt_qplib_add_flush_qp(qp);
2718 }
2719 }
2720
2721 return 0;
2722 }
2723
2724 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2725 struct cq_res_ud *hwcqe,
2726 struct bnxt_qplib_cqe **pcqe,
2727 int *budget)
2728 {
2729 struct bnxt_qplib_srq *srq;
2730 struct bnxt_qplib_cqe *cqe;
2731 struct bnxt_qplib_qp *qp;
2732 struct bnxt_qplib_q *rq;
2733 u32 wr_id_idx;
2734
2735 qp = (struct bnxt_qplib_qp *)((unsigned long)
2736 le64_to_cpu(hwcqe->qp_handle));
2737 if (!qp) {
2738 dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2739 return -EINVAL;
2740 }
2741 if (qp->rq.flushed) {
2742 dev_dbg(&cq->hwq.pdev->dev,
2743 "%s: QP in Flush QP = %p\n", __func__, qp);
2744 return 0;
2745 }
2746 cqe = *pcqe;
2747 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2748 cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2749 cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2750 cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2751 cqe->flags = le16_to_cpu(hwcqe->flags);
2752 cqe->status = hwcqe->status;
2753 cqe->qp_handle = (u64)(unsigned long)qp;
2754 /* FIXME: Endianness fix needed for smac */
2755 memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2756 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2757 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2758 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2759 ((le32_to_cpu(
2760 hwcqe->src_qp_high_srq_or_rq_wr_id) &
2761 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2762
2763 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2764 srq = qp->srq;
2765 if (!srq)
2766 return -EINVAL;
2767
2768 if (wr_id_idx >= srq->hwq.max_elements) {
2769 dev_err(&cq->hwq.pdev->dev,
2770 "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2771 wr_id_idx, srq->hwq.max_elements);
2772 return -EINVAL;
2773 }
2774 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2775 bnxt_qplib_release_srqe(srq, wr_id_idx);
2776 cqe++;
2777 (*budget)--;
2778 *pcqe = cqe;
2779 } else {
2780 struct bnxt_qplib_swq *swq;
2781
2782 rq = &qp->rq;
2783 if (wr_id_idx > (rq->max_wqe - 1)) {
2784 dev_err(&cq->hwq.pdev->dev,
2785 "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2786 wr_id_idx, rq->max_wqe);
2787 return -EINVAL;
2788 }
2789
2790 if (rq->swq_last != wr_id_idx)
2791 return -EINVAL;
2792 swq = &rq->swq[rq->swq_last];
2793 cqe->wr_id = swq->wr_id;
2794 cqe++;
2795 (*budget)--;
2796 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2797 swq->slots, &rq->dbinfo.flags);
2798 rq->swq_last = swq->next_idx;
2799 *pcqe = cqe;
2800
2801 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2802 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2803 /* Add qp to flush list of the CQ */
2804 bnxt_qplib_add_flush_qp(qp);
2805 }
2806 }
2807
2808 return 0;
2809 }
2810
2811 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2812 {
2813 struct cq_base *hw_cqe;
2814 bool rc = true;
2815
2816 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
2817 /* Check for Valid bit. If the CQE is valid, return false */
2818 rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
2819 return rc;
2820 }
2821
2822 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2823 struct cq_res_raweth_qp1 *hwcqe,
2824 struct bnxt_qplib_cqe **pcqe,
2825 int *budget)
2826 {
2827 struct bnxt_qplib_qp *qp;
2828 struct bnxt_qplib_q *rq;
2829 struct bnxt_qplib_srq *srq;
2830 struct bnxt_qplib_cqe *cqe;
2831 u32 wr_id_idx;
2832
2833 qp = (struct bnxt_qplib_qp *)((unsigned long)
2834 le64_to_cpu(hwcqe->qp_handle));
2835 if (!qp) {
2836 dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2837 return -EINVAL;
2838 }
2839 if (qp->rq.flushed) {
2840 dev_dbg(&cq->hwq.pdev->dev,
2841 "%s: QP in Flush QP = %p\n", __func__, qp);
2842 return 0;
2843 }
2844 cqe = *pcqe;
2845 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2846 cqe->flags = le16_to_cpu(hwcqe->flags);
2847 cqe->qp_handle = (u64)(unsigned long)qp;
2848
2849 wr_id_idx =
2850 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2851 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2852 cqe->src_qp = qp->id;
2853 if (qp->id == 1 && !cqe->length) {
2854 /* Add workaround for the length misdetection */
2855 cqe->length = 296;
2856 } else {
2857 cqe->length = le16_to_cpu(hwcqe->length);
2858 }
2859 cqe->pkey_index = qp->pkey_index;
2860 memcpy(cqe->smac, qp->smac, 6);
2861
2862 cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2863 cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2864 cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2865
2866 if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2867 srq = qp->srq;
2868 if (!srq) {
2869 dev_err(&cq->hwq.pdev->dev,
2870 "FP: SRQ used but not defined??\n");
2871 return -EINVAL;
2872 }
2873 if (wr_id_idx >= srq->hwq.max_elements) {
2874 dev_err(&cq->hwq.pdev->dev,
2875 "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2876 wr_id_idx, srq->hwq.max_elements);
2877 return -EINVAL;
2878 }
2879 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2880 bnxt_qplib_release_srqe(srq, wr_id_idx);
2881 cqe++;
2882 (*budget)--;
2883 *pcqe = cqe;
2884 } else {
2885 struct bnxt_qplib_swq *swq;
2886
2887 rq = &qp->rq;
2888 if (wr_id_idx > (rq->max_wqe - 1)) {
2889 dev_err(&cq->hwq.pdev->dev,
2890 "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2891 wr_id_idx, rq->max_wqe);
2892 return -EINVAL;
2893 }
2894 if (rq->swq_last != wr_id_idx)
2895 return -EINVAL;
2896 swq = &rq->swq[rq->swq_last];
2897 cqe->wr_id = swq->wr_id;
2898 cqe++;
2899 (*budget)--;
2900 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2901 swq->slots, &rq->dbinfo.flags);
2902 rq->swq_last = swq->next_idx;
2903 *pcqe = cqe;
2904
2905 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2906 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2907 /* Add qp to flush list of the CQ */
2908 bnxt_qplib_add_flush_qp(qp);
2909 }
2910 }
2911
2912 return 0;
2913 }
2914
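/* A terminal CQE marks the QP as errored: complete any aggregated successful
 * SQ WQEs up to the reported consumer index, then queue the QP on the flush
 * list so the remaining work requests complete with FLUSHED_ERR.
 */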
2915 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2916 struct cq_terminal *hwcqe,
2917 struct bnxt_qplib_cqe **pcqe,
2918 int *budget)
2919 {
2920 struct bnxt_qplib_qp *qp;
2921 struct bnxt_qplib_q *sq, *rq;
2922 struct bnxt_qplib_cqe *cqe;
2923 u32 swq_last = 0, cqe_cons;
2924 int rc = 0;
2925
2926 /* Check the Status */
2927 if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2928 dev_warn(&cq->hwq.pdev->dev,
2929 "FP: CQ Process Terminal Error status = 0x%x\n",
2930 hwcqe->status);
2931
2932 qp = (struct bnxt_qplib_qp *)((unsigned long)
2933 le64_to_cpu(hwcqe->qp_handle));
2934 if (!qp)
2935 return -EINVAL;
2936
2937 /* Must block new posting of SQ and RQ */
2938 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2939
2940 sq = &qp->sq;
2941 rq = &qp->rq;
2942
2943 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2944 if (cqe_cons == 0xFFFF)
2945 goto do_rq;
2946 cqe_cons %= sq->max_sw_wqe;
2947
2948 if (qp->sq.flushed) {
2949 dev_dbg(&cq->hwq.pdev->dev,
2950 "%s: QP in Flush QP = %p\n", __func__, qp);
2951 goto sq_done;
2952 }
2953
2954 /* A terminal CQE can also cover successful CQEs aggregated before it,
2955 * so all CQEs from the current sq's cons up to cq_cons must complete
2956 * with status OK
2957 */
2958 cqe = *pcqe;
2959 while (*budget) {
2960 swq_last = sq->swq_last;
2961 if (swq_last == cqe_cons)
2962 break;
2963 if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2964 memset(cqe, 0, sizeof(*cqe));
2965 cqe->status = CQ_REQ_STATUS_OK;
2966 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2967 cqe->qp_handle = (u64)(unsigned long)qp;
2968 cqe->src_qp = qp->id;
2969 cqe->wr_id = sq->swq[swq_last].wr_id;
2970 cqe->type = sq->swq[swq_last].type;
2971 cqe++;
2972 (*budget)--;
2973 }
2974 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2975 sq->swq[swq_last].slots, &sq->dbinfo.flags);
2976 sq->swq_last = sq->swq[swq_last].next_idx;
2977 }
2978 *pcqe = cqe;
2979 if (!(*budget) && swq_last != cqe_cons) {
2980 /* Out of budget */
2981 rc = -EAGAIN;
2982 goto sq_done;
2983 }
2984 sq_done:
2985 if (rc)
2986 return rc;
2987 do_rq:
2988 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2989 if (cqe_cons == 0xFFFF) {
2990 goto done;
2991 } else if (cqe_cons > rq->max_wqe - 1) {
2992 dev_err(&cq->hwq.pdev->dev,
2993 "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2994 cqe_cons, rq->max_wqe);
2995 rc = -EINVAL;
2996 goto done;
2997 }
2998
2999 if (qp->rq.flushed) {
3000 dev_dbg(&cq->hwq.pdev->dev,
3001 "%s: QP in Flush QP = %p\n", __func__, qp);
3002 rc = 0;
3003 goto done;
3004 }
3005
3006 /* A terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
3007 * from the current rq->cons to rq->prod, regardless of the rq_cons_idx
3008 * the terminal CQE indicates
3009 */
3010
3011 /* Add qp to flush list of the CQ */
3012 bnxt_qplib_add_flush_qp(qp);
3013 done:
3014 return rc;
3015 }
3016
3017 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
3018 struct cq_cutoff *hwcqe)
3019 {
3020 /* Check the Status */
3021 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
3022 dev_err(&cq->hwq.pdev->dev,
3023 "FP: CQ Process Cutoff Error status = 0x%x\n",
3024 hwcqe->status);
3025 return -EINVAL;
3026 }
3027 clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
3028 wake_up_interruptible(&cq->waitq);
3029
3030 return 0;
3031 }
3032
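/* Generate flush completions for every QP on this CQ's SQ and RQ flush
 * lists, bounded by num_cqes; returns the number of CQEs produced.
 */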
3033 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
3034 struct bnxt_qplib_cqe *cqe,
3035 int num_cqes)
3036 {
3037 struct bnxt_qplib_qp *qp = NULL;
3038 u32 budget = num_cqes;
3039 unsigned long flags;
3040
3041 spin_lock_irqsave(&cq->flush_lock, flags);
3042 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
3043 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
3044 __flush_sq(&qp->sq, qp, &cqe, &budget);
3045 }
3046
3047 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
3048 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
3049 __flush_rq(&qp->rq, qp, &cqe, &budget);
3050 }
3051 spin_unlock_irqrestore(&cq->flush_lock, flags);
3052
3053 return num_cqes - budget;
3054 }
3055
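/* Poll up to num_cqes completions from the CQ ring, dispatching each valid
 * hardware CQE to its type-specific handler and ringing the CQ doorbell once
 * for everything consumed. Returns the number of bnxt_qplib_cqe entries
 * filled; *lib_qp is set when the send-phantom workaround needs the caller's
 * attention.
 */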
3056 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
3057 int num_cqes, struct bnxt_qplib_qp **lib_qp)
3058 {
3059 struct cq_base *hw_cqe;
3060 int budget, rc = 0;
3061 u32 hw_polled = 0;
3062 u8 type;
3063
3064 budget = num_cqes;
3065
3066 while (budget) {
3067 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
3068
3069 /* Check for Valid bit */
3070 if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
3071 break;
3072
3073 /*
3074 * The valid test of the entry must be done first before
3075 * reading any further.
3076 */
3077 dma_rmb();
3078 /* Translate the device's CQE format into a bnxt_qplib_cqe */
3079 type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
3080 switch (type) {
3081 case CQ_BASE_CQE_TYPE_REQ:
3082 rc = bnxt_qplib_cq_process_req(cq,
3083 (struct cq_req *)hw_cqe,
3084 &cqe, &budget,
3085 cq->hwq.cons, lib_qp);
3086 break;
3087 case CQ_BASE_CQE_TYPE_RES_RC:
3088 rc = bnxt_qplib_cq_process_res_rc(cq,
3089 (struct cq_res_rc *)
3090 hw_cqe, &cqe,
3091 &budget);
3092 break;
3093 case CQ_BASE_CQE_TYPE_RES_UD:
3094 rc = bnxt_qplib_cq_process_res_ud
3095 (cq, (struct cq_res_ud *)hw_cqe, &cqe,
3096 &budget);
3097 break;
3098 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3099 rc = bnxt_qplib_cq_process_res_raweth_qp1
3100 (cq, (struct cq_res_raweth_qp1 *)
3101 hw_cqe, &cqe, &budget);
3102 break;
3103 case CQ_BASE_CQE_TYPE_TERMINAL:
3104 rc = bnxt_qplib_cq_process_terminal
3105 (cq, (struct cq_terminal *)hw_cqe,
3106 &cqe, &budget);
3107 break;
3108 case CQ_BASE_CQE_TYPE_CUT_OFF:
3109 bnxt_qplib_cq_process_cutoff
3110 (cq, (struct cq_cutoff *)hw_cqe);
3111 /* Done processing this CQ */
3112 goto exit;
3113 default:
3114 dev_err(&cq->hwq.pdev->dev,
3115 "process_cq unknown type 0x%lx\n",
3116 hw_cqe->cqe_type_toggle &
3117 CQ_BASE_CQE_TYPE_MASK);
3118 rc = -EINVAL;
3119 break;
3120 }
3121 if (rc < 0) {
3122 if (rc == -EAGAIN)
3123 break;
3124 /* Error while processing the CQE, just skip to the
3125 * next one
3126 */
3127 if (type != CQ_BASE_CQE_TYPE_TERMINAL)
3128 dev_err(&cq->hwq.pdev->dev,
3129 "process_cqe error rc = 0x%x\n", rc);
3130 }
3131 hw_polled++;
3132 bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
3133 1, &cq->dbinfo.flags);
3134
3135 }
3136 if (hw_polled)
3137 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
3138 exit:
3139 return num_cqes - budget;
3140 }
3141
3142 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
3143 {
3144 cq->dbinfo.toggle = cq->toggle;
3145 if (arm_type)
3146 bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3147 /* Use cq->arm_state to track whether the CQ handler should be invoked */
3148 atomic_set(&cq->arm_state, 1);
3149 }
3150
3151 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3152 {
3153 flush_workqueue(qp->scq->nq->cqn_wq);
3154 if (qp->scq != qp->rcq)
3155 flush_workqueue(qp->rcq->nq->cqn_wq);
3156 }
3157