/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

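/* Both CQs' flush locks are taken together whenever a QP moves on or off
 * the flush lists. The send CQ lock is always taken first; when the QP
 * uses one CQ for both directions only that single lock is really
 * acquired, and the bare __acquire()/__release() calls below just keep
 * sparse's context tracking balanced for that case.
 */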
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

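/* Scrub the NQ of any CQ notification entries still pointing at this CQ:
 * zero their handles so they are ignored later, and count them in
 * cnq_events so __wait_for_all_nqes() can tell when every outstanding
 * event for the CQ has been accounted for.
 */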
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

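/* NQ tasklet handler: drain up to nq->budget valid NQEs, dispatching CQ
 * and SRQ notifications to the registered handlers, then ring the NQ
 * doorbell once at the end to re-arm the interrupt.
 */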
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			cq->toggle = (le16_to_cpu(nqe->info10_type) &
					NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
			cq->dbinfo.toggle = cq->toggle;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, (cq)))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}

/* bnxt_re_synchronize_nq - self-polling notification queue.
 * @nq: notification queue pointer
 *
 * Poll the given notification queue for all pending entries. This is
 * useful for synchronizing notification entries while resources are
 * going away.
 */

void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

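/* Quiesce NQ interrupt processing: mask the NQ doorbell, wait out any
 * in-flight handler, then release the MSI-X vector. The tasklet is
 * killed only on a full teardown (kill == true); otherwise it is just
 * disabled so a later bnxt_qplib_nq_start_irq() can re-enable it.
 */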
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	/* First-time init registers the tasklet; later restarts re-enable it */
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
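	/* Build the SWQ free list: start_idx is the head and last_idx the
	 * tail, with each entry linking to the next free slot. A post-recv
	 * pops an index from the head of this list.
	 */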
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}

int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 count;

	count = __bnxt_qplib_get_avail(srq_hwq);
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 count = 0;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
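	/* wqe_size is expressed in 16-byte units: the SRQ WQE header,
	 * rounded up to a whole slot, plus one slot per SGE.
	 */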
	srqe->wqe_size = wqe->num_sge +
			 ((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	spin_lock(&srq_hwq->lock);
	count = __bnxt_qplib_get_avail(srq_hwq);
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req == true && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}

	return 0;
}

/* QP */

static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

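/* PSN/MSN search entries live in the aux area allocated just past the SQ
 * depth. Record the page array and in-page offset of the first entry so
 * bnxt_qplib_pull_psn_buff() can index the area per WQE.
 */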
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	if (res->dattr)
		qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
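	/* RC QPs carry a PSN search entry per WQE; gen P5/P7 chips use the
	 * extended layout. When the MSN table for HW retransmission is
	 * host-resident, the aux area holds MSN search entries instead.
	 */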
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (qp->is_host_msn_tbl) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
				    : 0;
	/* Update msn tbl size */
	if (qp->is_host_msn_tbl && psn_sz) {
		hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

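/* Peek through the CQ ring and zero the qp_handle of every valid CQE
 * belonging to the given QP so stale completions are skipped after the
 * QP is cleaned up. Only local peek cursors are advanced; the CQ's real
 * consumer index is left untouched.
 */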
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

/* Fill the MSN table entry in the next psn row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_msn_search *msns;
	u32 start_psn, next_psn;
	u16 start_idx;

	msns = (struct sq_msn_search *)swq->psn_search;
	msns->start_idx_next_psn_start_psn = 0;

	start_psn = swq->start_psn;
	next_psn = swq->next_psn;
	start_idx = swq->slot_idx;
	msns->start_idx_next_psn_start_psn |=
		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
	qp->msn++;
	qp->msn %= qp->msn_tbl_sz;
}

static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	/* Handle MSN differently on cap flags */
	if (qp->is_host_msn_tbl) {
		bnxt_qplib_fill_msn_search(qp, wqe, swq);
		return;
	}
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

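/* Copy an inline payload straight into SQ slots, 16 bytes (one struct
 * sq_sge) at a time, pulling a fresh slot whenever the current one
 * fills. Returns the total inline length, or -ENOMEM if it would exceed
 * the QP's max_inline_data.
 */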
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
					(sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
			       struct bnxt_qplib_sge *ssge,
			       u16 nsge, u16 *idx)
{
	struct sq_sge *dsge;
	int indx, len = 0;

	for (indx = 0; indx < nsge; indx++, (*idx)++) {
		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
		dsge->size = cpu_to_le32(ssge[indx].size);
		len += ssge[indx].size;
	}

	return len;
}

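/* Work out how many 16-byte slots a WQE needs: header plus SGEs, or
 * header plus SGE-aligned inline data. In static WQE mode every WQE
 * occupies a fixed 8 slots (128B) regardless of the computed size.
 */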
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
				     struct bnxt_qplib_swqe *wqe,
				     u16 *wqe_sz, u16 *qdf, u8 mode)
{
	u32 ilsize, bytes;
	u16 nsge;
	u16 slot;

	nsge = wqe->num_sge;
	/* Using sq_send_hdr here is a slight misnomer: the RQ header size is the same. */
	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
		bytes += sizeof(struct sq_send_hdr);
	}

	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
	slot = bytes >> 4;
	*wqe_sz = slot;
	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
		slot = 8;
	return slot;
}

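/* Attach the SWQ to its PSN/MSN search entry in the pad (aux) area. With
 * HW retransmission the entry is indexed by the QP's MSN counter,
 * otherwise by the WQE's starting slot.
 */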
bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp * qp,struct bnxt_qplib_q * sq,struct bnxt_qplib_swq * swq,bool hw_retx)1753 static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
1754 struct bnxt_qplib_swq *swq, bool hw_retx)
1755 {
1756 struct bnxt_qplib_hwq *hwq;
1757 u32 pg_num, pg_indx;
1758 void *buff;
1759 u32 tail;
1760
1761 hwq = &sq->hwq;
1762 if (!hwq->pad_pg)
1763 return;
1764 tail = swq->slot_idx / sq->dbinfo.max_slot;
1765 if (hw_retx) {
1766 /* For HW retx use qp msn index */
1767 tail = qp->msn;
1768 tail %= qp->msn_tbl_sz;
1769 }
1770 pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1771 pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1772 buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1773 swq->psn_ext = buff;
1774 swq->psn_search = buff;
1775 }
1776
1777 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1778 {
1779 struct bnxt_qplib_q *sq = &qp->sq;
1780
1781 bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1782 }
1783
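/* Build and enqueue one send WQE. The caller fills in a bnxt_qplib_swqe
 * and rings the doorbell separately. Usage sketch (illustrative only;
 * the my_* values are hypothetical placeholders):
 *
 *	struct bnxt_qplib_swqe wqe = {};
 *
 *	wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
 *	wqe.wr_id = my_wr_id;
 *	wqe.num_sge = 1;
 *	wqe.sg_list[0].addr = my_dma_addr;
 *	wqe.sg_list[0].lkey = my_lkey;
 *	wqe.sg_list[0].size = my_len;
 *	rc = bnxt_qplib_post_send(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);
 */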
1784 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1785 struct bnxt_qplib_swqe *wqe)
1786 {
1787 struct bnxt_qplib_nq_work *nq_work = NULL;
1788 int i, rc = 0, data_len = 0, pkt_num = 0;
1789 struct bnxt_qplib_q *sq = &qp->sq;
1790 struct bnxt_qplib_hwq *hwq;
1791 struct bnxt_qplib_swq *swq;
1792 bool sch_handler = false;
1793 u16 wqe_sz, qdf = 0;
1794 bool msn_update;
1795 void *base_hdr;
1796 void *ext_hdr;
1797 __le32 temp32;
1798 u32 wqe_idx;
1799 u32 slots;
1800 u16 idx;
1801
1802 hwq = &sq->hwq;
1803 if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1804 qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1805 dev_err(&hwq->pdev->dev,
1806 "FP: QP (0x%x) is in the 0x%x state\n",
1807 qp->id, qp->state);
1808 rc = -EINVAL;
1809 goto done;
1810 }
1811
1812 slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1813 if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1814 dev_err(&hwq->pdev->dev,
1815 "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1816 hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1817 rc = -ENOMEM;
1818 goto done;
1819 }
1820
1821 swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1822 bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
1823
1824 idx = 0;
1825 swq->slot_idx = hwq->prod;
1826 swq->slots = slots;
1827 swq->wr_id = wqe->wr_id;
1828 swq->type = wqe->type;
1829 swq->flags = wqe->flags;
1830 swq->start_psn = sq->psn & BTH_PSN_MASK;
1831 if (qp->sig_type)
1832 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1833
1834 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1835 sch_handler = true;
1836 dev_dbg(&hwq->pdev->dev,
1837 "%s Error QP. Scheduling for poll_cq\n", __func__);
1838 goto queue_err;
1839 }
1840
1841 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1842 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1843 memset(base_hdr, 0, sizeof(struct sq_sge));
1844 memset(ext_hdr, 0, sizeof(struct sq_sge));
1845
1846 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1847 /* Copy the inline data */
1848 data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1849 else
1850 data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1851 &idx);
1852 if (data_len < 0)
1853 goto queue_err;
1854 /* Make sure we update the MSN table only for WQEs that go out on the wire */
1855 msn_update = true;
1856 /* Specifics */
1857 switch (wqe->type) {
1858 case BNXT_QPLIB_SWQE_TYPE_SEND:
1859 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1860 struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1861 struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1862 /* Assemble info for Raw Ethertype QPs */
1863
1864 sqe->wqe_type = wqe->type;
1865 sqe->flags = wqe->flags;
1866 sqe->wqe_size = wqe_sz;
1867 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1868 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1869 sqe->length = cpu_to_le32(data_len);
1870 ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1871 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1872 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1873
1874 break;
1875 }
1876 fallthrough;
1877 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1878 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1879 {
1880 struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1881 struct sq_send_hdr *sqe = base_hdr;
1882
1883 sqe->wqe_type = wqe->type;
1884 sqe->flags = wqe->flags;
1885 sqe->wqe_size = wqe_sz;
1886 sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1887 if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1888 qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1889 sqe->q_key = cpu_to_le32(wqe->send.q_key);
1890 sqe->length = cpu_to_le32(data_len);
1891 sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1892 ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1893 SQ_SEND_DST_QP_MASK);
1894 ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1895 SQ_SEND_AVID_MASK);
1896 msn_update = false;
1897 } else {
1898 sqe->length = cpu_to_le32(data_len);
1899 if (qp->mtu)
1900 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1901 if (!pkt_num)
1902 pkt_num = 1;
1903 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1904 }
1905 break;
1906 }
1907 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1908 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1909 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1910 {
1911 struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1912 struct sq_rdma_hdr *sqe = base_hdr;
1913
1914 sqe->wqe_type = wqe->type;
1915 sqe->flags = wqe->flags;
1916 sqe->wqe_size = wqe_sz;
1917 sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1918 sqe->length = cpu_to_le32((u32)data_len);
1919 ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1920 ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1921 if (qp->mtu)
1922 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1923 if (!pkt_num)
1924 pkt_num = 1;
1925 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1926 break;
1927 }
1928 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1929 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1930 {
1931 struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1932 struct sq_atomic_hdr *sqe = base_hdr;
1933
1934 sqe->wqe_type = wqe->type;
1935 sqe->flags = wqe->flags;
1936 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1937 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1938 ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1939 ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1940 if (qp->mtu)
1941 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1942 if (!pkt_num)
1943 pkt_num = 1;
1944 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1945 break;
1946 }
1947 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1948 {
1949 struct sq_localinvalidate *sqe = base_hdr;
1950
1951 sqe->wqe_type = wqe->type;
1952 sqe->flags = wqe->flags;
1953 sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1954 msn_update = false;
1955 break;
1956 }
1957 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1958 {
1959 struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1960 struct sq_fr_pmr_hdr *sqe = base_hdr;
1961
1962 sqe->wqe_type = wqe->type;
1963 sqe->flags = wqe->flags;
1964 sqe->access_cntl = wqe->frmr.access_cntl |
1965 SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1966 sqe->zero_based_page_size_log =
1967 (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1968 SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1969 (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1970 sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1971 temp32 = cpu_to_le32(wqe->frmr.length);
1972 memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1973 sqe->numlevels_pbl_page_size_log =
1974 ((wqe->frmr.pbl_pg_sz_log <<
1975 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1976 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1977 ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1978 SQ_FR_PMR_NUMLEVELS_MASK);
1979
1980 for (i = 0; i < wqe->frmr.page_list_len; i++)
1981 wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1982 wqe->frmr.page_list[i] |
1983 PTU_PTE_VALID);
1984 ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1985 ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1986 msn_update = false;
1987
1988 break;
1989 }
1990 case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1991 {
1992 struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
1993 struct sq_bind_hdr *sqe = base_hdr;
1994
1995 sqe->wqe_type = wqe->type;
1996 sqe->flags = wqe->flags;
1997 sqe->access_cntl = wqe->bind.access_cntl;
1998 sqe->mw_type_zero_based = wqe->bind.mw_type |
1999 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
2000 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
2001 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
2002 ext_sqe->va = cpu_to_le64(wqe->bind.va);
2003 ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
2004 msn_update = false;
2005 break;
2006 }
2007 default:
2008 /* Bad wqe, return error */
2009 rc = -EINVAL;
2010 goto done;
2011 }
2012 if (!qp->is_host_msn_tbl || msn_update) {
2013 swq->next_psn = sq->psn & BTH_PSN_MASK;
2014 bnxt_qplib_fill_psn_search(qp, wqe, swq);
2015 }
2016 queue_err:
2017 bnxt_qplib_swq_mod_start(sq, wqe_idx);
2018 bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
2019 qp->wqe_cnt++;
2020 done:
2021 if (sch_handler) {
2022 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2023 if (nq_work) {
2024 nq_work->cq = qp->scq;
2025 nq_work->nq = qp->scq->nq;
2026 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2027 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
2028 } else {
2029 dev_err(&hwq->pdev->dev,
2030 "FP: Failed to allocate SQ nq_work!\n");
2031 rc = -ENOMEM;
2032 }
2033 }
2034 return rc;
2035 }
2036
2037 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2038 {
2039 struct bnxt_qplib_q *rq = &qp->rq;
2040
2041 bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
2042 }
2043
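/* Receive-side counterpart of bnxt_qplib_post_send(). Every RQ WQE
 * occupies a fixed slot count (rq->dbinfo.max_slot), so no sizing pass
 * is needed. As on the SQ, posting to an error-state QP is accepted but
 * handed to the NQ worker so poll_cq can flush it.
 */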
2044 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
2045 struct bnxt_qplib_swqe *wqe)
2046 {
2047 struct bnxt_qplib_nq_work *nq_work = NULL;
2048 struct bnxt_qplib_q *rq = &qp->rq;
2049 struct rq_wqe_hdr *base_hdr;
2050 struct rq_ext_hdr *ext_hdr;
2051 struct bnxt_qplib_hwq *hwq;
2052 struct bnxt_qplib_swq *swq;
2053 bool sch_handler = false;
2054 u16 wqe_sz, idx;
2055 u32 wqe_idx;
2056 int rc = 0;
2057
2058 hwq = &rq->hwq;
2059 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2060 dev_err(&hwq->pdev->dev,
2061 "FP: QP (0x%x) is in the 0x%x state\n",
2062 qp->id, qp->state);
2063 rc = -EINVAL;
2064 goto done;
2065 }
2066
2067 if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
2068 dev_err(&hwq->pdev->dev,
2069 "FP: QP (0x%x) RQ is full!\n", qp->id);
2070 rc = -EINVAL;
2071 goto done;
2072 }
2073
2074 swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2075 swq->wr_id = wqe->wr_id;
2076 swq->slots = rq->dbinfo.max_slot;
2077
2078 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2079 sch_handler = true;
2080 dev_dbg(&hwq->pdev->dev,
2081 "%s: Error QP. Scheduling for poll_cq\n", __func__);
2082 goto queue_err;
2083 }
2084
2085 idx = 0;
2086 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2087 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2088 memset(base_hdr, 0, sizeof(struct sq_sge));
2089 memset(ext_hdr, 0, sizeof(struct sq_sge));
2090 wqe_sz = (sizeof(struct rq_wqe_hdr) +
2091 wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2092 bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
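/* HW expects at least one SGE: post a zero-length placeholder when the
 * caller supplied none, and account for its slot in the WQE size.
 */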
2093 if (!wqe->num_sge) {
2094 struct sq_sge *sge;
2095
2096 sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2097 sge->size = 0;
2098 wqe_sz++;
2099 }
2100 base_hdr->wqe_type = wqe->type;
2101 base_hdr->flags = wqe->flags;
2102 base_hdr->wqe_size = wqe_sz;
2103 base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2104 queue_err:
2105 bnxt_qplib_swq_mod_start(rq, wqe_idx);
2106 bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
2107 done:
2108 if (sch_handler) {
2109 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2110 if (nq_work) {
2111 nq_work->cq = qp->rcq;
2112 nq_work->nq = qp->rcq->nq;
2113 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2114 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2115 } else {
2116 dev_err(&hwq->pdev->dev,
2117 "FP: Failed to allocate RQ nq_work!\n");
2118 rc = -ENOMEM;
2119 }
2120 }
2121
2122 return rc;
2123 }
2124
2125 /* CQ */
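/* Create a completion queue: allocate the CQE ring, issue CREATE_CQ to
 * firmware over the RCFW channel, initialize the flush lists and
 * doorbell context, then arm-enable the CQ doorbell.
 */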
2126 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2127 {
2128 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2129 struct bnxt_qplib_hwq_attr hwq_attr = {};
2130 struct creq_create_cq_resp resp = {};
2131 struct bnxt_qplib_cmdqmsg msg = {};
2132 struct cmdq_create_cq req = {};
2133 struct bnxt_qplib_pbl *pbl;
2134 u32 pg_sz_lvl;
2135 int rc;
2136
2137 if (!cq->dpi) {
2138 dev_err(&rcfw->pdev->dev,
2139 "FP: CREATE_CQ failed due to NULL DPI\n");
2140 return -EINVAL;
2141 }
2142
2143 cq->dbinfo.flags = 0;
2144 hwq_attr.res = res;
2145 hwq_attr.depth = cq->max_wqe;
2146 hwq_attr.stride = sizeof(struct cq_base);
2147 hwq_attr.type = HWQ_TYPE_QUEUE;
2148 hwq_attr.sginfo = &cq->sg_info;
2149 rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2150 if (rc)
2151 return rc;
2152
2153 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2154 CMDQ_BASE_OPCODE_CREATE_CQ,
2155 sizeof(req));
2156
2157 req.dpi = cpu_to_le32(cq->dpi->dpi);
2158 req.cq_handle = cpu_to_le64(cq->cq_handle);
2159 req.cq_size = cpu_to_le32(cq->max_wqe);
2160 pbl = &cq->hwq.pbl[PBL_LVL_0];
2161 pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2162 CMDQ_CREATE_CQ_PG_SIZE_SFT);
2163 pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2164 req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2165 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2166 req.cq_fco_cnq_id = cpu_to_le32(
2167 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2168 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2169 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2170 sizeof(resp), 0);
2171 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2172 if (rc)
2173 goto fail;
2174
2175 cq->id = le32_to_cpu(resp.xid);
2176 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2177 init_waitqueue_head(&cq->waitq);
2178 INIT_LIST_HEAD(&cq->sqf_head);
2179 INIT_LIST_HEAD(&cq->rqf_head);
2180 spin_lock_init(&cq->compl_lock);
2181 spin_lock_init(&cq->flush_lock);
2182
2183 cq->dbinfo.hwq = &cq->hwq;
2184 cq->dbinfo.xid = cq->id;
2185 cq->dbinfo.db = cq->dpi->dbr;
2186 cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2187 cq->dbinfo.flags = 0;
2188 cq->dbinfo.toggle = 0;
2189
2190 bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2191
2192 return 0;
2193
2194 fail:
2195 bnxt_qplib_free_hwq(res, &cq->hwq);
2196 return rc;
2197 }
2198
2199 void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2200 struct bnxt_qplib_cq *cq)
2201 {
2202 bnxt_qplib_free_hwq(res, &cq->hwq);
2203 memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2204 /* Reset only the cons bit in the flags */
2205 cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
2206 }
2207
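/* Resizing is two-phase: bnxt_qplib_resize_cq() allocates the new ring
 * and issues RESIZE_CQ; once firmware delivers the cut-off CQE,
 * bnxt_qplib_resize_cq_complete() above swaps the new ring in and resets
 * the consumer epoch bit.
 */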
2208 int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2209 int new_cqes)
2210 {
2211 struct bnxt_qplib_hwq_attr hwq_attr = {};
2212 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2213 struct creq_resize_cq_resp resp = {};
2214 struct bnxt_qplib_cmdqmsg msg = {};
2215 struct cmdq_resize_cq req = {};
2216 struct bnxt_qplib_pbl *pbl;
2217 u32 pg_sz, lvl, new_sz;
2218 int rc;
2219
2220 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2221 CMDQ_BASE_OPCODE_RESIZE_CQ,
2222 sizeof(req));
2223 hwq_attr.sginfo = &cq->sg_info;
2224 hwq_attr.res = res;
2225 hwq_attr.depth = new_cqes;
2226 hwq_attr.stride = sizeof(struct cq_base);
2227 hwq_attr.type = HWQ_TYPE_QUEUE;
2228 rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2229 if (rc)
2230 return rc;
2231
2232 req.cq_cid = cpu_to_le32(cq->id);
2233 pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2234 pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2235 lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2236 CMDQ_RESIZE_CQ_LVL_MASK;
2237 new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2238 CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2239 req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2240 req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2241
2242 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2243 sizeof(resp), 0);
2244 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2245 return rc;
2246 }
2247
2248 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2249 {
2250 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2251 struct creq_destroy_cq_resp resp = {};
2252 struct bnxt_qplib_cmdqmsg msg = {};
2253 struct cmdq_destroy_cq req = {};
2254 u16 total_cnq_events;
2255 int rc;
2256
2257 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2258 CMDQ_BASE_OPCODE_DESTROY_CQ,
2259 sizeof(req));
2260
2261 req.cq_cid = cpu_to_le32(cq->id);
2262 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2263 sizeof(resp), 0);
2264 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2265 if (rc)
2266 return rc;
2267 total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2268 __wait_for_all_nqes(cq, total_cnq_events);
2269 bnxt_qplib_free_hwq(res, &cq->hwq);
2270 return 0;
2271 }
2272
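/* Fabricate FLUSHED_ERR completions for all outstanding SQ WQEs, from
 * swq_start up to swq_last, bounded by the caller's CQE budget. Fence
 * WQEs are consumed without generating a user completion. __flush_rq()
 * below does the same for the RQ, picking the CQE opcode from the QP type.
 */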
2273 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2274 struct bnxt_qplib_cqe **pcqe, int *budget)
2275 {
2276 struct bnxt_qplib_cqe *cqe;
2277 u32 start, last;
2278 int rc = 0;
2279
2280 /* Now complete all outstanding SQEs with FLUSHED_ERR */
2281 start = sq->swq_start;
2282 cqe = *pcqe;
2283 while (*budget) {
2284 last = sq->swq_last;
2285 if (start == last)
2286 break;
2287 /* Skip the FENCE WQE completions */
2288 if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2289 bnxt_qplib_cancel_phantom_processing(qp);
2290 goto skip_compl;
2291 }
2292 memset(cqe, 0, sizeof(*cqe));
2293 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2294 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2295 cqe->qp_handle = (u64)(unsigned long)qp;
2296 cqe->wr_id = sq->swq[last].wr_id;
2297 cqe->src_qp = qp->id;
2298 cqe->type = sq->swq[last].type;
2299 cqe++;
2300 (*budget)--;
2301 skip_compl:
2302 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2303 sq->swq[last].slots, &sq->dbinfo.flags);
2304 sq->swq_last = sq->swq[last].next_idx;
2305 }
2306 *pcqe = cqe;
2307 if (!(*budget) && sq->swq_last != start)
2308 /* Out of budget */
2309 rc = -EAGAIN;
2310
2311 return rc;
2312 }
2313
2314 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2315 struct bnxt_qplib_cqe **pcqe, int *budget)
2316 {
2317 struct bnxt_qplib_cqe *cqe;
2318 u32 start, last;
2319 int opcode = 0;
2320 int rc = 0;
2321
2322 switch (qp->type) {
2323 case CMDQ_CREATE_QP1_TYPE_GSI:
2324 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2325 break;
2326 case CMDQ_CREATE_QP_TYPE_RC:
2327 opcode = CQ_BASE_CQE_TYPE_RES_RC;
2328 break;
2329 case CMDQ_CREATE_QP_TYPE_UD:
2330 case CMDQ_CREATE_QP_TYPE_GSI:
2331 opcode = CQ_BASE_CQE_TYPE_RES_UD;
2332 break;
2333 }
2334
2335 /* Flush the rest of the RQ */
2336 start = rq->swq_start;
2337 cqe = *pcqe;
2338 while (*budget) {
2339 last = rq->swq_last;
2340 if (last == start)
2341 break;
2342 memset(cqe, 0, sizeof(*cqe));
2343 cqe->status =
2344 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2345 cqe->opcode = opcode;
2346 cqe->qp_handle = (unsigned long)qp;
2347 cqe->wr_id = rq->swq[last].wr_id;
2348 cqe++;
2349 (*budget)--;
2350 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2351 rq->swq[last].slots, &rq->dbinfo.flags);
2352 rq->swq_last = rq->swq[last].next_idx;
2353 }
2354 *pcqe = cqe;
2355 if (!*budget && rq->swq_last != start)
2356 /* Out of budget */
2357 rc = -EAGAIN;
2358
2359 return rc;
2360 }
2361
2362 void bnxt_qplib_mark_qp_error(void *qp_handle)
2363 {
2364 struct bnxt_qplib_qp *qp = qp_handle;
2365
2366 if (!qp)
2367 return;
2368
2369 /* Must block new posting of SQ and RQ */
2370 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2371 bnxt_qplib_cancel_phantom_processing(qp);
2372 }
2373
2374 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2375  * CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
2376  */
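/* WA 9060: a marked PSN-search entry means the HW may still emit a
 * "phantom" REQ CQE for this WQE. On seeing the mark, arm the CQ and
 * back off with -EAGAIN; while sq->condition is set, peek ahead through
 * the valid CQEs for a REQ completion that lands on the fence wr_id,
 * which re-enables normal processing (one WQE at a time via sq->single).
 */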
2377 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2378 u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2379 {
2380 u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
2381 struct bnxt_qplib_q *sq = &qp->sq;
2382 struct cq_req *peek_req_hwcqe;
2383 struct bnxt_qplib_qp *peek_qp;
2384 struct bnxt_qplib_q *peek_sq;
2385 struct bnxt_qplib_swq *swq;
2386 struct cq_base *peek_hwcqe;
2387 int i, rc = 0;
2388
2389 /* Normal mode */
2390 /* Check for the psn_search marking before completing */
2391 swq = &sq->swq[swq_last];
2392 if (swq->psn_search &&
2393 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2394 /* Unmark */
2395 swq->psn_search->flags_next_psn = cpu_to_le32
2396 (le32_to_cpu(swq->psn_search->flags_next_psn)
2397 & ~0x80000000);
2398 dev_dbg(&cq->hwq.pdev->dev,
2399 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2400 cq_cons, qp->id, swq_last, cqe_sq_cons);
2401 sq->condition = true;
2402 sq->send_phantom = true;
2403
2404 /* TODO: Only ARM if the previous SQE is ARMALL */
2405 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2406 rc = -EAGAIN;
2407 goto out;
2408 }
2409 if (sq->condition) {
2410 /* Peek at the completions */
2411 peek_flags = cq->dbinfo.flags;
2412 peek_sw_cq_cons = cq_cons;
2413 i = cq->hwq.max_elements;
2414 while (i--) {
2415 peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2416 peek_sw_cq_cons, NULL);
2417 /* If the next hwcqe is VALID */
2418 if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
2419 /*
2420 * The valid test of the entry must be done first before
2421 * reading any further.
2422 */
2423 dma_rmb();
2424 /* If the next hwcqe is a REQ */
2425 if ((peek_hwcqe->cqe_type_toggle &
2426 CQ_BASE_CQE_TYPE_MASK) ==
2427 CQ_BASE_CQE_TYPE_REQ) {
2428 peek_req_hwcqe = (struct cq_req *)
2429 peek_hwcqe;
2430 peek_qp = (struct bnxt_qplib_qp *)
2431 ((unsigned long)
2432 le64_to_cpu
2433 (peek_req_hwcqe->qp_handle));
2434 peek_sq = &peek_qp->sq;
2435 peek_sq_cons_idx =
2436 ((le16_to_cpu(
2437 peek_req_hwcqe->sq_cons_idx)
2438 - 1) % sq->max_wqe);
2439 /* If the hwcqe's sq's wr_id matches */
2440 if (peek_sq == sq &&
2441 sq->swq[peek_sq_cons_idx].wr_id ==
2442 BNXT_QPLIB_FENCE_WRID) {
2443 /*
2444 * Unbreak only if the phantom
2445 * comes back
2446 */
2447 dev_dbg(&cq->hwq.pdev->dev,
2448 "FP: Got Phantom CQE\n");
2449 sq->condition = false;
2450 sq->single = true;
2451 rc = 0;
2452 goto out;
2453 }
2454 }
2455 /* Valid but not the phantom, so keep looping */
2456 } else {
2457 /* Not valid yet, just exit and wait */
2458 rc = -EINVAL;
2459 goto out;
2460 }
2461 bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
2462 &peek_sw_cq_cons,
2463 1, &peek_flags);
2464 }
2465 dev_err(&cq->hwq.pdev->dev,
2466 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2467 cq_cons, qp->id, swq_last, cqe_sq_cons);
2468 rc = -EINVAL;
2469 }
2470 out:
2471 return rc;
2472 }
2473
2474 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2475 struct cq_req *hwcqe,
2476 struct bnxt_qplib_cqe **pcqe, int *budget,
2477 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2478 {
2479 struct bnxt_qplib_swq *swq;
2480 struct bnxt_qplib_cqe *cqe;
2481 struct bnxt_qplib_qp *qp;
2482 struct bnxt_qplib_q *sq;
2483 u32 cqe_sq_cons;
2484 int rc = 0;
2485
2486 qp = (struct bnxt_qplib_qp *)((unsigned long)
2487 le64_to_cpu(hwcqe->qp_handle));
2488 if (!qp) {
2489 dev_err(&cq->hwq.pdev->dev,
2490 "FP: Process Req qp is NULL\n");
2491 return -EINVAL;
2492 }
2493 sq = &qp->sq;
2494
2495 cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
2496 if (qp->sq.flushed) {
2497 dev_dbg(&cq->hwq.pdev->dev,
2498 "%s: QP in Flush QP = %p\n", __func__, qp);
2499 goto done;
2500 }
2501 /* We need to walk the sq's swq to fabricate CQEs for all previously
2502  * signaled SWQEs (due to CQE aggregation) from the current sq cons
2503  * to the cqe_sq_cons
2504  */
2505 cqe = *pcqe;
2506 while (*budget) {
2507 if (sq->swq_last == cqe_sq_cons)
2508 /* Done */
2509 break;
2510
2511 swq = &sq->swq[sq->swq_last];
2512 memset(cqe, 0, sizeof(*cqe));
2513 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2514 cqe->qp_handle = (u64)(unsigned long)qp;
2515 cqe->src_qp = qp->id;
2516 cqe->wr_id = swq->wr_id;
2517 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2518 goto skip;
2519 cqe->type = swq->type;
2520
2521 /* For the last CQE, check for status. For errors, regardless
2522 * of the request being signaled or not, it must complete with
2523 * the hwcqe error status
2524 */
2525 if (swq->next_idx == cqe_sq_cons &&
2526 hwcqe->status != CQ_REQ_STATUS_OK) {
2527 cqe->status = hwcqe->status;
2528 dev_err(&cq->hwq.pdev->dev,
2529 "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2530 sq->swq_last, cqe->wr_id, cqe->status);
2531 cqe++;
2532 (*budget)--;
2533 bnxt_qplib_mark_qp_error(qp);
2534 /* Add qp to flush list of the CQ */
2535 bnxt_qplib_add_flush_qp(qp);
2536 } else {
2537 /* Before we complete, do WA 9060 */
2538 if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2539 cqe_sq_cons)) {
2540 *lib_qp = qp;
2541 goto out;
2542 }
2543 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2544 cqe->status = CQ_REQ_STATUS_OK;
2545 cqe++;
2546 (*budget)--;
2547 }
2548 }
2549 skip:
2550 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2551 swq->slots, &sq->dbinfo.flags);
2552 sq->swq_last = swq->next_idx;
2553 if (sq->single)
2554 break;
2555 }
2556 out:
2557 *pcqe = cqe;
2558 if (sq->swq_last != cqe_sq_cons) {
2559 /* Out of budget */
2560 rc = -EAGAIN;
2561 goto done;
2562 }
2563 /*
2564 * Back to normal completion mode only after it has completed all of
2565 * the WC for this CQE
2566 */
2567 sq->single = false;
2568 done:
2569 return rc;
2570 }
2571
2572 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2573 {
2574 spin_lock(&srq->hwq.lock);
2575 srq->swq[srq->last_idx].next_idx = (int)tag;
2576 srq->last_idx = (int)tag;
2577 srq->swq[srq->last_idx].next_idx = -1;
2578 bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
2579 srq->dbinfo.max_slot, &srq->dbinfo.flags);
2580 spin_unlock(&srq->hwq.lock);
2581 }
2582
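/* RC receive completion. The wr_id may belong to the QP's RQ or to an
 * attached SRQ (CQ_RES_RC_FLAGS_SRQ_SRQ): SRQ tags are recycled through
 * bnxt_qplib_release_srqe(), while plain RQ entries must complete
 * strictly in order (wr_id_idx == rq->swq_last). An error status moves
 * the QP to the CQ's flush list.
 */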
2583 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2584 struct cq_res_rc *hwcqe,
2585 struct bnxt_qplib_cqe **pcqe,
2586 int *budget)
2587 {
2588 struct bnxt_qplib_srq *srq;
2589 struct bnxt_qplib_cqe *cqe;
2590 struct bnxt_qplib_qp *qp;
2591 struct bnxt_qplib_q *rq;
2592 u32 wr_id_idx;
2593
2594 qp = (struct bnxt_qplib_qp *)((unsigned long)
2595 le64_to_cpu(hwcqe->qp_handle));
2596 if (!qp) {
2597 dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2598 return -EINVAL;
2599 }
2600 if (qp->rq.flushed) {
2601 dev_dbg(&cq->hwq.pdev->dev,
2602 "%s: QP in Flush QP = %p\n", __func__, qp);
2603 return 0;
2604 }
2605
2606 cqe = *pcqe;
2607 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2608 cqe->length = le32_to_cpu(hwcqe->length);
2609 cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2610 cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2611 cqe->flags = le16_to_cpu(hwcqe->flags);
2612 cqe->status = hwcqe->status;
2613 cqe->qp_handle = (u64)(unsigned long)qp;
2614
2615 wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2616 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2617 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2618 srq = qp->srq;
2619 if (!srq)
2620 return -EINVAL;
2621 if (wr_id_idx >= srq->hwq.max_elements) {
2622 dev_err(&cq->hwq.pdev->dev,
2623 "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2624 wr_id_idx, srq->hwq.max_elements);
2625 return -EINVAL;
2626 }
2627 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2628 bnxt_qplib_release_srqe(srq, wr_id_idx);
2629 cqe++;
2630 (*budget)--;
2631 *pcqe = cqe;
2632 } else {
2633 struct bnxt_qplib_swq *swq;
2634
2635 rq = &qp->rq;
2636 if (wr_id_idx > (rq->max_wqe - 1)) {
2637 dev_err(&cq->hwq.pdev->dev,
2638 "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2639 wr_id_idx, rq->max_wqe);
2640 return -EINVAL;
2641 }
2642 if (wr_id_idx != rq->swq_last)
2643 return -EINVAL;
2644 swq = &rq->swq[rq->swq_last];
2645 cqe->wr_id = swq->wr_id;
2646 cqe++;
2647 (*budget)--;
2648 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2649 swq->slots, &rq->dbinfo.flags);
2650 rq->swq_last = swq->next_idx;
2651 *pcqe = cqe;
2652
2653 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2654 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2655 /* Add qp to flush list of the CQ */
2656 bnxt_qplib_add_flush_qp(qp);
2657 }
2658 }
2659
2660 return 0;
2661 }
2662
2663 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2664 struct cq_res_ud *hwcqe,
2665 struct bnxt_qplib_cqe **pcqe,
2666 int *budget)
2667 {
2668 struct bnxt_qplib_srq *srq;
2669 struct bnxt_qplib_cqe *cqe;
2670 struct bnxt_qplib_qp *qp;
2671 struct bnxt_qplib_q *rq;
2672 u32 wr_id_idx;
2673
2674 qp = (struct bnxt_qplib_qp *)((unsigned long)
2675 le64_to_cpu(hwcqe->qp_handle));
2676 if (!qp) {
2677 dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2678 return -EINVAL;
2679 }
2680 if (qp->rq.flushed) {
2681 dev_dbg(&cq->hwq.pdev->dev,
2682 "%s: QP in Flush QP = %p\n", __func__, qp);
2683 return 0;
2684 }
2685 cqe = *pcqe;
2686 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2687 cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2688 cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2689 cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2690 cqe->flags = le16_to_cpu(hwcqe->flags);
2691 cqe->status = hwcqe->status;
2692 cqe->qp_handle = (u64)(unsigned long)qp;
2693 /* FIXME: Endianness fix needed for smac */
2694 memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2695 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2696 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2697 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2698 ((le32_to_cpu(
2699 hwcqe->src_qp_high_srq_or_rq_wr_id) &
2700 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2701
2702 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2703 srq = qp->srq;
2704 if (!srq)
2705 return -EINVAL;
2706
2707 if (wr_id_idx >= srq->hwq.max_elements) {
2708 dev_err(&cq->hwq.pdev->dev,
2709 "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2710 wr_id_idx, srq->hwq.max_elements);
2711 return -EINVAL;
2712 }
2713 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2714 bnxt_qplib_release_srqe(srq, wr_id_idx);
2715 cqe++;
2716 (*budget)--;
2717 *pcqe = cqe;
2718 } else {
2719 struct bnxt_qplib_swq *swq;
2720
2721 rq = &qp->rq;
2722 if (wr_id_idx > (rq->max_wqe - 1)) {
2723 dev_err(&cq->hwq.pdev->dev,
2724 "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2725 wr_id_idx, rq->max_wqe);
2726 return -EINVAL;
2727 }
2728
2729 if (rq->swq_last != wr_id_idx)
2730 return -EINVAL;
2731 swq = &rq->swq[rq->swq_last];
2732 cqe->wr_id = swq->wr_id;
2733 cqe++;
2734 (*budget)--;
2735 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2736 swq->slots, &rq->dbinfo.flags);
2737 rq->swq_last = swq->next_idx;
2738 *pcqe = cqe;
2739
2740 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2741 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2742 /* Add qp to flush list of the CQ */
2743 bnxt_qplib_add_flush_qp(qp);
2744 }
2745 }
2746
2747 return 0;
2748 }
2749
2750 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2751 {
2752 struct cq_base *hw_cqe;
2753 bool rc = true;
2754
2755 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
2756 /* Check for Valid bit. If the CQE is valid, return false */
2757 rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
2758 return rc;
2759 }
2760
2761 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2762 struct cq_res_raweth_qp1 *hwcqe,
2763 struct bnxt_qplib_cqe **pcqe,
2764 int *budget)
2765 {
2766 struct bnxt_qplib_qp *qp;
2767 struct bnxt_qplib_q *rq;
2768 struct bnxt_qplib_srq *srq;
2769 struct bnxt_qplib_cqe *cqe;
2770 u32 wr_id_idx;
2771
2772 qp = (struct bnxt_qplib_qp *)((unsigned long)
2773 le64_to_cpu(hwcqe->qp_handle));
2774 if (!qp) {
2775 dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2776 return -EINVAL;
2777 }
2778 if (qp->rq.flushed) {
2779 dev_dbg(&cq->hwq.pdev->dev,
2780 "%s: QP in Flush QP = %p\n", __func__, qp);
2781 return 0;
2782 }
2783 cqe = *pcqe;
2784 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2785 cqe->flags = le16_to_cpu(hwcqe->flags);
2786 cqe->qp_handle = (u64)(unsigned long)qp;
2787
2788 wr_id_idx =
2789 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2790 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2791 cqe->src_qp = qp->id;
2792 if (qp->id == 1 && !cqe->length) {
2793 /* Add workaround for the length misdetection */
2794 cqe->length = 296;
2795 } else {
2796 cqe->length = le16_to_cpu(hwcqe->length);
2797 }
2798 cqe->pkey_index = qp->pkey_index;
2799 memcpy(cqe->smac, qp->smac, 6);
2800
2801 cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2802 cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2803 cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2804
2805 if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2806 srq = qp->srq;
2807 if (!srq) {
2808 dev_err(&cq->hwq.pdev->dev,
2809 "FP: SRQ used but not defined??\n");
2810 return -EINVAL;
2811 }
2812 if (wr_id_idx >= srq->hwq.max_elements) {
2813 dev_err(&cq->hwq.pdev->dev,
2814 "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2815 wr_id_idx, srq->hwq.max_elements);
2816 return -EINVAL;
2817 }
2818 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2819 bnxt_qplib_release_srqe(srq, wr_id_idx);
2820 cqe++;
2821 (*budget)--;
2822 *pcqe = cqe;
2823 } else {
2824 struct bnxt_qplib_swq *swq;
2825
2826 rq = &qp->rq;
2827 if (wr_id_idx > (rq->max_wqe - 1)) {
2828 dev_err(&cq->hwq.pdev->dev,
2829 "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2830 wr_id_idx, rq->max_wqe);
2831 return -EINVAL;
2832 }
2833 if (rq->swq_last != wr_id_idx)
2834 return -EINVAL;
2835 swq = &rq->swq[rq->swq_last];
2836 cqe->wr_id = swq->wr_id;
2837 cqe++;
2838 (*budget)--;
2839 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2840 swq->slots, &rq->dbinfo.flags);
2841 rq->swq_last = swq->next_idx;
2842 *pcqe = cqe;
2843
2844 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2845 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2846 /* Add qp to flush list of the CQ */
2847 bnxt_qplib_add_flush_qp(qp);
2848 }
2849 }
2850
2851 return 0;
2852 }
2853
2854 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2855 struct cq_terminal *hwcqe,
2856 struct bnxt_qplib_cqe **pcqe,
2857 int *budget)
2858 {
2859 struct bnxt_qplib_qp *qp;
2860 struct bnxt_qplib_q *sq, *rq;
2861 struct bnxt_qplib_cqe *cqe;
2862 u32 swq_last = 0, cqe_cons;
2863 int rc = 0;
2864
2865 /* Check the Status */
2866 if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2867 dev_warn(&cq->hwq.pdev->dev,
2868 "FP: CQ Process Terminal Error status = 0x%x\n",
2869 hwcqe->status);
2870
2871 qp = (struct bnxt_qplib_qp *)((unsigned long)
2872 le64_to_cpu(hwcqe->qp_handle));
2873 if (!qp)
2874 return -EINVAL;
2875
2876 /* Must block new posting of SQ and RQ */
2877 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2878
2879 sq = &qp->sq;
2880 rq = &qp->rq;
2881
2882 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2883 if (cqe_cons == 0xFFFF)
2884 goto do_rq;
2885 cqe_cons %= sq->max_wqe;
2886
2887 if (qp->sq.flushed) {
2888 dev_dbg(&cq->hwq.pdev->dev,
2889 "%s: QP in Flush QP = %p\n", __func__, qp);
2890 goto sq_done;
2891 }
2892
2893 /* Terminal CQE can also include aggregated successful CQEs prior.
2894 * So we must complete all CQEs from the current sq's cons to the
2895 * cq_cons with status OK
2896 */
2897 cqe = *pcqe;
2898 while (*budget) {
2899 swq_last = sq->swq_last;
2900 if (swq_last == cqe_cons)
2901 break;
2902 if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2903 memset(cqe, 0, sizeof(*cqe));
2904 cqe->status = CQ_REQ_STATUS_OK;
2905 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2906 cqe->qp_handle = (u64)(unsigned long)qp;
2907 cqe->src_qp = qp->id;
2908 cqe->wr_id = sq->swq[swq_last].wr_id;
2909 cqe->type = sq->swq[swq_last].type;
2910 cqe++;
2911 (*budget)--;
2912 }
2913 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2914 sq->swq[swq_last].slots, &sq->dbinfo.flags);
2915 sq->swq_last = sq->swq[swq_last].next_idx;
2916 }
2917 *pcqe = cqe;
2918 if (!(*budget) && swq_last != cqe_cons) {
2919 /* Out of budget */
2920 rc = -EAGAIN;
2921 goto sq_done;
2922 }
2923 sq_done:
2924 if (rc)
2925 return rc;
2926 do_rq:
2927 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2928 if (cqe_cons == 0xFFFF) {
2929 goto done;
2930 } else if (cqe_cons > rq->max_wqe - 1) {
2931 dev_err(&cq->hwq.pdev->dev,
2932 "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2933 cqe_cons, rq->max_wqe);
2934 rc = -EINVAL;
2935 goto done;
2936 }
2937
2938 if (qp->rq.flushed) {
2939 dev_dbg(&cq->hwq.pdev->dev,
2940 "%s: QP in Flush QP = %p\n", __func__, qp);
2941 rc = 0;
2942 goto done;
2943 }
2944
2945 /* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2946 * from the current rq->cons to the rq->prod regardless what the
2947 * rq->cons the terminal CQE indicates
2948 */
2949
2950 /* Add qp to flush list of the CQ */
2951 bnxt_qplib_add_flush_qp(qp);
2952 done:
2953 return rc;
2954 }
2955
2956 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2957 struct cq_cutoff *hwcqe)
2958 {
2959 /* Check the Status */
2960 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2961 dev_err(&cq->hwq.pdev->dev,
2962 "FP: CQ Process Cutoff Error status = 0x%x\n",
2963 hwcqe->status);
2964 return -EINVAL;
2965 }
2966 clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2967 wake_up_interruptible(&cq->waitq);
2968
2969 return 0;
2970 }
2971
2972 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2973 struct bnxt_qplib_cqe *cqe,
2974 int num_cqes)
2975 {
2976 struct bnxt_qplib_qp *qp = NULL;
2977 u32 budget = num_cqes;
2978 unsigned long flags;
2979
2980 spin_lock_irqsave(&cq->flush_lock, flags);
2981 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2982 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2983 __flush_sq(&qp->sq, qp, &cqe, &budget);
2984 }
2985
2986 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2987 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2988 __flush_rq(&qp->rq, qp, &cqe, &budget);
2989 }
2990 spin_unlock_irqrestore(&cq->flush_lock, flags);
2991
2992 return num_cqes - budget;
2993 }
2994
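/* Poll up to num_cqes completions. A CQE is consumable only when its
 * toggle bit matches the current epoch in cq->dbinfo.flags
 * (CQE_CMP_VALID); dma_rmb() orders the valid-bit check before any other
 * field is read. Each HW CQE is dispatched by type and may fan out into
 * several user CQEs; the consumer doorbell is rung once after the loop.
 */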
2995 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2996 int num_cqes, struct bnxt_qplib_qp **lib_qp)
2997 {
2998 struct cq_base *hw_cqe;
2999 int budget, rc = 0;
3000 u32 hw_polled = 0;
3001 u8 type;
3002
3003 budget = num_cqes;
3004
3005 while (budget) {
3006 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
3007
3008 /* Check for Valid bit */
3009 if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
3010 break;
3011
3012 /*
3013 * The valid test of the entry must be done first before
3014 * reading any further.
3015 */
3016 dma_rmb();
3017 /* From the device's respective CQE format to qplib_wc*/
3018 type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
3019 switch (type) {
3020 case CQ_BASE_CQE_TYPE_REQ:
3021 rc = bnxt_qplib_cq_process_req(cq,
3022 (struct cq_req *)hw_cqe,
3023 &cqe, &budget,
3024 cq->hwq.cons, lib_qp);
3025 break;
3026 case CQ_BASE_CQE_TYPE_RES_RC:
3027 rc = bnxt_qplib_cq_process_res_rc(cq,
3028 (struct cq_res_rc *)
3029 hw_cqe, &cqe,
3030 &budget);
3031 break;
3032 case CQ_BASE_CQE_TYPE_RES_UD:
3033 rc = bnxt_qplib_cq_process_res_ud
3034 (cq, (struct cq_res_ud *)hw_cqe, &cqe,
3035 &budget);
3036 break;
3037 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3038 rc = bnxt_qplib_cq_process_res_raweth_qp1
3039 (cq, (struct cq_res_raweth_qp1 *)
3040 hw_cqe, &cqe, &budget);
3041 break;
3042 case CQ_BASE_CQE_TYPE_TERMINAL:
3043 rc = bnxt_qplib_cq_process_terminal
3044 (cq, (struct cq_terminal *)hw_cqe,
3045 &cqe, &budget);
3046 break;
3047 case CQ_BASE_CQE_TYPE_CUT_OFF:
3048 bnxt_qplib_cq_process_cutoff
3049 (cq, (struct cq_cutoff *)hw_cqe);
3050 /* Done processing this CQ */
3051 goto exit;
3052 default:
3053 dev_err(&cq->hwq.pdev->dev,
3054 "process_cq unknown type 0x%lx\n",
3055 hw_cqe->cqe_type_toggle &
3056 CQ_BASE_CQE_TYPE_MASK);
3057 rc = -EINVAL;
3058 break;
3059 }
3060 if (rc < 0) {
3061 if (rc == -EAGAIN)
3062 break;
3063 /* Error while processing the CQE, just skip to the
3064 * next one
3065 */
3066 if (type != CQ_BASE_CQE_TYPE_TERMINAL)
3067 dev_err(&cq->hwq.pdev->dev,
3068 "process_cqe error rc = 0x%x\n", rc);
3069 }
3070 hw_polled++;
3071 bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
3072 1, &cq->dbinfo.flags);
3073
3074 }
3075 if (hw_polled)
3076 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
3077 exit:
3078 return num_cqes - budget;
3079 }
3080
3081 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
3082 {
3083 cq->dbinfo.toggle = cq->toggle;
3084 if (arm_type)
3085 bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3086 /* Using cq->arm_state variable to track whether to issue cq handler */
3087 atomic_set(&cq->arm_state, 1);
3088 }
3089
3090 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3091 {
3092 flush_workqueue(qp->scq->nq->cqn_wq);
3093 if (qp->scq != qp->rcq)
3094 flush_workqueue(qp->rcq->nq->cqn_wq);
3095 }
3096