/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include <rdma/ib_addr.h>
#include "bnxt_ulp.h"
#include "bnxt_re.h"
#include "ib_verbs.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

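/* Take both CQ flush locks in a fixed order (SCQ first, then RCQ),
 * skipping the second lock when the QP uses one CQ for both directions.
 */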
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
		container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

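/* Scrub pending notification entries that reference the given CQ so no
 * stale CQN events are delivered while the CQ is being torn down.
 */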
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait until all outstanding NQEs for this CQ have been received, cleaning
 * any NQEs that are still associated with it.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

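/* NQ tasklet handler: drain up to 'budget' entries, dispatching CQ and
 * SRQ notifications to the registered handlers, then re-arm the NQ
 * doorbell if anything was consumed.
 */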
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u32 hw_polled = 0;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	while (budget--) {
		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;
			struct bnxt_re_cq *cq_p;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			cq->toggle = (le16_to_cpu(nqe->info10_type) &
					NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
			cq->dbinfo.toggle = cq->toggle;
			cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq);
			if (cq_p->uctx_cq_page)
				*((u32 *)cq_p->uctx_cq_page) = cq->toggle;

			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, (cq)))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct bnxt_re_srq *srq_p;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
				      >> NQ_CN_TOGGLE_SFT;
			srq->dbinfo.toggle = srq->toggle;
			srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
			if (srq_p->uctx_srq_page)
				*((u32 *)srq_p->uctx_srq_page) = srq->toggle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
					 1, &nq->nq_db.dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	spin_unlock_bh(&hwq->lock);
}

/* bnxt_re_synchronize_nq - self-polling notification queue.
 * @nq: notification queue pointer
 *
 * This function polls a given notification queue for all
 * pending entries.
 * It is useful for synchronizing notification entries while resources
 * are going away.
 */

void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

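/* Map the NQ consumer doorbell region from the PCI BAR */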
static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->dbinfo.flags = 0;
	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resource start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;
	nq->load = 0;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;
	srq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	if (!srq->hwq.is_user) {
		srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
				   GFP_KERNEL);
		if (!srq->swq) {
			rc = -ENOMEM;
			goto fail;
		}
		for (idx = 0; idx < srq->hwq.max_elements; idx++)
			srq->swq[idx].next_idx = idx + 1;
		srq->swq[srq->last_idx].next_idx = -1;
	}

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}

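/* Post a receive WQE to the SRQ, consuming one entry from the software
 * free list and ringing the SRQ doorbell.
 */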
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);

	return 0;
}

/* QP */

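/* Allocate the software shadow queue and link its entries into a
 * circular free list.
 */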
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_sw_wqe - 1;
	for (indx = 0; indx < que->max_sw_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf to be passed in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

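/* Locate the PSN/MSN search (pad) area that follows the SQ entries and
 * record its page table, starting offset and stride for later lookups.
 */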
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
	sq->dbinfo.flags = 0;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);

		if (qp->is_host_msn_tbl) {
			psn_sz = sizeof(struct sq_msn_search);
			qp->msn = 0;
		}
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
				    : 0;
	/* Update msn tbl size */
	if (qp->is_host_msn_tbl && psn_sz) {
		if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
			hwq_attr.aux_depth =
				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
		else
			hwq_attr.aux_depth =
				roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
		qp->msn_tbl_sz = hwq_attr.aux_depth;
		qp->msn = 0;
	}

	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	if (!sq->hwq.is_user) {
		rc = bnxt_qplib_alloc_init_swq(sq);
		if (rc)
			goto fail_sq;

		if (psn_sz)
			bnxt_qplib_init_psn_ptr(qp, psn_sz);
	}
	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		rq->dbinfo.flags = 0;
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		if (!rq->hwq.is_user) {
			rc = bnxt_qplib_alloc_init_swq(rq);
			if (rc)
				goto fail_rq;
		}

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (bnxt_ext_stats_supported(res->cctx, res->dattr->dev_cap_flags, res->is_vf))
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
	spin_unlock_bh(&rcfw->tbl_lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		      CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
					  struct bnxt_qplib_qp *qp,
					  struct cmdq_modify_qp *req)
{
	u32 mandatory_flags = 0;

	if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;

	if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
	    qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) {
		if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC && qp->srq)
			req->flags = cpu_to_le16(CMDQ_MODIFY_QP_FLAGS_SRQ_USED);
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
	}

	if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
	    (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
		if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
			mandatory_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
	}

	if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
	    qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
		mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;

	qp->modify_flags |= mandatory_flags;
	req->qp_type = qp->type;
}

static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
{
	if ((qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) ||
	    (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
	     qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS))
		return true;

	return false;
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &res->sgid_tbl;
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u16 vlan_pcp_vlan_dei_vlan_id;
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	if (qp->modify_flags & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		/* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
		if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
		    is_optimized_state_transition(qp))
			bnxt_set_mandatory_attributes(res, qp, &req);
	}
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) {
		if (qp->type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
			req.sgid_index =
				cpu_to_le16(sgid_tbl->hw_id[qp->ugid_index]);
		else
			req.sgid_index =
				cpu_to_le16(sgid_tbl->hw_id[qp->ah.sgid_index]);
	}

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID) {
		vlan_pcp_vlan_dei_vlan_id =
			((res->sgid_tbl.tbl[qp->ah.sgid_index].vlan_id <<
			  CMDQ_MODIFY_QP_VLAN_ID_SFT) &
			 CMDQ_MODIFY_QP_VLAN_ID_MASK);
		vlan_pcp_vlan_dei_vlan_id |=
			((qp->ah.sl << CMDQ_MODIFY_QP_VLAN_PCP_SFT) &
			 CMDQ_MODIFY_QP_VLAN_PCP_MASK);
		req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(vlan_pcp_vlan_dei_vlan_id);
	}

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);
	qp->udp_sport = le16_to_cpu(sb->udp_src_port);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			  CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
			 CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
		       CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
	qp->port_id = le16_to_cpu(sb->port_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

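/* Walk the CQ and zero the qp_handle of every valid CQE that belongs to
 * the given QP so its completions are ignored once the QP is destroyed.
 */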
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	u32 peek_flags, peek_cons;
	struct cq_base *hw_cqe;
	int i;

	peek_flags = cq->dbinfo.flags;
	peek_cons = cq_hwq->cons;
	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
					 1, &peek_flags);
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	spin_lock_bh(&rcfw->tbl_lock);
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
	spin_unlock_bh(&rcfw->tbl_lock);

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		spin_lock_bh(&rcfw->tbl_lock);
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		spin_unlock_bh(&rcfw->tbl_lock);
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

/* Fill the MSN table into the next psn row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_msn_search *msns;
	u32 start_psn, next_psn;
	u16 start_idx;

	msns = (struct sq_msn_search *)swq->psn_search;
	msns->start_idx_next_psn_start_psn = 0;

	start_psn = swq->start_psn;
	next_psn = swq->next_psn;
	start_idx = swq->slot_idx;
	msns->start_idx_next_psn_start_psn |=
		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
	qp->msn++;
	qp->msn %= qp->msn_tbl_sz;
}

static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	/* Handle MSN differently on cap flags */
	if (qp->is_host_msn_tbl) {
		bnxt_qplib_fill_msn_search(qp, wqe, swq);
		return;
	}
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

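/* Copy inline data from the SGE list directly into 16B WQE slots, pulling
 * a fresh slot whenever the current one fills; returns the total length,
 * or BNXT_RE_INVAL_MSG_SIZE if it exceeds the QP's inline limit.
 */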
bnxt_qplib_put_inline(struct bnxt_qplib_qp * qp,struct bnxt_qplib_swqe * wqe,u32 * idx)1732 static unsigned int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
1733 struct bnxt_qplib_swqe *wqe,
1734 u32 *idx)
1735 {
1736 struct bnxt_qplib_hwq *hwq;
1737 int len, t_len, offt;
1738 bool pull_dst = true;
1739 void *il_dst = NULL;
1740 void *il_src = NULL;
1741 int t_cplen, cplen;
1742 int indx;
1743
1744 hwq = &qp->sq.hwq;
1745 t_len = 0;
1746 for (indx = 0; indx < wqe->num_sge; indx++) {
1747 len = wqe->sg_list[indx].size;
1748 il_src = (void *)wqe->sg_list[indx].addr;
1749 t_len += len;
1750 if (t_len > qp->max_inline_data)
1751 return BNXT_RE_INVAL_MSG_SIZE;
1752 while (len) {
1753 if (pull_dst) {
1754 pull_dst = false;
1755 il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
1756 (*idx)++;
1757 t_cplen = 0;
1758 offt = 0;
1759 }
1760 cplen = min_t(int, len, sizeof(struct sq_sge));
1761 cplen = min_t(int, cplen,
1762 (sizeof(struct sq_sge) - offt));
1763 memcpy(il_dst, il_src, cplen);
1764 t_cplen += cplen;
1765 il_src += cplen;
1766 il_dst += cplen;
1767 offt += cplen;
1768 len -= cplen;
1769 if (t_cplen == sizeof(struct sq_sge))
1770 pull_dst = true;
1771 }
1772 }
1773
1774 return t_len;
1775 }
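/*
 * Illustrative walk-through of the copy loop above (editor's example,
 * not driver code): inline payload is packed into consecutive 16-byte
 * sq_sge slots. Two SGEs of 20 and 10 bytes give t_len = 30 and land as:
 *
 *	slot 0: bytes 0..15 of SGE 0
 *	slot 1: bytes 16..19 of SGE 0, then all 10 bytes of SGE 1
 *
 * pull_dst flips back to true only once a slot is completely filled
 * (t_cplen == sizeof(struct sq_sge)), so a partially used slot keeps
 * accepting data from the next SGE.
 */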
1776
1777 static unsigned int bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
1778 struct bnxt_qplib_sge *ssge,
1779 u32 nsge, u32 *idx)
1780 {
1781 struct sq_sge *dsge;
1782 int indx, len = 0;
1783
1784 for (indx = 0; indx < nsge; indx++, (*idx)++) {
1785 dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
1786 dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
1787 dsge->l_key = cpu_to_le32(ssge[indx].lkey);
1788 dsge->size = cpu_to_le32(ssge[indx].size);
1789 len += ssge[indx].size;
1790 }
1791
1792 return len;
1793 }
1794
1795 static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
1796 struct bnxt_qplib_swqe *wqe,
1797 u16 *wqe_sz, u16 *qdf, u8 mode)
1798 {
1799 u32 ilsize, bytes;
1800 u16 nsge;
1801 u16 slot;
1802
1803 nsge = wqe->num_sge;
1804 /* Using sq_send_hdr here is a slight misnomer; the RQ header size is the same. */
1805 bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
1806 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1807 ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
1808 bytes = ALIGN(ilsize, sizeof(struct sq_sge));
1809 bytes += sizeof(struct sq_send_hdr);
1810 }
1811
1812 *qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
1813 slot = bytes >> 4;
1814 *wqe_sz = slot;
1815 if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
1816 slot = 8;
1817 return slot;
1818 }
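/*
 * Illustrative note: one slot is one 16-byte sq_sge, so slot =
 * bytes >> 4 counts the header slots plus one slot per SGE (or the
 * aligned inline size). In static WQE mode the queue uses a fixed
 * 128-byte stride, hence the unconditional 8 slots, while *wqe_sz
 * still reports the true WQE size for the header.
 */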
1819
1820 static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
1821 struct bnxt_qplib_swq *swq, bool hw_retx)
1822 {
1823 struct bnxt_qplib_hwq *hwq;
1824 u32 pg_num, pg_indx;
1825 void *buff;
1826 u32 tail;
1827
1828 hwq = &sq->hwq;
1829 if (!hwq->pad_pg)
1830 return;
1831 tail = swq->slot_idx / sq->dbinfo.max_slot;
1832 if (hw_retx) {
1833 /* For HW retx use qp msn index */
1834 tail = qp->msn;
1835 tail %= qp->msn_tbl_sz;
1836 }
1837 pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1838 pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1839 buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1840 swq->psn_ext = buff;
1841 swq->psn_search = buff;
1842 }
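/*
 * Illustrative example (assumed numbers): the PSN/MSN search records
 * live in the pad pages at pad_stride-sized offsets. With PAGE_SIZE =
 * 4096 and a hypothetical pad_stride of 16, each pad page holds 256
 * records, so entry `tail` resolves to page (tail + pad_pgofft) / 256
 * at offset ((tail + pad_pgofft) % 256) * 16 -- exactly the pg_num /
 * pg_indx math above.
 */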
1843
1844 void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1845 {
1846 struct bnxt_qplib_q *sq = &qp->sq;
1847
1848 bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1849 }
1850
1851 int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1852 struct bnxt_qplib_swqe *wqe)
1853 {
1854 struct bnxt_qplib_nq_work *nq_work = NULL;
1855 int i, rc = 0, data_len = 0, pkt_num = 0;
1856 struct bnxt_qplib_q *sq = &qp->sq;
1857 struct bnxt_qplib_hwq *hwq;
1858 struct bnxt_qplib_swq *swq;
1859 bool sch_handler = false;
1860 u32 wqe_idx, slots, idx;
1861 u16 wqe_sz, qdf = 0;
1862 bool msn_update;
1863 void *base_hdr;
1864 void *ext_hdr;
1865 __le32 temp32;
1866
1867 hwq = &sq->hwq;
1868 if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1869 qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1870 dev_err(&hwq->pdev->dev,
1871 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1872 qp->id, qp->state);
1873 rc = -EINVAL;
1874 goto done;
1875 }
1876
1877 slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1878 if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1879 dev_err(&hwq->pdev->dev,
1880 "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1881 hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1882 rc = -ENOMEM;
1883 goto done;
1884 }
1885
1886 swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1887 bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
1888
1889 idx = 0;
1890 swq->slot_idx = hwq->prod;
1891 swq->slots = slots;
1892 swq->wr_id = wqe->wr_id;
1893 swq->type = wqe->type;
1894 swq->flags = wqe->flags;
1895 swq->start_psn = sq->psn & BTH_PSN_MASK;
1896 if (qp->sig_type)
1897 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1898
1899 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1900 sch_handler = true;
1901 dev_dbg(&hwq->pdev->dev,
1902 "%s Error QP. Scheduling for poll_cq\n", __func__);
1903 goto queue_err;
1904 }
1905
1906 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1907 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1908 memset(base_hdr, 0, sizeof(struct sq_sge));
1909 memset(ext_hdr, 0, sizeof(struct sq_sge));
1910
1911 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1912 /* Copy the inline data */
1913 data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1914 else
1915 data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1916 &idx);
1917 if (data_len > BNXT_RE_MAX_MSG_SIZE) {
1918 rc = -EINVAL;
1919 goto done;
1920 }
1921 /* Make sure we update the MSN table only for WQEs that go out on the wire */
1922 msn_update = true;
1923 /* Specifics */
1924 switch (wqe->type) {
1925 case BNXT_QPLIB_SWQE_TYPE_SEND:
1926 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1927 struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1928 struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1929 /* Assemble info for Raw Ethertype QPs */
1930
1931 sqe->wqe_type = wqe->type;
1932 sqe->flags = wqe->flags;
1933 sqe->wqe_size = wqe_sz;
1934 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1935 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1936 sqe->length = cpu_to_le32(data_len);
1937 ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1938 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1939 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1940
1941 break;
1942 }
1943 fallthrough;
1944 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1945 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1946 {
1947 struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1948 struct sq_send_hdr *sqe = base_hdr;
1949
1950 sqe->wqe_type = wqe->type;
1951 sqe->flags = wqe->flags;
1952 sqe->wqe_size = wqe_sz;
1953 sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1954 if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1955 qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1956 sqe->q_key = cpu_to_le32(wqe->send.q_key);
1957 sqe->length = cpu_to_le32(data_len);
1958 sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1959 ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1960 SQ_SEND_DST_QP_MASK);
1961 ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1962 SQ_SEND_AVID_MASK);
1963 msn_update = false;
1964 } else {
1965 sqe->length = cpu_to_le32(data_len);
1966 if (qp->mtu)
1967 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1968 if (!pkt_num)
1969 pkt_num = 1;
1970 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1971 }
1972 break;
1973 }
1974 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1975 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1976 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1977 {
1978 struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1979 struct sq_rdma_hdr *sqe = base_hdr;
1980
1981 sqe->wqe_type = wqe->type;
1982 sqe->flags = wqe->flags;
1983 sqe->wqe_size = wqe_sz;
1984 sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1985 sqe->length = cpu_to_le32((u32)data_len);
1986 ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1987 ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1988 if (qp->mtu)
1989 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1990 if (!pkt_num)
1991 pkt_num = 1;
1992 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1993 break;
1994 }
1995 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1996 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1997 {
1998 struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1999 struct sq_atomic_hdr *sqe = base_hdr;
2000
2001 sqe->wqe_type = wqe->type;
2002 sqe->flags = wqe->flags;
2003 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
2004 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
2005 ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
2006 ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
2007 if (qp->mtu)
2008 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
2009 if (!pkt_num)
2010 pkt_num = 1;
2011 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
2012 break;
2013 }
2014 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2015 {
2016 struct sq_localinvalidate *sqe = base_hdr;
2017
2018 sqe->wqe_type = wqe->type;
2019 sqe->flags = wqe->flags;
2020 sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
2021 msn_update = false;
2022 break;
2023 }
2024 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
2025 {
2026 struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
2027 struct sq_fr_pmr_hdr *sqe = base_hdr;
2028
2029 sqe->wqe_type = wqe->type;
2030 sqe->flags = wqe->flags;
2031 sqe->access_cntl = wqe->frmr.access_cntl |
2032 SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2033 sqe->zero_based_page_size_log =
2034 (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
2035 SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
2036 (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
2037 sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
2038 temp32 = cpu_to_le32(wqe->frmr.length);
2039 memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
2040 sqe->numlevels_pbl_page_size_log =
2041 ((wqe->frmr.pbl_pg_sz_log <<
2042 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
2043 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
2044 ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
2045 SQ_FR_PMR_NUMLEVELS_MASK);
2046
2047 for (i = 0; i < wqe->frmr.page_list_len; i++)
2048 wqe->frmr.pbl_ptr[i] = cpu_to_le64(
2049 wqe->frmr.page_list[i] |
2050 PTU_PTE_VALID);
2051 ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
2052 ext_sqe->va = cpu_to_le64(wqe->frmr.va);
2053 msn_update = false;
2054
2055 break;
2056 }
2057 case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
2058 {
2059 struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
2060 struct sq_bind_hdr *sqe = base_hdr;
2061
2062 sqe->wqe_type = wqe->type;
2063 sqe->flags = wqe->flags;
2064 sqe->access_cntl = wqe->bind.access_cntl;
2065 sqe->mw_type_zero_based = wqe->bind.mw_type |
2066 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
2067 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
2068 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
2069 ext_sqe->va = cpu_to_le64(wqe->bind.va);
2070 ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
2071 msn_update = false;
2072 break;
2073 }
2074 default:
2075 /* Bad wqe, return error */
2076 rc = -EINVAL;
2077 goto done;
2078 }
2079 if (!qp->is_host_msn_tbl || msn_update) {
2080 swq->next_psn = sq->psn & BTH_PSN_MASK;
2081 bnxt_qplib_fill_psn_search(qp, wqe, swq);
2082 }
2083 queue_err:
2084 bnxt_qplib_swq_mod_start(sq, wqe_idx);
2085 bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
2086 qp->wqe_cnt++;
2087 done:
2088 if (sch_handler) {
2089 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2090 if (nq_work) {
2091 nq_work->cq = qp->scq;
2092 nq_work->nq = qp->scq->nq;
2093 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2094 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
2095 } else {
2096 dev_err(&hwq->pdev->dev,
2097 "FP: Failed to allocate SQ nq_work!\n");
2098 rc = -ENOMEM;
2099 }
2100 }
2101 return rc;
2102 }
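/*
 * Illustrative caller sketch (editor's example; the wr_id cookie and
 * SGE setup are hypothetical, the calls are the ones defined above):
 *
 *	struct bnxt_qplib_swqe wqe = {};
 *	int rc;
 *
 *	wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
 *	wqe.wr_id = my_cookie;
 *	wqe.num_sge = 1;
 *	// fill wqe.sg_list[0].addr/lkey/size here
 *	rc = bnxt_qplib_post_send(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);	// ring the SQ doorbell once
 */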
2103
2104 void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2105 {
2106 struct bnxt_qplib_q *rq = &qp->rq;
2107
2108 bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
2109 }
2110
2111 int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
2112 struct bnxt_qplib_swqe *wqe)
2113 {
2114 struct bnxt_qplib_nq_work *nq_work = NULL;
2115 struct bnxt_qplib_q *rq = &qp->rq;
2116 struct rq_wqe_hdr *base_hdr;
2117 struct rq_ext_hdr *ext_hdr;
2118 struct bnxt_qplib_hwq *hwq;
2119 struct bnxt_qplib_swq *swq;
2120 bool sch_handler = false;
2121 u32 wqe_idx, idx;
2122 u16 wqe_sz;
2123 int rc = 0;
2124
2125 hwq = &rq->hwq;
2126 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2127 dev_err(&hwq->pdev->dev,
2128 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
2129 qp->id, qp->state);
2130 rc = -EINVAL;
2131 goto done;
2132 }
2133
2134 if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
2135 dev_err(&hwq->pdev->dev,
2136 "FP: QP (0x%x) RQ is full!\n", qp->id);
2137 rc = -EINVAL;
2138 goto done;
2139 }
2140
2141 swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2142 swq->wr_id = wqe->wr_id;
2143 swq->slots = rq->dbinfo.max_slot;
2144
2145 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2146 sch_handler = true;
2147 dev_dbg(&hwq->pdev->dev,
2148 "%s: Error QP. Scheduling for poll_cq\n", __func__);
2149 goto queue_err;
2150 }
2151
2152 idx = 0;
2153 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2154 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2155 memset(base_hdr, 0, sizeof(struct sq_sge));
2156 memset(ext_hdr, 0, sizeof(struct sq_sge));
2157 wqe_sz = (sizeof(struct rq_wqe_hdr) +
2158 wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2159 bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2160 if (!wqe->num_sge) {
2161 struct sq_sge *sge;
2162
2163 sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2164 sge->size = 0;
2165 wqe_sz++;
2166 }
2167 base_hdr->wqe_type = wqe->type;
2168 base_hdr->flags = wqe->flags;
2169 base_hdr->wqe_size = wqe_sz;
2170 base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2171 queue_err:
2172 bnxt_qplib_swq_mod_start(rq, wqe_idx);
2173 bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
2174 done:
2175 if (sch_handler) {
2176 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2177 if (nq_work) {
2178 nq_work->cq = qp->rcq;
2179 nq_work->nq = qp->rcq->nq;
2180 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2181 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2182 } else {
2183 dev_err(&hwq->pdev->dev,
2184 "FP: Failed to allocate RQ nq_work!\n");
2185 rc = -ENOMEM;
2186 }
2187 }
2188
2189 return rc;
2190 }
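/*
 * Illustrative note: this mirrors the SQ path -- callers typically
 * post a batch of RQEs with bnxt_qplib_post_recv() and then ring the
 * RQ doorbell once via bnxt_qplib_post_recv_db().
 */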
2191
2192 /* CQ */
2193 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2194 {
2195 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2196 struct bnxt_qplib_hwq_attr hwq_attr = {};
2197 struct creq_create_cq_resp resp = {};
2198 struct bnxt_qplib_cmdqmsg msg = {};
2199 struct cmdq_create_cq req = {};
2200 struct bnxt_qplib_pbl *pbl;
2201 u32 coalescing = 0;
2202 u32 pg_sz_lvl;
2203 int rc;
2204
2205 if (!cq->dpi) {
2206 dev_err(&rcfw->pdev->dev,
2207 "FP: CREATE_CQ failed due to NULL DPI\n");
2208 return -EINVAL;
2209 }
2210
2211 cq->dbinfo.flags = 0;
2212 hwq_attr.res = res;
2213 hwq_attr.depth = cq->max_wqe;
2214 hwq_attr.stride = sizeof(struct cq_base);
2215 hwq_attr.type = HWQ_TYPE_QUEUE;
2216 hwq_attr.sginfo = &cq->sg_info;
2217 rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2218 if (rc)
2219 return rc;
2220
2221 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2222 CMDQ_BASE_OPCODE_CREATE_CQ,
2223 sizeof(req));
2224
2225 req.dpi = cpu_to_le32(cq->dpi->dpi);
2226 req.cq_handle = cpu_to_le64(cq->cq_handle);
2227 req.cq_size = cpu_to_le32(cq->max_wqe);
2228
2229 if (_is_cq_coalescing_supported(res->dattr->dev_cap_flags2) &&
2230 cq->coalescing->enable) {
2231 req.flags |= cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID);
2232 coalescing |= ((cq->coalescing->buf_maxtime <<
2233 CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) &
2234 CMDQ_CREATE_CQ_BUF_MAXTIME_MASK);
2235 coalescing |= ((cq->coalescing->normal_maxbuf <<
2236 CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT) &
2237 CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK);
2238 coalescing |= ((cq->coalescing->during_maxbuf <<
2239 CMDQ_CREATE_CQ_DURING_MAXBUF_SFT) &
2240 CMDQ_CREATE_CQ_DURING_MAXBUF_MASK);
2241 if (cq->coalescing->en_ring_idle_mode)
2242 coalescing |= CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
2243 else
2244 coalescing &= ~CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
2245 req.coalescing = cpu_to_le32(coalescing);
2246 }
2247
2248 pbl = &cq->hwq.pbl[PBL_LVL_0];
2249 pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2250 CMDQ_CREATE_CQ_PG_SIZE_SFT);
2251 pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2252 req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2253 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2254 req.cq_fco_cnq_id = cpu_to_le32(
2255 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2256 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2257 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2258 sizeof(resp), 0);
2259 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2260 if (rc)
2261 goto fail;
2262
2263 cq->id = le32_to_cpu(resp.xid);
2264 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2265 init_waitqueue_head(&cq->waitq);
2266 INIT_LIST_HEAD(&cq->sqf_head);
2267 INIT_LIST_HEAD(&cq->rqf_head);
2268 spin_lock_init(&cq->compl_lock);
2269 spin_lock_init(&cq->flush_lock);
2270
2271 cq->dbinfo.hwq = &cq->hwq;
2272 cq->dbinfo.xid = cq->id;
2273 cq->dbinfo.db = cq->dpi->dbr;
2274 cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2275 cq->dbinfo.flags = 0;
2276 cq->dbinfo.toggle = 0;
2277
2278 bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2279
2280 return 0;
2281
2282 fail:
2283 bnxt_qplib_free_hwq(res, &cq->hwq);
2284 return rc;
2285 }
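/*
 * Illustrative sketch of the caller-side setup implied by the reads
 * above (an editor's summary, not a definitive contract):
 *
 *	cq->dpi = dpi;			// must be non-NULL
 *	cq->max_wqe = depth;		// CQE ring depth
 *	cq->sg_info = sg_info;		// backing memory description
 *	cq->cq_handle = (u64)(unsigned long)handle;
 *	cq->cnq_hw_ring_id = cnq_ring_id;	// hypothetical source
 *	rc = bnxt_qplib_create_cq(res, cq);
 */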
2286
2287 void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2288 struct bnxt_qplib_cq *cq)
2289 {
2290 bnxt_qplib_free_hwq(res, &cq->hwq);
2291 memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2292 /* Reset only the cons bit in the flags */
2293 cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
2294 }
2295
2296 int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2297 int new_cqes)
2298 {
2299 struct bnxt_qplib_hwq_attr hwq_attr = {};
2300 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2301 struct creq_resize_cq_resp resp = {};
2302 struct bnxt_qplib_cmdqmsg msg = {};
2303 struct cmdq_resize_cq req = {};
2304 struct bnxt_qplib_pbl *pbl;
2305 u32 pg_sz, lvl, new_sz;
2306 int rc;
2307
2308 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2309 CMDQ_BASE_OPCODE_RESIZE_CQ,
2310 sizeof(req));
2311 hwq_attr.sginfo = &cq->sg_info;
2312 hwq_attr.res = res;
2313 hwq_attr.depth = new_cqes;
2314 hwq_attr.stride = sizeof(struct cq_base);
2315 hwq_attr.type = HWQ_TYPE_QUEUE;
2316 rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2317 if (rc)
2318 return rc;
2319
2320 req.cq_cid = cpu_to_le32(cq->id);
2321 pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2322 pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2323 lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2324 CMDQ_RESIZE_CQ_LVL_MASK;
2325 new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2326 CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2327 req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2328 req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2329
2330 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2331 sizeof(resp), 0);
2332 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2333 return rc;
2334 }
2335
2336 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2337 {
2338 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2339 struct creq_destroy_cq_resp resp = {};
2340 struct bnxt_qplib_cmdqmsg msg = {};
2341 struct cmdq_destroy_cq req = {};
2342 u16 total_cnq_events;
2343 int rc;
2344
2345 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2346 CMDQ_BASE_OPCODE_DESTROY_CQ,
2347 sizeof(req));
2348
2349 req.cq_cid = cpu_to_le32(cq->id);
2350 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2351 sizeof(resp), 0);
2352 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2353 if (rc)
2354 return rc;
2355 total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2356 __wait_for_all_nqes(cq, total_cnq_events);
2357 bnxt_qplib_free_hwq(res, &cq->hwq);
2358 return 0;
2359 }
2360
2361 static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2362 struct bnxt_qplib_cqe **pcqe, int *budget)
2363 {
2364 struct bnxt_qplib_cqe *cqe;
2365 u32 start, last;
2366 int rc = 0;
2367
2368 /* Now complete all outstanding SQEs with FLUSHED_ERR */
2369 start = sq->swq_start;
2370 cqe = *pcqe;
2371 while (*budget) {
2372 last = sq->swq_last;
2373 if (start == last)
2374 break;
2375 /* Skip the FENCE WQE completions */
2376 if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2377 bnxt_qplib_cancel_phantom_processing(qp);
2378 goto skip_compl;
2379 }
2380 memset(cqe, 0, sizeof(*cqe));
2381 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2382 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2383 cqe->qp_handle = (u64)(unsigned long)qp;
2384 cqe->wr_id = sq->swq[last].wr_id;
2385 cqe->src_qp = qp->id;
2386 cqe->type = sq->swq[last].type;
2387 cqe++;
2388 (*budget)--;
2389 skip_compl:
2390 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2391 sq->swq[last].slots, &sq->dbinfo.flags);
2392 sq->swq_last = sq->swq[last].next_idx;
2393 }
2394 *pcqe = cqe;
2395 if (!(*budget) && sq->swq_last != start)
2396 /* Out of budget */
2397 rc = -EAGAIN;
2398
2399 return rc;
2400 }
2401
2402 static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2403 struct bnxt_qplib_cqe **pcqe, int *budget)
2404 {
2405 struct bnxt_qplib_cqe *cqe;
2406 u32 start, last;
2407 int opcode = 0;
2408 int rc = 0;
2409
2410 switch (qp->type) {
2411 case CMDQ_CREATE_QP1_TYPE_GSI:
2412 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2413 break;
2414 case CMDQ_CREATE_QP_TYPE_RC:
2415 opcode = CQ_BASE_CQE_TYPE_RES_RC;
2416 break;
2417 case CMDQ_CREATE_QP_TYPE_UD:
2418 case CMDQ_CREATE_QP_TYPE_GSI:
2419 opcode = CQ_BASE_CQE_TYPE_RES_UD;
2420 break;
2421 }
2422
2423 /* Flush the rest of the RQ */
2424 start = rq->swq_start;
2425 cqe = *pcqe;
2426 while (*budget) {
2427 last = rq->swq_last;
2428 if (last == start)
2429 break;
2430 memset(cqe, 0, sizeof(*cqe));
2431 cqe->status =
2432 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2433 cqe->opcode = opcode;
2434 cqe->qp_handle = (unsigned long)qp;
2435 cqe->wr_id = rq->swq[last].wr_id;
2436 cqe++;
2437 (*budget)--;
2438 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2439 rq->swq[last].slots, &rq->dbinfo.flags);
2440 rq->swq_last = rq->swq[last].next_idx;
2441 }
2442 *pcqe = cqe;
2443 if (!*budget && rq->swq_last != start)
2444 /* Out of budget */
2445 rc = -EAGAIN;
2446
2447 return rc;
2448 }
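/*
 * Illustrative note on the budget protocol shared by __flush_sq() and
 * __flush_rq(): *budget counts the CQE slots still free in the
 * caller's array, each fabricated flush CQE decrements it, and
 * -EAGAIN means "out of room, call again with a fresh array", not a
 * hard failure.
 */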
2449
2450 void bnxt_qplib_mark_qp_error(void *qp_handle)
2451 {
2452 struct bnxt_qplib_qp *qp = qp_handle;
2453
2454 if (!qp)
2455 return;
2456
2457 /* Must block new posting of SQ and RQ */
2458 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2459 bnxt_qplib_cancel_phantom_processing(qp);
2460 }
2461
2462 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2463  * CQEs are tracked from sw_cq_cons to max_element but valid only if VALID=1
2464 */
2465 static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2466 u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2467 {
2468 u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
2469 struct bnxt_qplib_q *sq = &qp->sq;
2470 struct cq_req *peek_req_hwcqe;
2471 struct bnxt_qplib_qp *peek_qp;
2472 struct bnxt_qplib_q *peek_sq;
2473 struct bnxt_qplib_swq *swq;
2474 struct cq_base *peek_hwcqe;
2475 int i, rc = 0;
2476
2477 /* Normal mode */
2478 /* Check for the psn_search marking before completing */
2479 swq = &sq->swq[swq_last];
2480 if (swq->psn_search &&
2481 le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2482 /* Unmark */
2483 swq->psn_search->flags_next_psn = cpu_to_le32
2484 (le32_to_cpu(swq->psn_search->flags_next_psn)
2485 & ~0x80000000);
2486 dev_dbg(&cq->hwq.pdev->dev,
2487 "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2488 cq_cons, qp->id, swq_last, cqe_sq_cons);
2489 sq->condition = true;
2490 sq->send_phantom = true;
2491
2492 /* TODO: Only ARM if the previous SQE is ARMALL */
2493 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2494 rc = -EAGAIN;
2495 goto out;
2496 }
2497 if (sq->condition) {
2498 /* Peek at the completions */
2499 peek_flags = cq->dbinfo.flags;
2500 peek_sw_cq_cons = cq_cons;
2501 i = cq->hwq.max_elements;
2502 while (i--) {
2503 peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2504 peek_sw_cq_cons, NULL);
2505 /* If the next hwcqe is VALID */
2506 if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
2507 /*
2508 * The valid test of the entry must be done first before
2509 * reading any further.
2510 */
2511 dma_rmb();
2512 /* If the next hwcqe is a REQ */
2513 if ((peek_hwcqe->cqe_type_toggle &
2514 CQ_BASE_CQE_TYPE_MASK) ==
2515 CQ_BASE_CQE_TYPE_REQ) {
2516 peek_req_hwcqe = (struct cq_req *)
2517 peek_hwcqe;
2518 peek_qp = (struct bnxt_qplib_qp *)
2519 ((unsigned long)
2520 le64_to_cpu
2521 (peek_req_hwcqe->qp_handle));
2522 peek_sq = &peek_qp->sq;
2523 peek_sq_cons_idx =
2524 ((le16_to_cpu(
2525 peek_req_hwcqe->sq_cons_idx)
2526 - 1) % sq->max_wqe);
2527 /* If the hwcqe's sq's wr_id matches */
2528 if (peek_sq == sq &&
2529 sq->swq[peek_sq_cons_idx].wr_id ==
2530 BNXT_QPLIB_FENCE_WRID) {
2531 /*
2532 * Unbreak only if the phantom
2533 * comes back
2534 */
2535 dev_dbg(&cq->hwq.pdev->dev,
2536 "FP: Got Phantom CQE\n");
2537 sq->condition = false;
2538 sq->single = true;
2539 rc = 0;
2540 goto out;
2541 }
2542 }
2543 /* Valid but not the phantom, so keep looping */
2544 } else {
2545 /* Not valid yet, just exit and wait */
2546 rc = -EINVAL;
2547 goto out;
2548 }
2549 bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
2550 &peek_sw_cq_cons,
2551 1, &peek_flags);
2552 }
2553 dev_err(&cq->hwq.pdev->dev,
2554 "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2555 cq_cons, qp->id, swq_last, cqe_sq_cons);
2556 rc = -EINVAL;
2557 }
2558 out:
2559 return rc;
2560 }
2561
2562 static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
2563 {
2564 struct bnxt_qplib_hwq *sq_hwq;
2565 struct bnxt_qplib_swq *swq;
2566 int cqe_sq_cons = -1;
2567 u32 start, last;
2568
2569 sq_hwq = &sq->hwq;
2570
2571 start = sq->swq_start;
2572 last = sq->swq_last;
2573
2574 while (last != start) {
2575 swq = &sq->swq[last];
2576 if (swq->slot_idx == cqe_slot) {
2577 cqe_sq_cons = swq->next_idx;
2578 dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
2579 __func__, cqe_sq_cons, cqe_slot);
2580 break;
2581 }
2582
2583 last = swq->next_idx;
2584 }
2585 return cqe_sq_cons;
2586 }
2587
2588 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2589 struct cq_req *hwcqe,
2590 struct bnxt_qplib_cqe **pcqe, int *budget,
2591 u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2592 {
2593 struct bnxt_qplib_swq *swq;
2594 struct bnxt_qplib_cqe *cqe;
2595 u32 cqe_sq_cons, slot_num;
2596 struct bnxt_qplib_qp *qp;
2597 struct bnxt_qplib_q *sq;
2598 int cqe_cons;
2599 int rc = 0;
2600
2601 qp = (struct bnxt_qplib_qp *)((unsigned long)
2602 le64_to_cpu(hwcqe->qp_handle));
2603 if (!qp) {
2604 dev_err(&cq->hwq.pdev->dev,
2605 "FP: Process Req qp is NULL\n");
2606 return -EINVAL;
2607 }
2608 sq = &qp->sq;
2609
2610 cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
2611 if (qp->sq.flushed) {
2612 dev_dbg(&cq->hwq.pdev->dev,
2613 "%s: QP in Flush QP = %p\n", __func__, qp);
2614 goto done;
2615 }
2616
2617 if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
2618 slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
2619 cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
2620 if (cqe_cons < 0) {
2621 dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
2622 __func__, slot_num);
2623 goto done;
2624 }
2625 cqe_sq_cons = cqe_cons;
2626 dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
2627 __func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
2628 }
2629
2630 /* We must walk the sq's swq to fabricate CQEs for all previously
2631  * signaled SWQEs (CQE aggregation), from the current sq cons
2632  * up to cqe_sq_cons
2633 */
2634 cqe = *pcqe;
2635 while (*budget) {
2636 if (sq->swq_last == cqe_sq_cons)
2637 /* Done */
2638 break;
2639
2640 swq = &sq->swq[sq->swq_last];
2641 memset(cqe, 0, sizeof(*cqe));
2642 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2643 cqe->qp_handle = (u64)(unsigned long)qp;
2644 cqe->src_qp = qp->id;
2645 cqe->wr_id = swq->wr_id;
2646 if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2647 goto skip;
2648 cqe->type = swq->type;
2649
2650 /* For the last CQE, check for status. For errors, regardless
2651 * of the request being signaled or not, it must complete with
2652 * the hwcqe error status
2653 */
2654 if (swq->next_idx == cqe_sq_cons &&
2655 hwcqe->status != CQ_REQ_STATUS_OK) {
2656 cqe->status = hwcqe->status;
2657 dev_err(&cq->hwq.pdev->dev,
2658 "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2659 sq->swq_last, cqe->wr_id, cqe->status);
2660 cqe++;
2661 (*budget)--;
2662 bnxt_qplib_mark_qp_error(qp);
2663 /* Add qp to flush list of the CQ */
2664 bnxt_qplib_add_flush_qp(qp);
2665 } else {
2666 /* Before we complete, do WA 9060 */
2667 if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
2668 if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2669 cqe_sq_cons)) {
2670 *lib_qp = qp;
2671 goto out;
2672 }
2673 }
2674 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2675 cqe->status = CQ_REQ_STATUS_OK;
2676 cqe++;
2677 (*budget)--;
2678 }
2679 }
2680 skip:
2681 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2682 swq->slots, &sq->dbinfo.flags);
2683 sq->swq_last = swq->next_idx;
2684 if (sq->single)
2685 break;
2686 }
2687 out:
2688 *pcqe = cqe;
2689 if (sq->swq_last != cqe_sq_cons) {
2690 /* Out of budget */
2691 rc = -EAGAIN;
2692 goto done;
2693 }
2694 /*
2695  * Go back to normal completion mode only after all of the WCs for
2696  * this CQE have been completed
2697 */
2698 sq->single = false;
2699 done:
2700 return rc;
2701 }
2702
2703 static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2704 {
2705 spin_lock(&srq->hwq.lock);
2706 srq->swq[srq->last_idx].next_idx = (int)tag;
2707 srq->last_idx = (int)tag;
2708 srq->swq[srq->last_idx].next_idx = -1;
2709 bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
2710 srq->dbinfo.max_slot, &srq->dbinfo.flags);
2711 spin_unlock(&srq->hwq.lock);
2712 }
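/*
 * Illustrative example: the SRQ swq entries form a singly linked free
 * list. Releasing tag T appends it at the tail -- a list ending
 * ... -> 5 (last_idx = 5) becomes ... -> 5 -> T with
 * swq[T].next_idx = -1 and last_idx = T.
 */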
2713
2714 static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2715 struct cq_res_rc *hwcqe,
2716 struct bnxt_qplib_cqe **pcqe,
2717 int *budget)
2718 {
2719 struct bnxt_qplib_srq *srq;
2720 struct bnxt_qplib_cqe *cqe;
2721 struct bnxt_qplib_qp *qp;
2722 struct bnxt_qplib_q *rq;
2723 u32 wr_id_idx;
2724
2725 qp = (struct bnxt_qplib_qp *)((unsigned long)
2726 le64_to_cpu(hwcqe->qp_handle));
2727 if (!qp) {
2728 dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2729 return -EINVAL;
2730 }
2731 if (qp->rq.flushed) {
2732 dev_dbg(&cq->hwq.pdev->dev,
2733 "%s: QP in Flush QP = %p\n", __func__, qp);
2734 return 0;
2735 }
2736
2737 cqe = *pcqe;
2738 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2739 cqe->length = le32_to_cpu(hwcqe->length);
2740 cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2741 cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2742 cqe->flags = le16_to_cpu(hwcqe->flags);
2743 cqe->status = hwcqe->status;
2744 cqe->qp_handle = (u64)(unsigned long)qp;
2745
2746 wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2747 CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2748 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2749 srq = qp->srq;
2750 if (!srq)
2751 return -EINVAL;
2752 if (wr_id_idx >= srq->hwq.max_elements) {
2753 dev_err(&cq->hwq.pdev->dev,
2754 "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2755 wr_id_idx, srq->hwq.max_elements);
2756 return -EINVAL;
2757 }
2758 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2759 bnxt_qplib_release_srqe(srq, wr_id_idx);
2760 cqe++;
2761 (*budget)--;
2762 *pcqe = cqe;
2763 } else {
2764 struct bnxt_qplib_swq *swq;
2765
2766 rq = &qp->rq;
2767 if (wr_id_idx > (rq->max_wqe - 1)) {
2768 dev_err(&cq->hwq.pdev->dev,
2769 "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2770 wr_id_idx, rq->max_wqe);
2771 return -EINVAL;
2772 }
2773 if (wr_id_idx != rq->swq_last)
2774 return -EINVAL;
2775 swq = &rq->swq[rq->swq_last];
2776 cqe->wr_id = swq->wr_id;
2777 cqe++;
2778 (*budget)--;
2779 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2780 swq->slots, &rq->dbinfo.flags);
2781 rq->swq_last = swq->next_idx;
2782 *pcqe = cqe;
2783
2784 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2785 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2786 /* Add qp to flush list of the CQ */
2787 bnxt_qplib_add_flush_qp(qp);
2788 }
2789 }
2790
2791 return 0;
2792 }
2793
2794 static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2795 struct cq_res_ud *hwcqe,
2796 struct bnxt_qplib_cqe **pcqe,
2797 int *budget)
2798 {
2799 struct bnxt_qplib_srq *srq;
2800 struct bnxt_qplib_cqe *cqe;
2801 struct bnxt_qplib_qp *qp;
2802 struct bnxt_qplib_q *rq;
2803 u32 wr_id_idx;
2804
2805 qp = (struct bnxt_qplib_qp *)((unsigned long)
2806 le64_to_cpu(hwcqe->qp_handle));
2807 if (!qp) {
2808 dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2809 return -EINVAL;
2810 }
2811 if (qp->rq.flushed) {
2812 dev_dbg(&cq->hwq.pdev->dev,
2813 "%s: QP in Flush QP = %p\n", __func__, qp);
2814 return 0;
2815 }
2816 cqe = *pcqe;
2817 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2818 cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2819 cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2820 cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2821 cqe->flags = le16_to_cpu(hwcqe->flags);
2822 cqe->status = hwcqe->status;
2823 cqe->qp_handle = (u64)(unsigned long)qp;
2824 /* FIXME: Endianness fix needed for smac */
2825 memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2826 wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2827 & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2828 cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2829 ((le32_to_cpu(
2830 hwcqe->src_qp_high_srq_or_rq_wr_id) &
2831 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2832
2833 if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2834 srq = qp->srq;
2835 if (!srq)
2836 return -EINVAL;
2837
2838 if (wr_id_idx >= srq->hwq.max_elements) {
2839 dev_err(&cq->hwq.pdev->dev,
2840 "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2841 wr_id_idx, srq->hwq.max_elements);
2842 return -EINVAL;
2843 }
2844 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2845 bnxt_qplib_release_srqe(srq, wr_id_idx);
2846 cqe++;
2847 (*budget)--;
2848 *pcqe = cqe;
2849 } else {
2850 struct bnxt_qplib_swq *swq;
2851
2852 rq = &qp->rq;
2853 if (wr_id_idx > (rq->max_wqe - 1)) {
2854 dev_err(&cq->hwq.pdev->dev,
2855 "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2856 wr_id_idx, rq->max_wqe);
2857 return -EINVAL;
2858 }
2859
2860 if (rq->swq_last != wr_id_idx)
2861 return -EINVAL;
2862 swq = &rq->swq[rq->swq_last];
2863 cqe->wr_id = swq->wr_id;
2864 cqe++;
2865 (*budget)--;
2866 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2867 swq->slots, &rq->dbinfo.flags);
2868 rq->swq_last = swq->next_idx;
2869 *pcqe = cqe;
2870
2871 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2872 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2873 /* Add qp to flush list of the CQ */
2874 bnxt_qplib_add_flush_qp(qp);
2875 }
2876 }
2877
2878 return 0;
2879 }
2880
2881 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2882 {
2883 struct cq_base *hw_cqe;
2884 bool rc = true;
2885
2886 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
2887 /* Check for Valid bit. If the CQE is valid, return false */
2888 rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
2889 return rc;
2890 }
2891
2892 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2893 struct cq_res_raweth_qp1 *hwcqe,
2894 struct bnxt_qplib_cqe **pcqe,
2895 int *budget)
2896 {
2897 struct bnxt_qplib_qp *qp;
2898 struct bnxt_qplib_q *rq;
2899 struct bnxt_qplib_srq *srq;
2900 struct bnxt_qplib_cqe *cqe;
2901 u32 wr_id_idx;
2902
2903 qp = (struct bnxt_qplib_qp *)((unsigned long)
2904 le64_to_cpu(hwcqe->qp_handle));
2905 if (!qp) {
2906 dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2907 return -EINVAL;
2908 }
2909 if (qp->rq.flushed) {
2910 dev_dbg(&cq->hwq.pdev->dev,
2911 "%s: QP in Flush QP = %p\n", __func__, qp);
2912 return 0;
2913 }
2914 cqe = *pcqe;
2915 cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2916 cqe->flags = le16_to_cpu(hwcqe->flags);
2917 cqe->qp_handle = (u64)(unsigned long)qp;
2918
2919 wr_id_idx =
2920 le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2921 & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2922 cqe->src_qp = qp->id;
2923 if (qp->id == 1 && !cqe->length) {
2924 /* Add workaround for the length misdetection */
2925 cqe->length = 296;
2926 } else {
2927 cqe->length = le16_to_cpu(hwcqe->length);
2928 }
2929 cqe->pkey_index = qp->pkey_index;
2930 memcpy(cqe->smac, qp->smac, 6);
2931
2932 cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2933 cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2934 cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2935
2936 if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2937 srq = qp->srq;
2938 if (!srq) {
2939 dev_err(&cq->hwq.pdev->dev,
2940 "FP: SRQ used but not defined??\n");
2941 return -EINVAL;
2942 }
2943 if (wr_id_idx >= srq->hwq.max_elements) {
2944 dev_err(&cq->hwq.pdev->dev,
2945 "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2946 wr_id_idx, srq->hwq.max_elements);
2947 return -EINVAL;
2948 }
2949 cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2950 bnxt_qplib_release_srqe(srq, wr_id_idx);
2951 cqe++;
2952 (*budget)--;
2953 *pcqe = cqe;
2954 } else {
2955 struct bnxt_qplib_swq *swq;
2956
2957 rq = &qp->rq;
2958 if (wr_id_idx > (rq->max_wqe - 1)) {
2959 dev_err(&cq->hwq.pdev->dev,
2960 "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2961 wr_id_idx, rq->max_wqe);
2962 return -EINVAL;
2963 }
2964 if (rq->swq_last != wr_id_idx)
2965 return -EINVAL;
2966 swq = &rq->swq[rq->swq_last];
2967 cqe->wr_id = swq->wr_id;
2968 cqe++;
2969 (*budget)--;
2970 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2971 swq->slots, &rq->dbinfo.flags);
2972 rq->swq_last = swq->next_idx;
2973 *pcqe = cqe;
2974
2975 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2976 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2977 /* Add qp to flush list of the CQ */
2978 bnxt_qplib_add_flush_qp(qp);
2979 }
2980 }
2981
2982 return 0;
2983 }
2984
2985 static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2986 struct cq_terminal *hwcqe,
2987 struct bnxt_qplib_cqe **pcqe,
2988 int *budget)
2989 {
2990 struct bnxt_qplib_qp *qp;
2991 struct bnxt_qplib_q *sq, *rq;
2992 struct bnxt_qplib_cqe *cqe;
2993 u32 swq_last = 0, cqe_cons;
2994 int rc = 0;
2995
2996 /* Check the Status */
2997 if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2998 dev_warn(&cq->hwq.pdev->dev,
2999 "FP: CQ Process Terminal Error status = 0x%x\n",
3000 hwcqe->status);
3001
3002 qp = (struct bnxt_qplib_qp *)((unsigned long)
3003 le64_to_cpu(hwcqe->qp_handle));
3004 if (!qp)
3005 return -EINVAL;
3006
3007 /* Must block new posting of SQ and RQ */
3008 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
3009
3010 sq = &qp->sq;
3011 rq = &qp->rq;
3012
3013 cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
3014 if (cqe_cons == 0xFFFF)
3015 goto do_rq;
3016 cqe_cons %= sq->max_sw_wqe;
3017
3018 if (qp->sq.flushed) {
3019 dev_dbg(&cq->hwq.pdev->dev,
3020 "%s: QP in Flush QP = %p\n", __func__, qp);
3021 goto sq_done;
3022 }
3023
3024 /* A terminal CQE can also aggregate prior successful completions.
3025  * So we must complete all CQEs from the current sq's cons up to
3026  * cqe_cons with status OK
3027 */
3028 cqe = *pcqe;
3029 while (*budget) {
3030 swq_last = sq->swq_last;
3031 if (swq_last == cqe_cons)
3032 break;
3033 if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
3034 memset(cqe, 0, sizeof(*cqe));
3035 cqe->status = CQ_REQ_STATUS_OK;
3036 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
3037 cqe->qp_handle = (u64)(unsigned long)qp;
3038 cqe->src_qp = qp->id;
3039 cqe->wr_id = sq->swq[swq_last].wr_id;
3040 cqe->type = sq->swq[swq_last].type;
3041 cqe++;
3042 (*budget)--;
3043 }
3044 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
3045 sq->swq[swq_last].slots, &sq->dbinfo.flags);
3046 sq->swq_last = sq->swq[swq_last].next_idx;
3047 }
3048 *pcqe = cqe;
3049 if (!(*budget) && swq_last != cqe_cons) {
3050 /* Out of budget */
3051 rc = -EAGAIN;
3052 goto sq_done;
3053 }
3054 sq_done:
3055 if (rc)
3056 return rc;
3057 do_rq:
3058 cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
3059 if (cqe_cons == 0xFFFF) {
3060 goto done;
3061 } else if (cqe_cons > rq->max_wqe - 1) {
3062 dev_err(&cq->hwq.pdev->dev,
3063 "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
3064 cqe_cons, rq->max_wqe);
3065 rc = -EINVAL;
3066 goto done;
3067 }
3068
3069 if (qp->rq.flushed) {
3070 dev_dbg(&cq->hwq.pdev->dev,
3071 "%s: QP in Flush QP = %p\n", __func__, qp);
3072 rc = 0;
3073 goto done;
3074 }
3075
3076 /* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
3077  * from the current rq->cons to the rq->prod, regardless of the
3078  * rq->cons the terminal CQE indicates
3079 */
3080
3081 /* Add qp to flush list of the CQ */
3082 bnxt_qplib_add_flush_qp(qp);
3083 done:
3084 return rc;
3085 }
3086
3087 static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
3088 struct cq_cutoff *hwcqe)
3089 {
3090 /* Check the Status */
3091 if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
3092 dev_err(&cq->hwq.pdev->dev,
3093 "FP: CQ Process Cutoff Error status = 0x%x\n",
3094 hwcqe->status);
3095 return -EINVAL;
3096 }
3097 clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
3098 wake_up_interruptible(&cq->waitq);
3099
3100 return 0;
3101 }
3102
3103 int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
3104 struct bnxt_qplib_cqe *cqe,
3105 int num_cqes)
3106 {
3107 struct bnxt_qplib_qp *qp = NULL;
3108 u32 budget = num_cqes;
3109 unsigned long flags;
3110
3111 spin_lock_irqsave(&cq->flush_lock, flags);
3112 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
3113 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
3114 __flush_sq(&qp->sq, qp, &cqe, &budget);
3115 }
3116
3117 list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
3118 dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
3119 __flush_rq(&qp->rq, qp, &cqe, &budget);
3120 }
3121 spin_unlock_irqrestore(&cq->flush_lock, flags);
3122
3123 return num_cqes - budget;
3124 }
3125
3126 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
3127 int num_cqes, struct bnxt_qplib_qp **lib_qp)
3128 {
3129 struct cq_base *hw_cqe;
3130 int budget, rc = 0;
3131 u32 hw_polled = 0;
3132 u8 type;
3133
3134 budget = num_cqes;
3135
3136 while (budget) {
3137 hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
3138
3139 /* Check for Valid bit */
3140 if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
3141 break;
3142
3143 /*
3144 * The valid test of the entry must be done first before
3145 * reading any further.
3146 */
3147 dma_rmb();
3148 /* From the device's CQE format to qplib_wc */
3149 type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
3150 switch (type) {
3151 case CQ_BASE_CQE_TYPE_REQ:
3152 rc = bnxt_qplib_cq_process_req(cq,
3153 (struct cq_req *)hw_cqe,
3154 &cqe, &budget,
3155 cq->hwq.cons, lib_qp);
3156 break;
3157 case CQ_BASE_CQE_TYPE_RES_RC:
3158 rc = bnxt_qplib_cq_process_res_rc(cq,
3159 (struct cq_res_rc *)
3160 hw_cqe, &cqe,
3161 &budget);
3162 break;
3163 case CQ_BASE_CQE_TYPE_RES_UD:
3164 rc = bnxt_qplib_cq_process_res_ud
3165 (cq, (struct cq_res_ud *)hw_cqe, &cqe,
3166 &budget);
3167 break;
3168 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3169 rc = bnxt_qplib_cq_process_res_raweth_qp1
3170 (cq, (struct cq_res_raweth_qp1 *)
3171 hw_cqe, &cqe, &budget);
3172 break;
3173 case CQ_BASE_CQE_TYPE_TERMINAL:
3174 rc = bnxt_qplib_cq_process_terminal
3175 (cq, (struct cq_terminal *)hw_cqe,
3176 &cqe, &budget);
3177 break;
3178 case CQ_BASE_CQE_TYPE_CUT_OFF:
3179 bnxt_qplib_cq_process_cutoff
3180 (cq, (struct cq_cutoff *)hw_cqe);
3181 /* Done processing this CQ */
3182 goto exit;
3183 default:
3184 dev_err(&cq->hwq.pdev->dev,
3185 "process_cq unknown type 0x%lx\n",
3186 hw_cqe->cqe_type_toggle &
3187 CQ_BASE_CQE_TYPE_MASK);
3188 rc = -EINVAL;
3189 break;
3190 }
3191 if (rc < 0) {
3192 if (rc == -EAGAIN)
3193 break;
3194 /* Error while processing the CQE, just skip to the
3195 * next one
3196 */
3197 if (type != CQ_BASE_CQE_TYPE_TERMINAL)
3198 dev_err(&cq->hwq.pdev->dev,
3199 "process_cqe error rc = 0x%x\n", rc);
3200 }
3201 hw_polled++;
3202 bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
3203 1, &cq->dbinfo.flags);
3204
3205 }
3206 if (hw_polled)
3207 bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
3208 exit:
3209 return num_cqes - budget;
3210 }
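/*
 * Illustrative polling sketch (editor's example; the batch size is
 * hypothetical): callers drain the CQ in budget-sized batches and keep
 * polling while full batches come back.
 *
 *	struct bnxt_qplib_cqe cqes[16];
 *	struct bnxt_qplib_qp *lib_qp = NULL;
 *	int n;
 *
 *	do {
 *		n = bnxt_qplib_poll_cq(cq, cqes, ARRAY_SIZE(cqes), &lib_qp);
 *		// translate the n qplib CQEs into ib_wc entries here
 *	} while (n == ARRAY_SIZE(cqes));
 */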
3211
3212 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
3213 {
3214 cq->dbinfo.toggle = cq->toggle;
3215 if (arm_type)
3216 bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3217 /* Use cq->arm_state to track whether the cq handler needs to be issued */
3218 atomic_set(&cq->arm_state, 1);
3219 }
3220
3221 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3222 {
3223 flush_workqueue(qp->scq->nq->cqn_wq);
3224 if (qp->scq != qp->rcq)
3225 flush_workqueue(qp->rcq->nq->cqn_wq);
3226 }
3227