/*
 * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators (header)
 */

#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__
/* Temp header structures for SQ */
struct sq_ud_ext_hdr {
        __le32 dst_qp;
        __le32 avid;
        __le64 rsvd;
};

struct sq_raw_ext_hdr {
        __le32 cfa_meta;
        __le32 rsvd0;
        __le64 rsvd1;
};

struct sq_rdma_ext_hdr {
        __le64 remote_va;
        __le32 remote_key;
        __le32 rsvd;
};

struct sq_atomic_ext_hdr {
        __le64 swap_data;
        __le64 cmp_data;
};

struct sq_fr_pmr_ext_hdr {
        __le64 pblptr;
        __le64 va;
};

struct sq_bind_ext_hdr {
        __le64 va;
        __le32 length_lo;
        __le32 length_hi;
};

struct rq_ext_hdr {
        __le64 rsvd1;
        __le64 rsvd2;
};

#define BNXT_QPLIB_ETHTYPE_ROCEV1       0x8915

struct bnxt_qplib_srq {
        struct bnxt_qplib_pd *pd;
        struct bnxt_qplib_dpi *dpi;
        struct bnxt_qplib_chip_ctx *cctx;
        struct bnxt_qplib_cq *cq;
        struct bnxt_qplib_swq *swq;
        struct bnxt_qplib_hwq hwq;
        struct bnxt_qplib_db_info dbinfo;
        struct bnxt_qplib_sg_info sginfo;
        u64 srq_handle;
        u32 id;
        u16 wqe_size;
        u32 max_wqe;
        u32 max_sge;
        u32 threshold;
        bool arm_req;
        int start_idx;
        int last_idx;
        u16 eventq_hw_ring_id;
        bool is_user;
        spinlock_t lock;
};

struct bnxt_qplib_sge {
        u64 addr;
        u32 size;
        u32 lkey;
};

/*
 * Buffer space for ETH(14), IP or GRH(40), UDP header(8)
 * and ib_bth + ib_deth (20).
 * Max required is 82 when RoCE V2 is enabled
 */

/*
 * RoCE V1 (38 bytes needed)
 * +------------+----------+--------+--------+-------+
 * |Eth-hdr(14B)| GRH (40B)|bth+deth| Mad    | iCRC  |
 * |            | supplied |  20B   |payload |  4B   |
 * |            | by user  |supplied|  256B  |       |
 * |            | mad      |        |by user |       |
 * |            |          |        |        |       |
 * |   sge 1    |  sge 2   | sge 3  | sge 4  | sge 5 |
 * +------------+----------+--------+--------+-------+
 */

/*
 * RoCE V2-IPv4 (46 Bytes needed)
 * +------------+----------+--------+--------+-------+
 * |Eth-hdr(14B)| IP-hdr   |UDP-hdr | Mad    | iCRC  |
 * |            | supplied |  8B    |payload |  4B   |
 * |            | by user  |bth+deth|  256B  |       |
 * |            | mad lower|  20B   |supplied|       |
 * |            | 20B out  | (sge 3)|by user |       |
 * |            | of 40B   |        |        |       |
 * |            | grh space|        |        |       |
 * |   sge 1    |  sge 2   | sge 3  | sge 4  | sge 5 |
 * +------------+----------+--------+--------+-------+
 */

/*
 * RoCE V2-IPv6 (46 Bytes needed)
 * +------------+----------+--------+--------+-------+
 * |Eth-hdr(14B)| IPv6     |UDP-hdr | Mad    | iCRC  |
 * |            | supplied |  8B    |payload |  4B   |
 * |            | by user  |bth+deth|  256B  |       |
 * |            | mad lower|  20B   |supplied|       |
 * |            | 40 bytes |        |by user |       |
 * |            | grh space|        |        |       |
 * |            |          |        |        |       |
 * |   sge 1    |  sge 2   | sge 3  | sge 4  | sge 5 |
 * +------------+----------+--------+--------+-------+
 */

#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE          74
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2       86
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE          46
#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE      14
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2       512
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4        20
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6        40
#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE    20
#define BNXT_QPLIB_MAX_SQSZ                     0xFFFF

struct bnxt_qplib_hdrbuf {
        dma_addr_t dma_map;
        void *va;
        u32 len;
        u32 step;
};

struct bnxt_qplib_swq {
        u64 wr_id;
        int next_idx;
        u8 type;
        u8 flags;
        u32 start_psn;
        u32 next_psn;
        u32 slot_idx;
        u8 slots;
        /* WIP: make it void * to handle legacy also */
        struct sq_psn_search *psn_search;
        void *inline_data;
};

struct bnxt_qplib_swqe {
        /* General */
#define BNXT_QPLIB_FENCE_WRID           0x46454E43 /* "FENC" */
#define BNXT_QPLIB_QP1_DUMMY_WRID       0x44554D59 /* "DUMY" */
        u64 wr_id;
        u8 reqs_type;
        u8 type;
#define BNXT_QPLIB_SWQE_TYPE_SEND                       0
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM              1
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV              2
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE                 4
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM        5
#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ                  6
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP         8
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD       11
#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV                  12
#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR                13
#define BNXT_QPLIB_SWQE_TYPE_REG_MR                     13
#define BNXT_QPLIB_SWQE_TYPE_BIND_MW                    14
#define BNXT_QPLIB_SWQE_TYPE_RECV                       128
#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM              129
        u8 flags;
#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP       (1 << 0)
#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE   (1 << 1)
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE          (1 << 2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT     (1 << 3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE            (1 << 4)
        struct bnxt_qplib_sge *sg_list;
        int num_sge;

        union {
                /* Send, with imm, inval key */
                struct {
                        union {
                                __be32 imm_data;
                                u32 inv_key;
                        };
                        u32 q_key;
                        u32 dst_qp;
                        u16 avid;
                } send;

                /* Send Raw Ethernet and QP1 */
                struct {
                        u16 lflags;
                        u16 cfa_action;
                        u32 cfa_meta;
                } rawqp1;

                /* RDMA write, with imm, read */
                struct {
                        union {
                                __be32 imm_data;
                                u32 inv_key;
                        };
                        u64 remote_va;
                        u32 r_key;
                } rdma;

                /* Atomic cmp/swap, fetch/add */
                struct {
                        u64 remote_va;
                        u32 r_key;
                        u64 swap_data;
                        u64 cmp_data;
                } atomic;

                /* Local Invalidate */
                struct {
                        u32 inv_l_key;
                } local_inv;

                /* FR-PMR */
                struct {
                        u8 access_cntl;
                        u8 pg_sz_log;
                        bool zero_based;
                        u32 l_key;
                        u32 length;
                        u8 pbl_pg_sz_log;
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K    0
#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K    1
#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K   4
#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K  6
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M    8
#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M    9
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M    10
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G    18
                        u8 levels;
#define PAGE_SHIFT_4K   12
                        __le64 *pbl_ptr;
                        dma_addr_t pbl_dma_ptr;
                        u64 *page_list;
                        u16 page_list_len;
                        u64 va;
                } frmr;

                /* Bind */
                struct {
                        u8 access_cntl;
#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE         (1 << 0)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ         (1 << 1)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE        (1 << 2)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC       (1 << 3)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND         (1 << 4)
                        bool zero_based;
                        u8 mw_type;
                        u32 parent_l_key;
                        u32 r_key;
                        u64 va;
                        u32 length;
                } bind;
        };
};
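
/*
 * Illustrative sketch only (not the driver's verbs path): one way a caller
 * could describe a single-SGE, signaled SEND with the structure above before
 * handing it to bnxt_qplib_post_send(). The helper name is hypothetical and
 * assumes the usual kernel string/types includes are already pulled in.
 */
static inline void bnxt_qplib_example_init_send_wqe(struct bnxt_qplib_swqe *wqe,
                                                    struct bnxt_qplib_sge *sge,
                                                    u64 wr_id)
{
        memset(wqe, 0, sizeof(*wqe));
        wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
        wqe->flags = BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
        wqe->wr_id = wr_id;
        wqe->sg_list = sge;     /* caller-owned SGE array */
        wqe->num_sge = 1;
}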

struct bnxt_qplib_q {
        struct bnxt_qplib_swq *swq;
        struct bnxt_qplib_db_info dbinfo;
        struct bnxt_qplib_sg_info sginfo;
        struct bnxt_qplib_hwq hwq;
        u32 max_wqe;
        u16 max_sge;
        u16 wqe_size;
        u16 q_full_delta;
        u32 psn;
        bool condition;
        bool single;
        bool legacy_send_phantom;
        u32 phantom_wqe_cnt;
        u32 phantom_cqe_cnt;
        u32 next_cq_cons;
        bool flushed;
        u32 swq_start;
        u32 swq_last;
};

#define BNXT_QPLIB_PPP_REQ              0x1
#define BNXT_QPLIB_PPP_ST_IDX_SHIFT     0x1

struct bnxt_qplib_ppp {
        u32 dpi;
        u8 req;
        u8 st_idx_en;
};

struct bnxt_qplib_qp {
        struct bnxt_qplib_pd *pd;
        struct bnxt_qplib_dpi *dpi;
        struct bnxt_qplib_chip_ctx *cctx;
        u64 qp_handle;
#define BNXT_QPLIB_QP_ID_INVALID        0xFFFFFFFF
        u32 id;
        u8 type;
        u8 sig_type;
        u8 wqe_mode;
        u8 state;
        u8 cur_qp_state;
        u8 is_user;
        u64 modify_flags;
        u32 max_inline_data;
        u32 mtu;
        u32 path_mtu;
        bool en_sqd_async_notify;
        u16 pkey_index;
        u32 qkey;
        u32 dest_qp_id;
        u8 access;
        u8 timeout;
        u8 retry_cnt;
        u8 rnr_retry;
        u64 wqe_cnt;
        u32 min_rnr_timer;
        u32 max_rd_atomic;
        u32 max_dest_rd_atomic;
        u32 dest_qpn;
        u8 smac[6];
        u16 vlan_id;
        u8 nw_type;
        u16 port_id;
        struct bnxt_qplib_ah ah;
        struct bnxt_qplib_ppp ppp;

#define BTH_PSN_MASK    ((1 << 24) - 1)
        /* SQ */
        struct bnxt_qplib_q sq;
        /* RQ */
        struct bnxt_qplib_q rq;
        /* SRQ */
        struct bnxt_qplib_srq *srq;
        /* CQ */
        struct bnxt_qplib_cq *scq;
        struct bnxt_qplib_cq *rcq;
        /* IRRQ and ORRQ */
        struct bnxt_qplib_hwq irrq;
        struct bnxt_qplib_hwq orrq;
        /* Header buffer for QP1 */
        struct bnxt_qplib_hdrbuf *sq_hdr_buf;
        struct bnxt_qplib_hdrbuf *rq_hdr_buf;

        /* ToS */
        u8 tos_ecn;
        u8 tos_dscp;
        /* To track the SQ and RQ flush list */
        struct list_head sq_flush;
        struct list_head rq_flush;
        /* 4 bytes of the QP's scrambled MAC address, received from FW */
        u32 lag_src_mac;
        u32 msn;
        u32 msn_tbl_sz;
        /* get devflags in PI code */
        u16 dev_cap_flags;
};

#define CQE_CMP_VALID(hdr, pass) \
        (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
         !(pass & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
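
/*
 * Hedged sketch of how a poller might consume CQE_CMP_VALID(); the helper
 * name is hypothetical. It assumes struct cq_base, CQ_BASE_TOGGLE and
 * dma_rmb() are visible via the roce_hsi/barrier headers the driver already
 * includes before this one.
 */
static inline bool bnxt_qplib_example_cqe_ready(struct cq_base *hw_cqe,
                                                u32 raw_cons)
{
        if (!CQE_CMP_VALID(hw_cqe, raw_cons))
                return false;
        /* Order the valid/toggle check before reading the rest of the CQE. */
        dma_rmb();
        return true;
}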

static inline u32 __bnxt_qplib_get_avail(struct bnxt_qplib_hwq *hwq)
{
        int cons, prod, avail;

        /* A false full is possible; retrying post-send makes sense */
        cons = hwq->cons;
        prod = hwq->prod;
        avail = cons - prod;
        if (cons <= prod)
                avail += hwq->depth;
        return avail;
}

static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_hwq *hwq, u8 slots)
{
        return __bnxt_qplib_get_avail(hwq) <= slots;
}
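
/*
 * Hedged illustration (the real checks live in qplib_fp.c): how a post path
 * might combine the helpers above with q_full_delta to keep extra headroom
 * for a WQE that needs "slots" 16B slots. The helper name is hypothetical.
 */
static inline bool bnxt_qplib_example_sq_has_room(struct bnxt_qplib_q *sq,
                                                  u8 slots)
{
        /* A false full is still possible; callers may simply retry later. */
        return __bnxt_qplib_get_avail(&sq->hwq) >
               (u32)slots + sq->q_full_delta;
}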

struct bnxt_qplib_cqe {
        u8 status;
        u8 type;
        u8 opcode;
        u32 length;
        /* Lower 16 bits are cfa_metadata0, upper 16 are cfa_metadata1 */
        u32 cfa_meta;
#define BNXT_QPLIB_META1_SHIFT          16
#define BNXT_QPLIB_CQE_CFA_META1_VALID  0x80000UL
        u64 wr_id;
        union {
                __be32 immdata;
                u32 invrkey;
        };
        u64 qp_handle;
        u64 mr_handle;
        u16 flags;
        u8 smac[6];
        u32 src_qp;
        u16 raweth_qp1_flags;
        u16 raweth_qp1_errors;
        u16 raweth_qp1_cfa_code;
        u32 raweth_qp1_flags2;
        u32 raweth_qp1_metadata;
        u8 raweth_qp1_payload_offset;
        u16 pkey_index;
};

#define BNXT_QPLIB_QUEUE_START_PERIOD   0x01
struct bnxt_qplib_cq {
        struct bnxt_qplib_dpi *dpi;
        struct bnxt_qplib_chip_ctx *cctx;
        struct bnxt_qplib_nq *nq;
        struct bnxt_qplib_db_info dbinfo;
        struct bnxt_qplib_sg_info sginfo;
        struct bnxt_qplib_hwq hwq;
        struct bnxt_qplib_hwq resize_hwq;
        struct list_head sqf_head;
        struct list_head rqf_head;
        u32 max_wqe;
        u32 id;
        u16 count;
        u16 period;
        u32 cnq_hw_ring_id;
        u64 cq_handle;
        atomic_t arm_state;
#define CQ_RESIZE_WAIT_TIME_MS  500
        unsigned long flags;
#define CQ_FLAGS_RESIZE_IN_PROG 1
        wait_queue_head_t waitq;
        spinlock_t flush_lock; /* lock flush queue list */
        spinlock_t compl_lock; /* synch CQ handlers */
        u16 cnq_events;
        bool is_cq_err_event;
        bool destroyed;
        u8 toggle;
};

#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE sizeof(struct xrrq_orrq)
#define IRD_LIMIT_TO_IRRQ_SLOTS(x)      (2 * x + 2)
#define IRRQ_SLOTS_TO_IRD_LIMIT(s)      ((s >> 1) - 1)
#define ORD_LIMIT_TO_ORRQ_SLOTS(x)      (x + 1)
#define ORRQ_SLOTS_TO_ORD_LIMIT(s)      (s - 1)
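
/*
 * Worked example (reference only): an IRD limit of 8 needs
 * IRD_LIMIT_TO_IRRQ_SLOTS(8) = 2 * 8 + 2 = 18 IRRQ slots, and
 * IRRQ_SLOTS_TO_IRD_LIMIT(18) = (18 >> 1) - 1 = 8 recovers the limit.
 * Likewise ORD_LIMIT_TO_ORRQ_SLOTS(4) = 5 and ORRQ_SLOTS_TO_ORD_LIMIT(5) = 4.
 */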

#define NQE_CMP_VALID(hdr, pass) \
        (!!(le32_to_cpu((hdr)->info63_v & 0xffffffff) & NQ_BASE_V) == \
         !(pass & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))

#define BNXT_QPLIB_NQE_MAX_CNT  (128 * 1024)

/* MSN table print macros for debugging */
#define BNXT_RE_MSN_IDX(m)  (((m) & SQ_MSN_SEARCH_START_IDX_MASK) >> \
                             SQ_MSN_SEARCH_START_IDX_SFT)
#define BNXT_RE_MSN_NPSN(m) (((m) & SQ_MSN_SEARCH_NEXT_PSN_MASK) >> \
                             SQ_MSN_SEARCH_NEXT_PSN_SFT)
#define BNXT_RE_MSN_SPSN(m) (((m) & SQ_MSN_SEARCH_START_PSN_MASK) >> \
                             SQ_MSN_SEARCH_START_PSN_SFT)
#define BNXT_MSN_TBLE_SGE       6

struct bnxt_qplib_nq_stats {
        u64 num_dbqne_processed;
        u64 num_srqne_processed;
        u64 num_cqne_processed;
        u64 num_tasklet_resched;
        u64 num_nq_rearm;
};

struct bnxt_qplib_nq_db {
        struct bnxt_qplib_reg_desc reg;
        void __iomem *db;
        struct bnxt_qplib_db_info dbinfo;
};

typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
                             struct bnxt_qplib_cq *cq);
typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
                              struct bnxt_qplib_srq *srq, u8 event);

struct bnxt_qplib_nq {
        struct bnxt_qplib_res *res;
        struct bnxt_qplib_hwq hwq;
        struct bnxt_qplib_nq_db nq_db;

        char *name;
        u16 ring_id;
        int msix_vec;
        bool requested;
        int budget;
        u32 load;
        struct mutex lock;

        cqn_handler_t cqn_handler;
        srqn_handler_t srqn_handler;
        struct workqueue_struct *cqn_wq;
        struct bnxt_qplib_nq_stats stats;
};
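
/*
 * Hedged sketch of a CQ-notification callback matching the cqn_handler_t
 * typedef above; the function name and body are illustrative only. A real
 * consumer would typically schedule completion processing (for example a
 * deferred call into bnxt_qplib_poll_cq()) from here rather than doing it
 * inline.
 */
static inline int bnxt_qplib_example_cqn_handler(struct bnxt_qplib_nq *nq,
                                                 struct bnxt_qplib_cq *cq)
{
        /* e.g. kick a work item that drains "cq" via bnxt_qplib_poll_cq() */
        return 0;
}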

struct bnxt_qplib_nq_work {
        struct work_struct work;
        struct bnxt_qplib_nq *nq;
        struct bnxt_qplib_cq *cq;
};

static inline dma_addr_t
bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
        struct bnxt_qplib_hdrbuf *buf;

        buf = qp->rq_hdr_buf;
        return (buf->dma_map + index * buf->step);
}
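
/*
 * Example with illustrative numbers only: if rq_hdr_buf->step were 512
 * (BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2), index 3 would map to
 * dma_map + 3 * 512 = dma_map + 1536.
 */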

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
                            int msix_vector, bool need_init);
int bnxt_qplib_enable_nq(struct bnxt_qplib_nq *nq, int nq_idx,
                         int msix_vector, int bar_reg_offset,
                         cqn_handler_t cqn_handler,
                         srqn_handler_t srq_handler);
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_srq *srq);
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
                         struct bnxt_qplib_srq *srq);
int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
                           struct bnxt_qplib_srq *srq);
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
                             struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
                                struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
                                struct bnxt_qplib_sge *sge);
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
                         struct bnxt_qplib_swqe *wqe);
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
                         struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_modify_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
                         int new_cqes);
void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
                                   struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
void bnxt_qplib_free_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
                       int num, struct bnxt_qplib_qp **qp);
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq_mem(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq_mem(struct bnxt_qplib_res *res,
                            struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
                                  struct bnxt_qplib_cqe *cqe,
                                  int num_cqes);
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
void bnxt_qplib_free_hdr_buf(struct bnxt_qplib_res *res,
                             struct bnxt_qplib_qp *qp);
int bnxt_qplib_alloc_hdr_buf(struct bnxt_qplib_res *res,
                             struct bnxt_qplib_qp *qp, u32 slen, u32 rlen);

static inline bool __can_request_ppp(struct bnxt_qplib_qp *qp)
{
        bool can_request = false;

        if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RESET &&
            qp->state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
            qp->ppp.req &&
            !(qp->ppp.st_idx_en &
              CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED))
                can_request = true;
        return can_request;
}

/* MSN table update inline helper */
static inline uint64_t bnxt_re_update_msn_tbl(uint32_t st_idx, uint32_t npsn, uint32_t start_psn)
{
        return cpu_to_le64((((u64)(st_idx) << SQ_MSN_SEARCH_START_IDX_SFT) &
                            SQ_MSN_SEARCH_START_IDX_MASK) |
                           (((u64)(npsn) << SQ_MSN_SEARCH_NEXT_PSN_SFT) &
                            SQ_MSN_SEARCH_NEXT_PSN_MASK) |
                           (((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) &
                            SQ_MSN_SEARCH_START_PSN_MASK));
}
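
/*
 * Hedged illustration: the BNXT_RE_MSN_* extractors above undo the packing
 * done by bnxt_re_update_msn_tbl(). "entry" is a hypothetical MSN-table value
 * read back in little-endian form; the helper name is illustrative only.
 */
static inline u32 bnxt_re_example_msn_start_idx(__le64 entry)
{
        return (u32)BNXT_RE_MSN_IDX(le64_to_cpu(entry));
}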

void bnxt_re_schedule_dbq_event(struct bnxt_qplib_res *res);
#endif