/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators (header)
 */

#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__

#include <rdma/bnxt_re-abi.h>

/* A few helper structures are temporarily defined here;
 * get rid of these once roce_hsi.h is updated
 * in the original code base.
 */
struct sq_ud_ext_hdr {
	__le32 dst_qp;
	__le32 avid;
	__le64 rsvd;
};

struct sq_raw_ext_hdr {
	__le32 cfa_meta;
	__le32 rsvd0;
	__le64 rsvd1;
};

struct sq_rdma_ext_hdr {
	__le64 remote_va;
	__le32 remote_key;
	__le32 rsvd;
};

struct sq_atomic_ext_hdr {
	__le64 swap_data;
	__le64 cmp_data;
};

struct sq_fr_pmr_ext_hdr {
	__le64 pblptr;
	__le64 va;
};

struct sq_bind_ext_hdr {
	__le64 va;
	__le32 length_lo;
	__le32 length_hi;
};

struct rq_ext_hdr {
	__le64 rsvd1;
	__le64 rsvd2;
};

/* Helper structures end */

struct bnxt_qplib_srq {
	struct bnxt_qplib_pd *pd;
	struct bnxt_qplib_dpi *dpi;
	struct bnxt_qplib_db_info dbinfo;
	u64 srq_handle;
	u32 id;
	u16 wqe_size;
	u32 max_wqe;
	u32 max_sge;
	u32 threshold;
	bool arm_req;
	struct bnxt_qplib_cq *cq;
	struct bnxt_qplib_hwq hwq;
	struct bnxt_qplib_swq *swq;
	int start_idx;
	int last_idx;
	struct bnxt_qplib_sg_info sg_info;
	u16 eventq_hw_ring_id;
	spinlock_t lock; /* protect SRQE link list */
	u8 toggle;
};

struct bnxt_qplib_sge {
	u64 addr;
	u32 lkey;
	u32 size;
};

#define BNXT_QPLIB_QP_MAX_SGL	6
struct bnxt_qplib_swq {
	u64 wr_id;
	int next_idx;
	u8 type;
	u8 flags;
	u32 start_psn;
	u32 next_psn;
	u32 slot_idx;
	u8 slots;
	struct sq_psn_search *psn_search;
	struct sq_psn_search_ext *psn_ext;
};

struct bnxt_qplib_swqe {
	/* General */
#define BNXT_QPLIB_FENCE_WRID	0x46454E43	/* "FENC" */
	u64 wr_id;
	u8 reqs_type;
	u8 type;
#define BNXT_QPLIB_SWQE_TYPE_SEND			0
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM		1
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV		2
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE			4
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM	5
#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ			6
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP		8
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD	11
#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV			12
#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR		13
#define BNXT_QPLIB_SWQE_TYPE_REG_MR			13
#define BNXT_QPLIB_SWQE_TYPE_BIND_MW			14
#define BNXT_QPLIB_SWQE_TYPE_RECV			128
#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM		129
	u8 flags;
#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP		BIT(0)
#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE		BIT(1)
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE			BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT		BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE			BIT(4)
	struct bnxt_qplib_sge sg_list[BNXT_QPLIB_QP_MAX_SGL];
	int num_sge;
	/* Max inline data is 96 bytes */
	u32 inline_len;
#define BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH		96
	u8 inline_data[BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH];

	union {
		/* Send, with imm, inval key */
		struct {
			union {
				u32 imm_data;
				u32 inv_key;
			};
			u32 q_key;
			u32 dst_qp;
			u32 avid;
		} send;

		/* Send Raw Ethernet and QP1 */
		struct {
			u16 lflags;
			u16 cfa_action;
			u32 cfa_meta;
		} rawqp1;

		/* RDMA write, with imm, read */
		struct {
			union {
				u32 imm_data;
				u32 inv_key;
			};
			u64 remote_va;
			u32 r_key;
		} rdma;

		/* Atomic cmp/swap, fetch/add */
		struct {
			u64 remote_va;
			u32 r_key;
			u64 swap_data;
			u64 cmp_data;
		} atomic;

		/* Local Invalidate */
		struct {
			u32 inv_l_key;
		} local_inv;

		/* FR-PMR */
		struct {
			u8 access_cntl;
			u8 pg_sz_log;
			bool zero_based;
			u32 l_key;
			u32 length;
			u8 pbl_pg_sz_log;
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K			0
#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K			1
#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K			4
#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K			6
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M			8
#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M			9
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M			10
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G			18
			u8 levels;
#define PAGE_SHIFT_4K	12
			__le64 *pbl_ptr;
			dma_addr_t pbl_dma_ptr;
			u64 *page_list;
			u16 page_list_len;
			u64 va;
		} frmr;

		/* Bind */
		struct {
			u8 access_cntl;
#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE		BIT(0)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ		BIT(1)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE	BIT(2)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC	BIT(3)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND		BIT(4)
			bool zero_based;
			u8 mw_type;
			u32 parent_l_key;
			u32 r_key;
			u64 va;
			u32 length;
		} bind;
	};
};
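/* Illustrative sketch (hypothetical helper, not part of the driver API):
 * how a caller might fill a bnxt_qplib_swqe for a signaled, single-SGE
 * RDMA WRITE and post it via the post_send routines declared later in
 * this header (forward-declared here so the sketch is self-contained).
 * The qp, DMA address, keys and remote VA are assumed to come from the
 * caller.
 */
struct bnxt_qplib_qp;
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, struct bnxt_qplib_swqe *wqe);
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);

static inline int bnxt_qplib_example_post_rdma_write(struct bnxt_qplib_qp *qp,
						     u64 sge_addr, u32 lkey,
						     u32 len, u64 remote_va,
						     u32 rkey, u64 wr_id)
{
	struct bnxt_qplib_swqe wqe = {};
	int rc;

	wqe.type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
	wqe.flags = BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe.wr_id = wr_id;
	wqe.num_sge = 1;
	wqe.sg_list[0].addr = sge_addr;
	wqe.sg_list[0].lkey = lkey;
	wqe.sg_list[0].size = len;
	wqe.rdma.remote_va = remote_va;
	wqe.rdma.r_key = rkey;

	rc = bnxt_qplib_post_send(qp, &wqe);
	if (!rc)
		bnxt_qplib_post_send_db(qp);	/* ring the SQ doorbell */
	return rc;
}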
struct bnxt_qplib_q {
	struct bnxt_qplib_hwq hwq;
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_db_info dbinfo;
	struct bnxt_qplib_sg_info sg_info;
	u32 max_wqe;
	u32 max_sw_wqe;
	u16 wqe_size;
	u16 q_full_delta;
	u16 max_sge;
	u32 psn;
	bool condition;
	bool single;
	bool send_phantom;
	u32 phantom_wqe_cnt;
	u32 phantom_cqe_cnt;
	u32 next_cq_cons;
	bool flushed;
	u32 swq_start;
	u32 swq_last;
};

struct bnxt_qplib_qp {
	struct bnxt_qplib_pd *pd;
	struct bnxt_qplib_dpi *dpi;
	struct bnxt_qplib_chip_ctx *cctx;
	u64 qp_handle;
#define BNXT_QPLIB_QP_ID_INVALID	0xFFFFFFFF
	u32 id;
	u8 type;
	u8 sig_type;
	u8 wqe_mode;
	u8 state;
	u8 cur_qp_state;
	u64 modify_flags;
	u32 max_inline_data;
	u32 mtu;
	u8 path_mtu;
	bool en_sqd_async_notify;
	u16 pkey_index;
	u32 qkey;
	u32 dest_qp_id;
	u8 access;
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
	u64 wqe_cnt;
	u32 min_rnr_timer;
	u32 max_rd_atomic;
	u32 max_dest_rd_atomic;
	u32 dest_qpn;
	u8 smac[6];
	u16 vlan_id;
	u8 nw_type;
	struct bnxt_qplib_ah ah;

#define BTH_PSN_MASK	((1 << 24) - 1)
	/* SQ */
	struct bnxt_qplib_q sq;
	/* RQ */
	struct bnxt_qplib_q rq;
	/* SRQ */
	struct bnxt_qplib_srq *srq;
	/* CQ */
	struct bnxt_qplib_cq *scq;
	struct bnxt_qplib_cq *rcq;
	/* IRRQ and ORRQ */
	struct bnxt_qplib_hwq irrq;
	struct bnxt_qplib_hwq orrq;
	/* Header buffer for QP1 */
	int sq_hdr_buf_size;
	int rq_hdr_buf_size;
	/*
	 * Buffer space for ETH(14), IP or GRH(40), UDP header(8)
	 * and ib_bth + ib_deth (20).
	 * Max required is 82 when RoCE V2 is enabled
	 */
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2	86
	/* Ethernet header	=  14 */
	/* ib_grh		=  40 (provided by MAD) */
	/* ib_bth + ib_deth	=  20 */
	/* MAD			= 256 (provided by MAD) */
	/* iCRC			=   4 */
#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE	14
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2	512
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4	20
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6	40
#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE	20
	void *sq_hdr_buf;
	dma_addr_t sq_hdr_buf_map;
	void *rq_hdr_buf;
	dma_addr_t rq_hdr_buf_map;
	struct list_head sq_flush;
	struct list_head rq_flush;
	u32 msn;
	u32 msn_tbl_sz;
	bool is_host_msn_tbl;
};

#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE	sizeof(struct cq_base)

#define CQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
#define CQE_MAX_IDX_PER_PG	(CQE_CNT_PER_PG - 1)
#define CQE_PG(x)		(((x) & ~CQE_MAX_IDX_PER_PG) / CQE_CNT_PER_PG)
#define CQE_IDX(x)		((x) & CQE_MAX_IDX_PER_PG)

#define ROCE_CQE_CMP_V		0
#define CQE_CMP_VALID(hdr, pass)					\
	(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) ==			\
	   !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))

static inline u32 __bnxt_qplib_get_avail(struct bnxt_qplib_hwq *hwq)
{
	int cons, prod, avail;

	cons = hwq->cons;
	prod = hwq->prod;
	avail = cons - prod;
	if (cons <= prod)
		avail += hwq->depth;
	return avail;
}

static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *que, u8 slots)
{
	struct bnxt_qplib_hwq *hwq;
	int avail;

	hwq = &que->hwq;
	/* False full is possible, retrying post-send makes sense */
	avail = hwq->cons - hwq->prod;
	if (hwq->cons <= hwq->prod)
		avail += hwq->depth;
	return avail <= slots;
}
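/* Worked example (illustrative): with hwq->depth = 64, hwq->cons = 4 and
 * hwq->prod = 10, cons <= prod, so avail = (4 - 10) + 64 = 58 free slots;
 * a WQE needing 8 slots is accepted since 58 > 8. Because cons and prod
 * are read without locking, a stale snapshot can report a "false full",
 * which is why the post-send caller is expected to retry. A hypothetical
 * guard in the style of the post-send path:
 */
static inline int bnxt_qplib_example_reserve_slots(struct bnxt_qplib_q *que,
						   u8 slots)
{
	if (bnxt_qplib_queue_full(que, slots))
		return -ENOMEM;	/* caller may retry after completions drain */
	return 0;
}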
struct bnxt_qplib_cqe {
	u8 status;
	u8 type;
	u8 opcode;
	u32 length;
	u16 cfa_meta;
	u64 wr_id;
	union {
		__le32 immdata;
		u32 invrkey;
	};
	u64 qp_handle;
	u64 mr_handle;
	u16 flags;
	u8 smac[6];
	u32 src_qp;
	u16 raweth_qp1_flags;
	u16 raweth_qp1_errors;
	u16 raweth_qp1_cfa_code;
	u32 raweth_qp1_flags2;
	u32 raweth_qp1_metadata;
	u8 raweth_qp1_payload_offset;
	u16 pkey_index;
};

#define BNXT_QPLIB_QUEUE_START_PERIOD	0x01
struct bnxt_qplib_cq {
	struct bnxt_qplib_dpi *dpi;
	struct bnxt_qplib_db_info dbinfo;
	u32 max_wqe;
	u32 id;
	u16 count;
	u16 period;
	struct bnxt_qplib_hwq hwq;
	struct bnxt_qplib_hwq resize_hwq;
	u32 cnq_hw_ring_id;
	struct bnxt_qplib_nq *nq;
	bool resize_in_progress;
	struct bnxt_qplib_sg_info sg_info;
	u64 cq_handle;
	u8 toggle;

#define CQ_RESIZE_WAIT_TIME_MS	500
	unsigned long flags;
#define CQ_FLAGS_RESIZE_IN_PROG	1
	wait_queue_head_t waitq;
	struct list_head sqf_head, rqf_head;
	atomic_t arm_state;
	spinlock_t compl_lock; /* synch CQ handlers */
	/* Locking Notes:
	 * A QP can move to the error state via modify_qp, an async error
	 * event, or an error CQE seen during poll_cq. When a QP moves to
	 * the error state, it is added to two flush lists, one each for
	 * the SQ and the RQ.
	 * Each flush list is protected by qplib_cq->flush_lock; the
	 * flush_locks of both the SCQ and the RCQ must be acquired when a
	 * QP moves to the error state. The control path operations
	 * (modify_qp and async error events) are synchronized with
	 * poll_cq using the upper-level CQ locks (bnxt_re_cq->cq_lock) of
	 * both the SCQ and the RCQ.
	 * qplib_cq->flush_lock is required to synchronize two instances
	 * of poll_cq on the same QP while they manipulate the flush list.
	 */
	spinlock_t flush_lock; /* QP flush management */
	u16 cnq_events;
};

#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE	sizeof(struct xrrq_irrq)
#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE	sizeof(struct xrrq_orrq)
#define IRD_LIMIT_TO_IRRQ_SLOTS(x)	(2 * (x) + 2)
#define IRRQ_SLOTS_TO_IRD_LIMIT(s)	(((s) >> 1) - 1)
#define ORD_LIMIT_TO_ORRQ_SLOTS(x)	((x) + 1)
#define ORRQ_SLOTS_TO_ORD_LIMIT(s)	((s) - 1)
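/* Worked example (illustrative): an IRD limit of 6 needs
 * IRD_LIMIT_TO_IRRQ_SLOTS(6) = 2 * 6 + 2 = 14 IRRQ slots, and
 * IRRQ_SLOTS_TO_IRD_LIMIT(14) = (14 >> 1) - 1 = 6 recovers the limit.
 * Likewise an ORD limit of 6 maps to 6 + 1 = 7 ORRQ slots, and
 * 7 - 1 = 6 recovers it.
 */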
#define BNXT_QPLIB_MAX_NQE_ENTRY_SIZE	sizeof(struct nq_base)

#define NQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
#define NQE_MAX_IDX_PER_PG	(NQE_CNT_PER_PG - 1)
#define NQE_PG(x)		(((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
#define NQE_IDX(x)		((x) & NQE_MAX_IDX_PER_PG)

#define NQE_CMP_VALID(hdr, pass)					\
	(!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) ==		\
	   !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))

#define BNXT_QPLIB_NQE_MAX_CNT	(128 * 1024)

#define NQ_CONS_PCI_BAR_REGION	2
#define NQ_DB_KEY_CP		(0x2 << CMPL_DOORBELL_KEY_SFT)
#define NQ_DB_IDX_VALID		CMPL_DOORBELL_IDX_VALID
#define NQ_DB_IRQ_DIS		CMPL_DOORBELL_MASK
#define NQ_DB_CP_FLAGS_REARM	(NQ_DB_KEY_CP |		\
				 NQ_DB_IDX_VALID)
#define NQ_DB_CP_FLAGS		(NQ_DB_KEY_CP |		\
				 NQ_DB_IDX_VALID |	\
				 NQ_DB_IRQ_DIS)

struct bnxt_qplib_nq_db {
	struct bnxt_qplib_reg_desc reg;
	struct bnxt_qplib_db_info dbinfo;
};

typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
			     struct bnxt_qplib_cq *cq);
typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
			      struct bnxt_qplib_srq *srq, u8 event);

struct bnxt_qplib_nq {
	struct pci_dev *pdev;
	struct bnxt_qplib_res *res;
	char *name;
	struct bnxt_qplib_hwq hwq;
	struct bnxt_qplib_nq_db nq_db;
	u16 ring_id;
	int msix_vec;
	cpumask_t mask;
	struct tasklet_struct nq_tasklet;
	bool requested;
	int budget;

	cqn_handler_t cqn_handler;
	srqn_handler_t srqn_handler;
	struct workqueue_struct *cqn_wq;
};

struct bnxt_qplib_nq_work {
	struct work_struct work;
	struct bnxt_qplib_nq *nq;
	struct bnxt_qplib_cq *cq;
};
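/* Illustrative sketch (hypothetical, not part of the driver): the shape of
 * a CQ notification callback matching cqn_handler_t, as registered through
 * bnxt_qplib_enable_nq() below. A real consumer (the bnxt_re verbs layer)
 * would recover its own CQ wrapper from cq->cq_handle and invoke the IB
 * completion handler from here.
 */
static inline int bnxt_qplib_example_cqn_handler(struct bnxt_qplib_nq *nq,
						 struct bnxt_qplib_cq *cq)
{
	if (!cq)
		return -EINVAL;
	/* cq->cq_handle carries the consumer's private 64-bit cookie */
	return 0;
}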
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srq_handler);
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq);
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq);
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq);
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp,
					    u32 index);
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
			 int new_cqes);
void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num, struct bnxt_qplib_qp **qp);
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes);
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq);

static inline void *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que, u32 *swq_idx)
{
	u32 idx;

	idx = que->swq_start;
	if (swq_idx)
		*swq_idx = idx;
	return &que->swq[idx];
}

static inline void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx)
{
	que->swq_start = que->swq[idx].next_idx;
}

static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode,
				       bool is_sq)
{
	u32 slots;

	/* Queue depth is the number of slots. */
	slots = (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
	/* For variable WQE mode, need to align the slots to 256 */
	if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq)
		slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN);
	return slots;
}
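/* Worked example (illustrative, assuming sizeof(struct sq_sge) == 16):
 * with que->wqe_size = 128, each WQE spans 128 / 16 = 8 slots, so
 * que->max_wqe = 256 gives a depth of 2048 slots. In variable WQE mode
 * the SQ depth is further rounded up to a multiple of
 * BNXT_VAR_MAX_SLOT_ALIGN.
 */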
static inline u32 bnxt_qplib_set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
{
	return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
		que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true);
}

static inline u32 bnxt_qplib_set_sq_max_slot(u8 wqe_mode)
{
	return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
		sizeof(struct sq_send) / sizeof(struct sq_sge) : 1;
}

static inline u32 bnxt_qplib_set_rq_max_slot(u32 wqe_size)
{
	return (wqe_size / sizeof(struct sq_sge));
}

static inline u16 __xlate_qfd(u16 delta, u16 wqe_bytes)
{
	/* For Cu/Wh delta = 128, stride = 16, wqe_bytes = 128.
	 * For Gen-p5 B/C mode delta = 0, stride = 16, wqe_bytes = 128.
	 * For Gen-p5 delta = 0, stride = 16, 32 <= wqe_bytes <= 512,
	 * when 8916 is disabled.
	 */
	return (delta * wqe_bytes) / sizeof(struct sq_sge);
}

static inline u16 bnxt_qplib_calc_ilsize(struct bnxt_qplib_swqe *wqe, u16 max)
{
	u16 size = 0;
	int indx;

	for (indx = 0; indx < wqe->num_sge; indx++)
		size += wqe->sg_list[indx].size;
	if (size > max)
		size = max;

	return size;
}

/* MSN table update inline */
static inline __le64 bnxt_re_update_msn_tbl(u32 st_idx, u32 npsn, u32 start_psn)
{
	return cpu_to_le64((((u64)(st_idx) << SQ_MSN_SEARCH_START_IDX_SFT) &
			    SQ_MSN_SEARCH_START_IDX_MASK) |
			   (((u64)(npsn) << SQ_MSN_SEARCH_NEXT_PSN_SFT) &
			    SQ_MSN_SEARCH_NEXT_PSN_MASK) |
			   (((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) &
			    SQ_MSN_SEARCH_START_PSN_MASK));
}

static inline bool __is_var_wqe(struct bnxt_qplib_qp *qp)
{
	return (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE);
}

static inline bool __is_err_cqe_for_var_wqe(struct bnxt_qplib_qp *qp, u8 status)
{
	return (status != CQ_REQ_STATUS_OK) && __is_var_wqe(qp);
}
#endif /* __BNXT_QPLIB_FP_H__ */