/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators (header)
 */

#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__

#include <rdma/bnxt_re-abi.h>

/* A few helper structures are temporarily defined here;
 * get rid of these once roce_hsi.h is updated in the
 * original code base.
 */
struct sq_ud_ext_hdr {
	__le32 dst_qp;
	__le32 avid;
	__le64 rsvd;
};

struct sq_raw_ext_hdr {
	__le32 cfa_meta;
	__le32 rsvd0;
	__le64 rsvd1;
};

struct sq_rdma_ext_hdr {
	__le64 remote_va;
	__le32 remote_key;
	__le32 rsvd;
};

struct sq_atomic_ext_hdr {
	__le64 swap_data;
	__le64 cmp_data;
};

struct sq_fr_pmr_ext_hdr {
	__le64 pblptr;
	__le64 va;
};

struct sq_bind_ext_hdr {
	__le64 va;
	__le32 length_lo;
	__le32 length_hi;
};

struct rq_ext_hdr {
	__le64 rsvd1;
	__le64 rsvd2;
};

/* Helper structures end */

struct bnxt_qplib_srq {
	struct bnxt_qplib_pd *pd;
	struct bnxt_qplib_dpi *dpi;
	struct bnxt_qplib_db_info dbinfo;
	u64 srq_handle;
	u32 id;
	u16 wqe_size;
	u32 max_wqe;
	u32 max_sge;
	u32 threshold;
	bool arm_req;
	struct bnxt_qplib_cq *cq;
	struct bnxt_qplib_hwq hwq;
	struct bnxt_qplib_swq *swq;
	int start_idx;
	int last_idx;
	struct bnxt_qplib_sg_info sg_info;
	u16 eventq_hw_ring_id;
	spinlock_t lock; /* protect SRQE link list */
	u8 toggle;
};

struct bnxt_qplib_sge {
	u64 addr;
	u32 lkey;
	u32 size;
};

struct bnxt_qplib_swq {
	u64 wr_id;
	int next_idx;
	u8 type;
	u8 flags;
	u32 start_psn;
	u32 next_psn;
	u32 slot_idx;
	u8 slots;
	struct sq_psn_search *psn_search;
	struct sq_psn_search_ext *psn_ext;
};

struct bnxt_qplib_swqe {
	/* General */
#define BNXT_QPLIB_FENCE_WRID	0x46454E43 /* "FENC" */
	u64 wr_id;
	u8 reqs_type;
	u8 type;
#define BNXT_QPLIB_SWQE_TYPE_SEND			0
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM		1
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV		2
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE			4
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM	5
#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ			6
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP		8
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD	11
#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV			12
#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR		13
#define BNXT_QPLIB_SWQE_TYPE_REG_MR			13
#define BNXT_QPLIB_SWQE_TYPE_BIND_MW			14
#define BNXT_QPLIB_SWQE_TYPE_RECV			128
#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM		129
	u8 flags;
#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP		BIT(0)
#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE		BIT(1)
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE			BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT		BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE			BIT(4)
	struct bnxt_qplib_sge sg_list[BNXT_VAR_MAX_SGE];
	int num_sge;
	/* Max inline data is 96 bytes */
	u32 inline_len;
#define BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH		96
	u8 inline_data[BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH];

	union {
		/* Send, with imm, inval key */
		struct {
			union {
				u32 imm_data;
				u32 inv_key;
			};
			u32 q_key;
			u32 dst_qp;
			u32 avid;
		} send;

		/* Send Raw Ethernet and QP1 */
		struct {
			u16 lflags;
			u16 cfa_action;
			u32 cfa_meta;
		} rawqp1;

		/* RDMA write, with imm, read */
		struct {
			union {
				u32 imm_data;
				u32 inv_key;
			};
			u64 remote_va;
			u32 r_key;
		} rdma;

		/* Atomic cmp/swap, fetch/add */
		struct {
			u64 remote_va;
			u32 r_key;
			u64 swap_data;
			u64 cmp_data;
		} atomic;

		/* Local Invalidate */
		struct {
			u32 inv_l_key;
		} local_inv;

		/* FR-PMR */
		struct {
			u8 access_cntl;
			u8 pg_sz_log;
			bool zero_based;
			u32 l_key;
			u32 length;
			u8 pbl_pg_sz_log;
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K	0
#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K	1
#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K	4
#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K	6
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M	8
#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M	9
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M	10
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G	18
			u8 levels;
#define PAGE_SHIFT_4K	12
			__le64 *pbl_ptr;
			dma_addr_t pbl_dma_ptr;
			u64 *page_list;
			u16 page_list_len;
			u64 va;
		} frmr;

		/* Bind */
		struct {
			u8 access_cntl;
#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE		BIT(0)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ		BIT(1)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE	BIT(2)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC	BIT(3)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND		BIT(4)
			bool zero_based;
			u8 mw_type;
			u32 parent_l_key;
			u32 r_key;
			u64 va;
			u32 length;
		} bind;
	};
};
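
/*
 * Illustrative only: a minimal sketch of filling a SWQE for a signaled,
 * single-SGE RDMA WRITE before handing it to bnxt_qplib_post_send()
 * (declared below). All right-hand-side values ("my_cookie", "dma_addr",
 * etc.) are assumptions for the sketch, not requirements of this header:
 *
 *	struct bnxt_qplib_swqe wqe = {};
 *
 *	wqe.wr_id = my_cookie;			// echoed back in the CQE
 *	wqe.type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
 *	wqe.flags = BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
 *	wqe.sg_list[0].addr = dma_addr;		// DMA-mapped local buffer
 *	wqe.sg_list[0].lkey = lkey;
 *	wqe.sg_list[0].size = len;
 *	wqe.num_sge = 1;
 *	wqe.rdma.remote_va = remote_va;
 *	wqe.rdma.r_key = rkey;
 *	// then: bnxt_qplib_post_send(qp, &wqe); bnxt_qplib_post_send_db(qp);
 */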

struct bnxt_qplib_q {
	struct bnxt_qplib_hwq hwq;
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_db_info dbinfo;
	struct bnxt_qplib_sg_info sg_info;
	u32 max_wqe;
	u32 max_sw_wqe;
	u16 wqe_size;
	u16 q_full_delta;
	u16 max_sge;
	u32 psn;
	bool condition;
	bool single;
	bool send_phantom;
	u32 phantom_wqe_cnt;
	u32 phantom_cqe_cnt;
	u32 next_cq_cons;
	bool flushed;
	u32 swq_start;
	u32 swq_last;
};

struct bnxt_qplib_qp {
	struct bnxt_qplib_pd *pd;
	struct bnxt_qplib_dpi *dpi;
	struct bnxt_qplib_chip_ctx *cctx;
	u64 qp_handle;
#define BNXT_QPLIB_QP_ID_INVALID	0xFFFFFFFF
	u32 id;
	u8 type;
	u8 sig_type;
	u8 wqe_mode;
	u8 state;
	u8 cur_qp_state;
	u64 modify_flags;
	u32 ext_modify_flags;
	u32 max_inline_data;
	u32 mtu;
	u8 path_mtu;
	bool en_sqd_async_notify;
	u16 pkey_index;
	u32 qkey;
	u32 dest_qp_id;
	u8 access;
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
	u64 wqe_cnt;
	u32 min_rnr_timer;
	u32 max_rd_atomic;
	u32 max_dest_rd_atomic;
	u32 dest_qpn;
	u8 smac[6];
	u16 vlan_id;
	u16 port_id;
	u16 udp_sport;
	u8 nw_type;
	struct bnxt_qplib_ah ah;

#define BTH_PSN_MASK	((1 << 24) - 1)
	/* SQ */
	struct bnxt_qplib_q sq;
	/* RQ */
	struct bnxt_qplib_q rq;
	/* SRQ */
	struct bnxt_qplib_srq *srq;
	/* CQ */
	struct bnxt_qplib_cq *scq;
	struct bnxt_qplib_cq *rcq;
	/* IRRQ and ORRQ */
	struct bnxt_qplib_hwq irrq;
	struct bnxt_qplib_hwq orrq;
	/* Header buffer for QP1 */
	int sq_hdr_buf_size;
	int rq_hdr_buf_size;
	/*
	 * Buffer space for ETH(14), IP or GRH(40), UDP header(8)
	 * and ib_bth + ib_deth (20).
	 * Max required is 82 when RoCE V2 is enabled
	 */
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2	86
	/* Ethernet header	= 14 */
	/* ib_grh		= 40 (provided by MAD) */
	/* ib_bth + ib_deth	= 20 */
	/* MAD			= 256 (provided by MAD) */
	/* iCRC			= 4 */
#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE	14
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2	512
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4	20
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6	40
#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE	20
	void *sq_hdr_buf;
	dma_addr_t sq_hdr_buf_map;
	void *rq_hdr_buf;
	dma_addr_t rq_hdr_buf_map;
	struct list_head sq_flush;
	struct list_head rq_flush;
	u32 msn;
	u32 msn_tbl_sz;
	bool is_host_msn_tbl;
	u8 tos_dscp;
	u32 ugid_index;
	u32 rate_limit;
	u8 shaper_allocation_status;
};

#define BNXT_RE_MAX_MSG_SIZE	0x80000000
#define BNXT_RE_INVAL_MSG_SIZE	0xFFFFFFFF

#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE	sizeof(struct cq_base)

#define CQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
#define CQE_MAX_IDX_PER_PG	(CQE_CNT_PER_PG - 1)
#define CQE_PG(x)		(((x) & ~CQE_MAX_IDX_PER_PG) / CQE_CNT_PER_PG)
#define CQE_IDX(x)		((x) & CQE_MAX_IDX_PER_PG)

#define ROCE_CQE_CMP_V	0
#define CQE_CMP_VALID(hdr, pass)					\
	(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) ==			\
	   !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
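
/*
 * Note on CQE_CMP_VALID() (an explanatory aid, inferred from the macro
 * rather than from documented hardware behavior): this is a toggle-bit
 * validity check. Hardware is expected to flip CQ_BASE_TOGGLE in the CQEs
 * it writes on every wrap of the CQ ring, while the consumer carries its
 * own epoch bit in its raw consumer cursor; a CQE is treated as valid only
 * while the two agree. A hypothetical polling loop ("raw_cons" is an
 * assumed caller-side cursor, not defined in this header):
 *
 *	while (CQE_CMP_VALID(hw_cqe, raw_cons)) {
 *		process(hw_cqe);	// consume this entry
 *		raw_cons++;		// epoch bit flips on ring wrap
 *	}
 */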

static inline u32 __bnxt_qplib_get_avail(struct bnxt_qplib_hwq *hwq)
{
	int cons, prod, avail;

	cons = hwq->cons;
	prod = hwq->prod;
	avail = cons - prod;
	if (cons <= prod)
		avail += hwq->depth;
	return avail;
}

static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *que,
					 u8 slots)
{
	struct bnxt_qplib_hwq *hwq;
	int avail;

	hwq = &que->hwq;
	/* False full is possible, retrying post-send makes sense */
	avail = hwq->cons - hwq->prod;
	if (hwq->cons <= hwq->prod)
		avail += hwq->depth;
	return avail <= slots;
}
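
/*
 * Worked example for the ring math above (illustrative numbers only):
 * with depth = 64, cons = 4 and prod = 10, avail = 4 - 10 + 64 = 58 free
 * slots, so bnxt_qplib_queue_full(que, 8) returns false. Since prod and
 * cons are sampled without synchronization here, a stale cons can make
 * the queue look full when it is not; hence the note above that retrying
 * the post-send makes sense.
 */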

/* CQ coalescing parameters */
struct bnxt_qplib_cq_coal_param {
	u16 buf_maxtime;
	u8 normal_maxbuf;
	u8 during_maxbuf;
	u8 en_ring_idle_mode;
	u8 enable;
};

#define BNXT_QPLIB_CQ_COAL_DEF_BUF_MAXTIME		0x1
#define BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P7	0x8
#define BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P7	0x8
#define BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P5	0x1
#define BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P5	0x1
#define BNXT_QPLIB_CQ_COAL_DEF_EN_RING_IDLE_MODE	0x1
#define BNXT_QPLIB_CQ_COAL_MAX_BUF_MAXTIME		0x1bf
#define BNXT_QPLIB_CQ_COAL_MAX_NORMAL_MAXBUF		0x1f
#define BNXT_QPLIB_CQ_COAL_MAX_DURING_MAXBUF		0x1f
#define BNXT_QPLIB_CQ_COAL_MAX_EN_RING_IDLE_MODE	0x1

struct bnxt_qplib_cqe {
	u8 status;
	u8 type;
	u8 opcode;
	u32 length;
	u16 cfa_meta;
	u64 wr_id;
	union {
		u32 immdata;
		u32 invrkey;
	};
	u64 qp_handle;
	u64 mr_handle;
	u16 flags;
	u8 smac[6];
	u32 src_qp;
	u16 raweth_qp1_flags;
	u16 raweth_qp1_errors;
	u16 raweth_qp1_cfa_code;
	u32 raweth_qp1_flags2;
	u32 raweth_qp1_metadata;
	u8 raweth_qp1_payload_offset;
	u16 pkey_index;
};

#define BNXT_QPLIB_QUEUE_START_PERIOD	0x01
struct bnxt_qplib_cq {
	struct bnxt_qplib_dpi *dpi;
	struct bnxt_qplib_db_info dbinfo;
	u32 max_wqe;
	u32 id;
	u16 count;
	u16 period;
	struct bnxt_qplib_hwq hwq;
	struct bnxt_qplib_hwq resize_hwq;
	u32 cnq_hw_ring_id;
	struct bnxt_qplib_nq *nq;
	bool resize_in_progress;
	struct bnxt_qplib_sg_info sg_info;
	u64 cq_handle;
	u8 toggle;

#define CQ_RESIZE_WAIT_TIME_MS	500
	unsigned long flags;
#define CQ_FLAGS_RESIZE_IN_PROG	1
	wait_queue_head_t waitq;
	struct list_head sqf_head, rqf_head;
	atomic_t arm_state;
	spinlock_t compl_lock; /* synch CQ handlers */
	/* Locking Notes:
	 * QP can move to error state from modify_qp, async error event or error
	 * CQE as part of poll_cq. When QP is moved to error state, it gets added
	 * to two flush lists, one each for SQ and RQ.
	 * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
	 * flush_locks should be acquired when QP is moved to error. The control path
	 * operations (modify_qp and async error events) are synchronized with poll_cq
	 * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
	 * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
	 * of the same QP while manipulating the flush list.
	 */
	spinlock_t flush_lock; /* QP flush management */
	u16 cnq_events;
	struct bnxt_qplib_cq_coal_param *coalescing;
};

#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE	sizeof(struct xrrq_irrq)
#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE	sizeof(struct xrrq_orrq)
#define IRD_LIMIT_TO_IRRQ_SLOTS(x)	(2 * (x) + 2)
#define IRRQ_SLOTS_TO_IRD_LIMIT(s)	(((s) >> 1) - 1)
#define ORD_LIMIT_TO_ORRQ_SLOTS(x)	((x) + 1)
#define ORRQ_SLOTS_TO_ORD_LIMIT(s)	((s) - 1)

#define BNXT_QPLIB_MAX_NQE_ENTRY_SIZE	sizeof(struct nq_base)

#define NQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
#define NQE_MAX_IDX_PER_PG	(NQE_CNT_PER_PG - 1)
#define NQE_PG(x)		(((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
#define NQE_IDX(x)		((x) & NQE_MAX_IDX_PER_PG)

#define NQE_CMP_VALID(hdr, pass)					\
	(!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) ==		\
	   !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))

#define BNXT_QPLIB_NQE_MAX_CNT	(128 * 1024)

#define NQ_CONS_PCI_BAR_REGION	2
#define NQ_DB_KEY_CP		(0x2 << CMPL_DOORBELL_KEY_SFT)
#define NQ_DB_IDX_VALID		CMPL_DOORBELL_IDX_VALID
#define NQ_DB_IRQ_DIS		CMPL_DOORBELL_MASK
#define NQ_DB_CP_FLAGS_REARM	(NQ_DB_KEY_CP |		\
				 NQ_DB_IDX_VALID)
#define NQ_DB_CP_FLAGS		(NQ_DB_KEY_CP |		\
				 NQ_DB_IDX_VALID |	\
				 NQ_DB_IRQ_DIS)

struct bnxt_qplib_nq_db {
	struct bnxt_qplib_reg_desc reg;
	struct bnxt_qplib_db_info dbinfo;
};

typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
			     struct bnxt_qplib_cq *cq);
typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
			      struct bnxt_qplib_srq *srq, u8 event);

struct bnxt_qplib_nq {
	struct pci_dev *pdev;
	struct bnxt_qplib_res *res;
	char *name;
	struct bnxt_qplib_hwq hwq;
	struct bnxt_qplib_nq_db nq_db;
	u16 ring_id;
	int msix_vec;
	cpumask_t mask;
	struct tasklet_struct nq_tasklet;
	bool requested;
	int budget;
	u32 load;

	cqn_handler_t cqn_handler;
	srqn_handler_t srqn_handler;
	struct workqueue_struct *cqn_wq;
};

struct bnxt_qplib_nq_work {
	struct work_struct work;
	struct bnxt_qplib_nq *nq;
	struct bnxt_qplib_cq *cq;
};
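
/*
 * A minimal sketch of an NQ consumer callback matching cqn_handler_t, as
 * would be registered through bnxt_qplib_enable_nq() below. The function
 * name and body are assumptions for illustration, not part of this API:
 *
 *	static int my_cqn_handler(struct bnxt_qplib_nq *nq,
 *				  struct bnxt_qplib_cq *cq)
 *	{
 *		// e.g. schedule polling of "cq" from process context
 *		return 0;
 *	}
 */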

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srq_handler);
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq);
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq);
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp,
					    u32 index);
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
			 int new_cqes);
void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num, struct bnxt_qplib_qp **qp);
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes);
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq);

static inline void *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que, u32 *swq_idx)
{
	u32 idx;

	idx = que->swq_start;
	if (swq_idx)
		*swq_idx = idx;
	return &que->swq[idx];
}

static inline void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx)
{
	que->swq_start = que->swq[idx].next_idx;
}
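
/*
 * Expected pairing of the two helpers above (a sketch, assuming the
 * caller already holds the queue lock; "idx" and the fill step are
 * illustrative): bnxt_qplib_get_swqe() peeks at the next free software
 * WQE slot, and bnxt_qplib_swq_mod_start() advances swq_start once that
 * slot has actually been consumed:
 *
 *	u32 idx;
 *	struct bnxt_qplib_swq *swq = bnxt_qplib_get_swqe(&qp->sq, &idx);
 *
 *	swq->wr_id = wqe->wr_id;		// record tracking state
 *	...
 *	bnxt_qplib_swq_mod_start(&qp->sq, idx);	// commit the slot
 */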

static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode, bool is_sq)
{
	u32 slots;

	/* Queue depth is the number of slots. */
	slots = (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
	/* For variable WQE mode, need to align the slots to 256 */
	if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq)
		slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN);
	return slots;
}

static inline u32 bnxt_qplib_set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
{
	return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
		que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true);
}

static inline u32 bnxt_qplib_set_sq_max_slot(u8 wqe_mode)
{
	return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
		sizeof(struct sq_send) / sizeof(struct sq_sge) : 1;
}

static inline u32 bnxt_qplib_set_rq_max_slot(u32 wqe_size)
{
	return (wqe_size / sizeof(struct sq_sge));
}

static inline u16 __xlate_qfd(u16 delta, u16 wqe_bytes)
{
	/* For Cu/Wh delta = 128, stride = 16, wqe_bytes = 128.
	 * For Gen-p5 B/C mode delta = 0, stride = 16, wqe_bytes = 128.
	 * For Gen-p5 delta = 0, stride = 16, 32 <= wqe_bytes <= 512
	 * when 8916 is disabled.
	 */
	return (delta * wqe_bytes) / sizeof(struct sq_sge);
}

static inline u16 bnxt_qplib_calc_ilsize(struct bnxt_qplib_swqe *wqe, u16 max)
{
	u16 size = 0;
	int indx;

	for (indx = 0; indx < wqe->num_sge; indx++)
		size += wqe->sg_list[indx].size;
	if (size > max)
		size = max;

	return size;
}

/* MSN table update inline */
static inline __le64 bnxt_re_update_msn_tbl(u32 st_idx, u32 npsn, u32 start_psn)
{
	return cpu_to_le64((((u64)(st_idx) << SQ_MSN_SEARCH_START_IDX_SFT) &
			    SQ_MSN_SEARCH_START_IDX_MASK) |
			   (((u64)(npsn) << SQ_MSN_SEARCH_NEXT_PSN_SFT) &
			    SQ_MSN_SEARCH_NEXT_PSN_MASK) |
			   (((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) &
			    SQ_MSN_SEARCH_START_PSN_MASK));
}

static inline bool __is_var_wqe(struct bnxt_qplib_qp *qp)
{
	return (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE);
}

static inline bool __is_err_cqe_for_var_wqe(struct bnxt_qplib_qp *qp, u8 status)
{
	return (status != CQ_REQ_STATUS_OK) && __is_var_wqe(qp);
}
#endif /* __BNXT_QPLIB_FP_H__ */