/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators (header)
 */

#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__

#include <rdma/bnxt_re-abi.h>

/* A few helper structures are temporarily defined here; they should be
 * removed once roce_hsi.h is updated in the original code base.
 */
struct sq_ud_ext_hdr {
	__le32	dst_qp;
	__le32	avid;
	__le64	rsvd;
};

struct sq_raw_ext_hdr {
	__le32	cfa_meta;
	__le32	rsvd0;
	__le64	rsvd1;
};

struct sq_rdma_ext_hdr {
	__le64	remote_va;
	__le32	remote_key;
	__le32	rsvd;
};

struct sq_atomic_ext_hdr {
	__le64	swap_data;
	__le64	cmp_data;
};

struct sq_fr_pmr_ext_hdr {
	__le64	pblptr;
	__le64	va;
};

struct sq_bind_ext_hdr {
	__le64	va;
	__le32	length_lo;
	__le32	length_hi;
};

struct rq_ext_hdr {
	__le64	rsvd1;
	__le64	rsvd2;
};

/* Helper structures end */

struct bnxt_qplib_srq {
	struct bnxt_qplib_pd		*pd;
	struct bnxt_qplib_dpi		*dpi;
	struct bnxt_qplib_db_info	dbinfo;
	u64				srq_handle;
	u32				id;
	u16				wqe_size;
	u32				max_wqe;
	u32				max_sge;
	u32				threshold;
	bool				arm_req;
	struct bnxt_qplib_cq		*cq;
	struct bnxt_qplib_hwq		hwq;
	struct bnxt_qplib_swq		*swq;
	int				start_idx;
	int				last_idx;
	struct bnxt_qplib_sg_info	sg_info;
	u16				eventq_hw_ring_id;
	spinlock_t			lock; /* protect SRQE link list */
	u8				toggle;
};

struct bnxt_qplib_sge {
	u64				addr;
	u32				lkey;
	u32				size;
};

struct bnxt_qplib_swq {
	u64				wr_id;
	int				next_idx;
	u8				type;
	u8				flags;
	u32				start_psn;
	u32				next_psn;
	u32				slot_idx;
	u8				slots;
	struct sq_psn_search		*psn_search;
	struct sq_psn_search_ext	*psn_ext;
};

struct bnxt_qplib_swqe {
	/* General */
#define	BNXT_QPLIB_FENCE_WRID	0x46454E43	/* "FENC" */
	u64				wr_id;
	u8				reqs_type;
	u8				type;
#define BNXT_QPLIB_SWQE_TYPE_SEND			0
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM		1
#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV		2
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE			4
#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM	5
#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ			6
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP		8
#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD	11
#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV			12
#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR		13
#define BNXT_QPLIB_SWQE_TYPE_REG_MR			13
#define BNXT_QPLIB_SWQE_TYPE_BIND_MW			14
#define BNXT_QPLIB_SWQE_TYPE_RECV			128
#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM		129
	u8				flags;
#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP		BIT(0)
#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE		BIT(1)
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE			BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT		BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE			BIT(4)
	struct bnxt_qplib_sge		sg_list[BNXT_VAR_MAX_SGE];
	int				num_sge;
	/* Max inline data is 96 bytes */
	u32				inline_len;
#define BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH		96
	u8		inline_data[BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH];

	union {
		/* Send, with imm, inval key */
		struct {
			union {
				u32	imm_data;
				u32	inv_key;
			};
			u32		q_key;
			u32		dst_qp;
			u32		avid;
		} send;

		/* Send Raw Ethernet and QP1 */
		struct {
			u16		lflags;
			u16		cfa_action;
			u32		cfa_meta;
		} rawqp1;

		/* RDMA write, with imm, read */
		struct {
			union {
				u32	imm_data;
				u32	inv_key;
			};
			u64		remote_va;
			u32		r_key;
		} rdma;

		/* Atomic cmp/swap, fetch/add */
		struct {
			u64		remote_va;
			u32		r_key;
			u64		swap_data;
			u64		cmp_data;
		} atomic;

		/* Local Invalidate */
		struct {
			u32		inv_l_key;
		} local_inv;

		/* FR-PMR */
		struct {
			u8		access_cntl;
			u8		pg_sz_log;
			bool		zero_based;
			u32		l_key;
			u32		length;
			u8		pbl_pg_sz_log;
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K			0
#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K			1
#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K			4
#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K			6
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M			8
#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M			9
#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M			10
#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G			18
			u8		levels;
#define PAGE_SHIFT_4K	12
			__le64		*pbl_ptr;
			dma_addr_t	pbl_dma_ptr;
			u64		*page_list;
			u16		page_list_len;
			u64		va;
		} frmr;

		/* Bind */
		struct {
			u8		access_cntl;
#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE		BIT(0)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ		BIT(1)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE	BIT(2)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC	BIT(3)
#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND		BIT(4)
			bool		zero_based;
			u8		mw_type;
			u32		parent_l_key;
			u32		r_key;
			u64		va;
			u32		length;
		} bind;
	};
};
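
/*
 * Example (illustrative sketch only, not part of the driver): how a caller
 * might fill a signalled SEND work request using the SWQE layout above and
 * post it with the helpers declared later in this header.  The buffer
 * address, lkey, length and wr_id below are hypothetical placeholders.
 *
 *	static int example_post_send(struct bnxt_qplib_qp *qp, u64 dma_addr,
 *				     u32 lkey, u32 len, u64 wr_id)
 *	{
 *		struct bnxt_qplib_swqe wqe = {};
 *		int rc;
 *
 *		wqe.wr_id = wr_id;
 *		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
 *		wqe.flags = BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
 *		wqe.sg_list[0].addr = dma_addr;
 *		wqe.sg_list[0].lkey = lkey;
 *		wqe.sg_list[0].size = len;
 *		wqe.num_sge = 1;
 *
 *		rc = bnxt_qplib_post_send(qp, &wqe);
 *		if (!rc)
 *			bnxt_qplib_post_send_db(qp);
 *		return rc;
 *	}
 */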

struct bnxt_qplib_q {
	struct bnxt_qplib_hwq		hwq;
	struct bnxt_qplib_swq		*swq;
	struct bnxt_qplib_db_info	dbinfo;
	struct bnxt_qplib_sg_info	sg_info;
	u32				max_wqe;
	u32				max_sw_wqe;
	u16				wqe_size;
	u16				q_full_delta;
	u16				max_sge;
	u32				psn;
	bool				condition;
	bool				single;
	bool				send_phantom;
	u32				phantom_wqe_cnt;
	u32				phantom_cqe_cnt;
	u32				next_cq_cons;
	bool				flushed;
	u32				swq_start;
	u32				swq_last;
};

struct bnxt_qplib_qp {
	struct bnxt_qplib_pd		*pd;
	struct bnxt_qplib_dpi		*dpi;
	struct bnxt_qplib_chip_ctx	*cctx;
	u64				qp_handle;
#define	BNXT_QPLIB_QP_ID_INVALID	0xFFFFFFFF
	u32				id;
	u8				type;
	u8				sig_type;
	u8				wqe_mode;
	u8				state;
	u8				cur_qp_state;
	u64				modify_flags;
	u32				max_inline_data;
	u32				mtu;
	u8				path_mtu;
	bool				en_sqd_async_notify;
	u16				pkey_index;
	u32				qkey;
	u32				dest_qp_id;
	u8				access;
	u8				timeout;
	u8				retry_cnt;
	u8				rnr_retry;
	u64				wqe_cnt;
	u32				min_rnr_timer;
	u32				max_rd_atomic;
	u32				max_dest_rd_atomic;
	u32				dest_qpn;
	u8				smac[6];
	u16				vlan_id;
	u16				port_id;
	u8				nw_type;
	struct bnxt_qplib_ah		ah;

#define BTH_PSN_MASK			((1 << 24) - 1)
	/* SQ */
	struct bnxt_qplib_q		sq;
	/* RQ */
	struct bnxt_qplib_q		rq;
	/* SRQ */
	struct bnxt_qplib_srq		*srq;
	/* CQ */
	struct bnxt_qplib_cq		*scq;
	struct bnxt_qplib_cq		*rcq;
	/* IRRQ and ORRQ */
	struct bnxt_qplib_hwq		irrq;
	struct bnxt_qplib_hwq		orrq;
	/* Header buffer for QP1 */
	int				sq_hdr_buf_size;
	int				rq_hdr_buf_size;
	/*
	 * Buffer space for ETH(14), IP or GRH(40), UDP header(8)
	 * and ib_bth + ib_deth (20).
	 * Max required is 82 when RoCE V2 is enabled
	 */
#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2	86
	/* Ethernet header	=  14 */
	/* ib_grh		=  40 (provided by MAD) */
	/* ib_bth + ib_deth	=  20 */
	/* MAD			= 256 (provided by MAD) */
	/* iCRC			=   4 */
#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE	14
#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2	512
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4	20
#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6	40
#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE	20
	void				*sq_hdr_buf;
	dma_addr_t			sq_hdr_buf_map;
	void				*rq_hdr_buf;
	dma_addr_t			rq_hdr_buf_map;
	struct list_head		sq_flush;
	struct list_head		rq_flush;
	u32				msn;
	u32				msn_tbl_sz;
	bool				is_host_msn_tbl;
	u8				tos_dscp;
};

#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE	sizeof(struct cq_base)

#define CQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
#define CQE_MAX_IDX_PER_PG	(CQE_CNT_PER_PG - 1)
#define CQE_PG(x)		(((x) & ~CQE_MAX_IDX_PER_PG) / CQE_CNT_PER_PG)
#define CQE_IDX(x)		((x) & CQE_MAX_IDX_PER_PG)

#define ROCE_CQE_CMP_V			0
#define CQE_CMP_VALID(hdr, pass)					\
	(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) ==			\
	   !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))

static inline u32 __bnxt_qplib_get_avail(struct bnxt_qplib_hwq *hwq)
{
	int cons, prod, avail;

	cons = hwq->cons;
	prod = hwq->prod;
	avail = cons - prod;
	if (cons <= prod)
		avail += hwq->depth;
	return avail;
}

static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *que,
					 u8 slots)
{
	struct bnxt_qplib_hwq *hwq;
	int avail;

	hwq = &que->hwq;
	/* False full is possible, retrying post-send makes sense */
	avail = hwq->cons - hwq->prod;
	if (hwq->cons <= hwq->prod)
		avail += hwq->depth;
	return avail <= slots;
}
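
/*
 * Worked illustration (not part of the API): the available-slot math above
 * treats the hardware queue as a circular buffer indexed by producer and
 * consumer.  For example, with depth = 8, prod = 6 and cons = 2,
 * avail = 2 - 6 = -4, and since cons <= prod the depth is added back,
 * giving 4 free slots.  A post of "slots" new entries is refused when
 * avail <= slots; because prod and cons can move while being sampled, a
 * "false full" may be reported and the caller is expected to retry the post.
 */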

/* CQ coalescing parameters */
struct bnxt_qplib_cq_coal_param {
	u16	buf_maxtime;
	u8	normal_maxbuf;
	u8	during_maxbuf;
	u8	en_ring_idle_mode;
};

#define BNXT_QPLIB_CQ_COAL_DEF_BUF_MAXTIME		0x1
#define BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P7		0x8
#define BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P7		0x8
#define BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P5		0x1
#define BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P5		0x1
#define BNXT_QPLIB_CQ_COAL_DEF_EN_RING_IDLE_MODE	0x1
#define BNXT_QPLIB_CQ_COAL_MAX_BUF_MAXTIME		0x1bf
#define BNXT_QPLIB_CQ_COAL_MAX_NORMAL_MAXBUF		0x1f
#define BNXT_QPLIB_CQ_COAL_MAX_DURING_MAXBUF		0x1f
#define BNXT_QPLIB_CQ_COAL_MAX_EN_RING_IDLE_MODE	0x1

struct bnxt_qplib_cqe {
	u8				status;
	u8				type;
	u8				opcode;
	u32				length;
	u16				cfa_meta;
	u64				wr_id;
	union {
		u32			immdata;
		u32			invrkey;
	};
	u64				qp_handle;
	u64				mr_handle;
	u16				flags;
	u8				smac[6];
	u32				src_qp;
	u16				raweth_qp1_flags;
	u16				raweth_qp1_errors;
	u16				raweth_qp1_cfa_code;
	u32				raweth_qp1_flags2;
	u32				raweth_qp1_metadata;
	u8				raweth_qp1_payload_offset;
	u16				pkey_index;
};

#define BNXT_QPLIB_QUEUE_START_PERIOD		0x01
struct bnxt_qplib_cq {
	struct bnxt_qplib_dpi		*dpi;
	struct bnxt_qplib_db_info	dbinfo;
	u32				max_wqe;
	u32				id;
	u16				count;
	u16				period;
	struct bnxt_qplib_hwq		hwq;
	struct bnxt_qplib_hwq		resize_hwq;
	u32				cnq_hw_ring_id;
	struct bnxt_qplib_nq		*nq;
	bool				resize_in_progress;
	struct bnxt_qplib_sg_info	sg_info;
	u64				cq_handle;
	u8				toggle;

#define CQ_RESIZE_WAIT_TIME_MS		500
	unsigned long			flags;
#define CQ_FLAGS_RESIZE_IN_PROG		1
	wait_queue_head_t		waitq;
	struct list_head		sqf_head, rqf_head;
	atomic_t			arm_state;
	spinlock_t			compl_lock; /* synch CQ handlers */
	/* Locking Notes:
	 * QP can move to error state from modify_qp, async error event or error
	 * CQE as part of poll_cq. When QP is moved to error state, it gets added
	 * to two flush lists, one each for SQ and RQ.
	 * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
	 * flush_locks should be acquired when QP is moved to error. The control path
	 * operations(modify_qp and async error events) are synchronized with poll_cq
	 * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
	 * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
	 * of the same QP while manipulating the flush list.
	 */
	spinlock_t			flush_lock; /* QP flush management */
	u16				cnq_events;
	struct bnxt_qplib_cq_coal_param	*coalescing;
};

#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE	sizeof(struct xrrq_irrq)
#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE	sizeof(struct xrrq_orrq)
#define IRD_LIMIT_TO_IRRQ_SLOTS(x)	(2 * (x) + 2)
#define IRRQ_SLOTS_TO_IRD_LIMIT(s)	(((s) >> 1) - 1)
#define ORD_LIMIT_TO_ORRQ_SLOTS(x)	((x) + 1)
#define ORRQ_SLOTS_TO_ORD_LIMIT(s)	((s) - 1)

#define BNXT_QPLIB_MAX_NQE_ENTRY_SIZE	sizeof(struct nq_base)

#define NQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_NQE_ENTRY_SIZE)
#define NQE_MAX_IDX_PER_PG	(NQE_CNT_PER_PG - 1)
#define NQE_PG(x)		(((x) & ~NQE_MAX_IDX_PER_PG) / NQE_CNT_PER_PG)
#define NQE_IDX(x)		((x) & NQE_MAX_IDX_PER_PG)

#define NQE_CMP_VALID(hdr, pass)					\
	(!!(le32_to_cpu((hdr)->info63_v[0]) & NQ_BASE_V) ==		\
	   !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))

#define BNXT_QPLIB_NQE_MAX_CNT		(128 * 1024)

#define NQ_CONS_PCI_BAR_REGION	2
#define NQ_DB_KEY_CP		(0x2 << CMPL_DOORBELL_KEY_SFT)
#define NQ_DB_IDX_VALID		CMPL_DOORBELL_IDX_VALID
#define NQ_DB_IRQ_DIS		CMPL_DOORBELL_MASK
#define NQ_DB_CP_FLAGS_REARM	(NQ_DB_KEY_CP |		\
				 NQ_DB_IDX_VALID)
#define NQ_DB_CP_FLAGS		(NQ_DB_KEY_CP |		\
				 NQ_DB_IDX_VALID |	\
				 NQ_DB_IRQ_DIS)

struct bnxt_qplib_nq_db {
	struct bnxt_qplib_reg_desc	reg;
	struct bnxt_qplib_db_info	dbinfo;
};

typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
			     struct bnxt_qplib_cq *cq);
typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
			      struct bnxt_qplib_srq *srq, u8 event);

struct bnxt_qplib_nq {
	struct pci_dev			*pdev;
	struct bnxt_qplib_res		*res;
	char				*name;
	struct bnxt_qplib_hwq		hwq;
	struct bnxt_qplib_nq_db		nq_db;
	u16				ring_id;
	int				msix_vec;
	cpumask_t			mask;
	struct tasklet_struct		nq_tasklet;
	bool				requested;
	int				budget;
	u32				load;

	cqn_handler_t			cqn_handler;
	srqn_handler_t			srqn_handler;
	struct workqueue_struct		*cqn_wq;
};

struct bnxt_qplib_nq_work {
	struct work_struct	work;
	struct bnxt_qplib_nq	*nq;
	struct bnxt_qplib_cq	*cq;
};

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srq_handler);
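
/*
 * Example (illustrative sketch only): how the cqn_handler_t/srqn_handler_t
 * callbacks plug into bnxt_qplib_enable_nq().  The handler bodies, NQ index,
 * MSI-X vector and BAR offset below are hypothetical placeholders, not part
 * of this driver.
 *
 *	static int example_cqn_handler(struct bnxt_qplib_nq *nq,
 *				       struct bnxt_qplib_cq *cq)
 *	{
 *		return 0;
 *	}
 *
 *	static int example_srqn_handler(struct bnxt_qplib_nq *nq,
 *					struct bnxt_qplib_srq *srq, u8 event)
 *	{
 *		return 0;
 *	}
 *
 *	rc = bnxt_qplib_enable_nq(pdev, nq, 0, msix_vector, bar_reg_offset,
 *				  example_cqn_handler, example_srqn_handler);
 */
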
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq);
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq);
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_srq *srq);
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp,
					    u32 index);
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
			 int new_cqes);
void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num, struct bnxt_qplib_qp **qp);
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes);
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq);

static inline void *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que, u32 *swq_idx)
{
	u32 idx;

	idx = que->swq_start;
	if (swq_idx)
		*swq_idx = idx;
	return &que->swq[idx];
}

static inline void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx)
{
	que->swq_start = que->swq[idx].next_idx;
}

static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode, bool is_sq)
{
	u32 slots;

	/* Queue depth is the number of slots. */
	slots = (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
	/* For variable WQE mode, need to align the slots to 256 */
	if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq)
		slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN);
	return slots;
}

static inline u32 bnxt_qplib_set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
{
	return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
		que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true);
}

static inline u32 bnxt_qplib_set_sq_max_slot(u8 wqe_mode)
{
	return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
		sizeof(struct sq_send) / sizeof(struct sq_sge) : 1;
}

static inline u32 bnxt_qplib_set_rq_max_slot(u32 wqe_size)
{
	return (wqe_size / sizeof(struct sq_sge));
}
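
/*
 * Worked example (illustrative only, figures assumed from the stride and
 * WQE sizes quoted in the __xlate_qfd() comment below): with a 16-byte
 * sq_sge slot stride, an SQ created with wqe_size = 128 and max_wqe = 256
 * maps to (128 * 256) / 16 = 2048 slots, and in static mode
 * bnxt_qplib_set_sq_max_slot() reports 128 / 16 = 8 slots per WQE.  In
 * variable WQE mode the slot count is additionally rounded up to the
 * BNXT_VAR_MAX_SLOT_ALIGN boundary and each posted WQE consumes only the
 * slots it actually needs.
 */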

static inline u16 __xlate_qfd(u16 delta, u16 wqe_bytes)
{
	/* For Cu/Wh delta = 128, stride = 16, wqe_bytes = 128.
	 * For Gen-p5 B/C mode delta = 0, stride = 16, wqe_bytes = 128.
	 * For Gen-p5 delta = 0, stride = 16, 32 <= wqe_bytes <= 512
	 * when 8916 is disabled.
	 */
	return (delta * wqe_bytes) / sizeof(struct sq_sge);
}

static inline u16 bnxt_qplib_calc_ilsize(struct bnxt_qplib_swqe *wqe, u16 max)
{
	u16 size = 0;
	int indx;

	for (indx = 0; indx < wqe->num_sge; indx++)
		size += wqe->sg_list[indx].size;
	if (size > max)
		size = max;

	return size;
}

/* MSN table update inline */
static inline __le64 bnxt_re_update_msn_tbl(u32 st_idx, u32 npsn, u32 start_psn)
{
	return cpu_to_le64((((u64)(st_idx) << SQ_MSN_SEARCH_START_IDX_SFT) &
		SQ_MSN_SEARCH_START_IDX_MASK) |
		(((u64)(npsn) << SQ_MSN_SEARCH_NEXT_PSN_SFT) &
		 SQ_MSN_SEARCH_NEXT_PSN_MASK) |
		(((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) &
		 SQ_MSN_SEARCH_START_PSN_MASK));
}

static inline bool __is_var_wqe(struct bnxt_qplib_qp *qp)
{
	return (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE);
}

static inline bool __is_err_cqe_for_var_wqe(struct bnxt_qplib_qp *qp, u8 status)
{
	return (status != CQ_REQ_STATUS_OK) && __is_var_wqe(qp);
}
#endif /* __BNXT_QPLIB_FP_H__ */