/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager (header)
 */

#ifndef __BNXT_QPLIB_RES_H__
#define __BNXT_QPLIB_RES_H__

extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;

/*
 * Chip numbers used to classify the device generation; see
 * bnxt_qplib_is_chip_gen_p5() / bnxt_qplib_is_chip_gen_p7() below.
 */
#define CHIP_NUM_57508		0x1750
#define CHIP_NUM_57504		0x1751
#define CHIP_NUM_57502		0x1752
#define CHIP_NUM_58818		0xd818
#define CHIP_NUM_57608		0x1760

/*
 * Bit positions inside the doorbell header assembled by
 * BNXT_QPLIB_INIT_DBHDR() below: bit 26 of the high word marks the
 * record valid, bit 24/25 of the low word carry the epoch/toggle bits.
 */
#define BNXT_QPLIB_DBR_VALID			(0x1UL << 26)
#define BNXT_QPLIB_DBR_EPOCH_SHIFT		24
#define BNXT_QPLIB_DBR_TOGGLE_SHIFT		25

/* Per-device driver operating-mode flags. */
struct bnxt_qplib_drv_modes {
	u8	wqe_mode;
	bool	db_push;
	bool	dbr_pacing;
	u32	toggle_bits;
};

/* Bits recorded in bnxt_qplib_drv_modes::toggle_bits. */
enum bnxt_re_toggle_modes {
	BNXT_QPLIB_CQ_TOGGLE_BIT = 0x1,
	BNXT_QPLIB_SRQ_TOGGLE_BIT = 0x2,
};

/* Identification and capabilities of the underlying chip. */
struct bnxt_qplib_chip_ctx {
	u16	chip_num;
	u8	chip_rev;
	u8	chip_metal;
	u16	hw_stats_size;
	u16	hwrm_cmd_max_timeout;
	struct bnxt_qplib_drv_modes modes;
	u64	hwrm_intf_ver;
	u32	dbr_stat_db_fifo;
};

/* Parameters consumed by the doorbell-pacing machinery. */
struct bnxt_qplib_db_pacing_data {
	u32 do_pacing;
	u32 pacing_th;
	u32 alarm_th;
	u32 fifo_max_depth;
	u32 fifo_room_mask;
	u32 fifo_room_shift;
	u32 grc_reg_offset;
};

#define BNXT_QPLIB_DBR_PF_DB_OFFSET     0x10000
#define BNXT_QPLIB_DBR_VF_DB_OFFSET     0x4000

/* Helpers to split a flat pointer index into (page, index-in-page). */
#define PTR_CNT_PER_PG		(PAGE_SIZE / sizeof(void *))
#define PTR_MAX_IDX_PER_PG	(PTR_CNT_PER_PG - 1)
#define PTR_PG(x)		(((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
#define PTR_IDX(x)		((x) & PTR_MAX_IDX_PER_PG)

/* Wrap a raw index into the ring; assumes max_elements is a power of two. */
#define HWQ_CMP(idx, hwq)	((idx) & ((hwq)->max_elements - 1))

/*
 * Number of free slots between producer and consumer.
 * NOTE(review): the 'hwq' argument is expanded unparenthesized here, so
 * callers must pass a simple lvalue, not an arbitrary expression.
 */
#define HWQ_FREE_SLOTS(hwq)	(hwq->max_elements - \
				((HWQ_CMP(hwq->prod, hwq)\
				- HWQ_CMP(hwq->cons, hwq))\
				& (hwq->max_elements - 1)))
enum bnxt_qplib_hwq_type {
	HWQ_TYPE_CTX,
	HWQ_TYPE_QUEUE,
	HWQ_TYPE_L2_CMPL,
	HWQ_TYPE_MR
};

/* Page counts per PBL (page-buffer-list) indirection level. */
#define MAX_PBL_LVL_0_PGS		1
#define MAX_PBL_LVL_1_PGS		512
#define MAX_PBL_LVL_1_PGS_SHIFT		9
#define MAX_PBL_LVL_1_PGS_FOR_LVL_2	256
#define MAX_PBL_LVL_2_PGS		(256 * 512)
/* NOTE(review): 'PDL' is likely a typo for 'PBL' (cf. MAX_PBL_LVL_1_PGS_SHIFT);
 * renaming would touch users outside this header, so it is left as-is.
 */
#define MAX_PDL_LVL_SHIFT               9

enum bnxt_qplib_pbl_lvl {
	PBL_LVL_0,
	PBL_LVL_1,
	PBL_LVL_2,
	PBL_LVL_MAX
};

/* Page sizes supported for RoCE queue memory. */
#define ROCE_PG_SIZE_4K		(4 * 1024)
#define ROCE_PG_SIZE_8K		(8 * 1024)
#define ROCE_PG_SIZE_64K	(64 * 1024)
#define ROCE_PG_SIZE_2M		(2 * 1024 * 1024)
#define ROCE_PG_SIZE_8M		(8 * 1024 * 1024)
#define ROCE_PG_SIZE_1G		(1024 * 1024 * 1024)

/* HWRM encoding of the page sizes above; see bnxt_qplib_base_pg_size(). */
enum bnxt_qplib_hwrm_pg_size {
	BNXT_QPLIB_HWRM_PG_SIZE_4K	= 0,
	BNXT_QPLIB_HWRM_PG_SIZE_8K	= 1,
	BNXT_QPLIB_HWRM_PG_SIZE_64K	= 2,
	BNXT_QPLIB_HWRM_PG_SIZE_2M	= 3,
	BNXT_QPLIB_HWRM_PG_SIZE_8M	= 4,
	BNXT_QPLIB_HWRM_PG_SIZE_1G	= 5,
};

/* Describes one mapped PCI BAR region. */
struct bnxt_qplib_reg_desc {
	u8		bar_id;
	resource_size_t	bar_base;
	unsigned long	offset;
	void __iomem	*bar_reg;
	size_t		len;
};

/* One level of a page-buffer list: pages plus their DMA addresses. */
struct bnxt_qplib_pbl {
	u32				pg_count;
	u32				pg_size;
	void				**pg_arr;
	dma_addr_t			*pg_map_arr;
};

/* Scatter/gather description of (optionally user-mapped) queue memory. */
struct bnxt_qplib_sg_info {
	struct ib_umem			*umem;
	u32				npages;
	u32				pgshft;
	u32				pgsize;
	bool				nopte;
};

/* Creation attributes for a hardware queue (HWQ). */
struct bnxt_qplib_hwq_attr {
	struct bnxt_qplib_res		*res;
	struct bnxt_qplib_sg_info	*sginfo;
	enum bnxt_qplib_hwq_type	type;
	u32				depth;
	u32				stride;
	u32				aux_stride;
	u32				aux_depth;
};

struct bnxt_qplib_hwq {
	struct pci_dev			*pdev;
	/* lock to protect qplib_hwq */
	spinlock_t			lock;
	struct bnxt_qplib_pbl		pbl[PBL_LVL_MAX + 1];
	enum bnxt_qplib_pbl_lvl		level;		/* 0, 1, or 2 */
	/* ptr for easy access to the PBL entries */
	void				**pbl_ptr;
	/* ptr for easy access to the dma_addr */
	dma_addr_t			*pbl_dma_ptr;
	u32				max_elements;
	u32				depth;
	u16				element_size;	/* Size of each entry */
	u16				qe_ppg;	/* queue entry per page */

	u32				prod;		/* raw */
	u32				cons;		/* raw */
	u8				cp_bit;
	u8				is_user;
	u64				*pad_pg;
	u32				pad_stride;
	u32				pad_pgofft;
};

/* State needed to ring doorbells for one queue; see the helpers below. */
struct bnxt_qplib_db_info {
	void __iomem		*db;
	void __iomem		*priv_db;
	struct bnxt_qplib_hwq	*hwq;
	u32			xid;
	u32			max_slot;
	u32			flags;
	u8			toggle;
};

/* Epoch-tracking bits kept in bnxt_qplib_db_info::flags. */
enum bnxt_qplib_db_info_flags_mask {
	BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT	= 0x0UL,
	BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT	= 0x1UL,
	BNXT_QPLIB_FLAG_EPOCH_CONS_MASK		= 0x1UL,
	BNXT_QPLIB_FLAG_EPOCH_PROD_MASK		= 0x2UL,
};

/*
 * Shifts that move the epoch flag bits (above) into doorbell-index
 * position.  The PROD shift is one less because the PROD flag already
 * sits at bit 1 of the flags word.
 */
enum bnxt_qplib_db_epoch_flag_shift {
	BNXT_QPLIB_DB_EPOCH_CONS_SHIFT	= BNXT_QPLIB_DBR_EPOCH_SHIFT,
	BNXT_QPLIB_DB_EPOCH_PROD_SHIFT	= (BNXT_QPLIB_DBR_EPOCH_SHIFT - 1),
};

/* Tables */
struct bnxt_qplib_pd_tbl {
	unsigned long			*tbl;
	u32				max;
};

struct bnxt_qplib_sgid_tbl {
	struct bnxt_qplib_gid_info	*tbl;
	u16				*hw_id;
	u16				max;
	u16				active;
	void				*ctx;
	u8				*vlan;
};

/* Doorbell-page (DPI) mapping types. */
enum {
	BNXT_QPLIB_DPI_TYPE_KERNEL	= 0,
	BNXT_QPLIB_DPI_TYPE_UC		= 1,
	BNXT_QPLIB_DPI_TYPE_WC		= 2
};

struct bnxt_qplib_dpi {
	u32				dpi;
	u32				bit;
	void __iomem			*dbr;
	u64				umdbr;
	u8				type;
};

struct bnxt_qplib_dpi_tbl {
	void				**app_tbl;
	unsigned long			*tbl;
	u16				max;
	struct bnxt_qplib_reg_desc	ucreg; /* Hold entire DB bar. */
	struct bnxt_qplib_reg_desc	wcreg;
	void __iomem			*priv_db;
};

/* DMA-coherent buffer used for firmware statistics. */
struct bnxt_qplib_stats {
	dma_addr_t			dma_map;
	void				*dma;
	u32				size;
	u32				fw_id;
};

/* Per-VF resource limits. */
struct bnxt_qplib_vf_res {
	u32 max_qp_per_vf;
	u32 max_mrw_per_vf;
	u32 max_srq_per_vf;
	u32 max_cq_per_vf;
	u32 max_gid_per_vf;
};

/* Context entry sizes (bytes) for the backing-store tables below. */
#define BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE	448
#define BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE	64
#define BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE	64
#define BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE	128

#define MAX_TQM_ALLOC_REQ		48
#define MAX_TQM_ALLOC_BLK_SIZE		8
struct bnxt_qplib_tqm_ctx {
	struct bnxt_qplib_hwq		pde;
	u8				pde_level; /* Original level */
	struct bnxt_qplib_hwq		qtbl[MAX_TQM_ALLOC_REQ];
	u8				qcount[MAX_TQM_ALLOC_REQ];
};

/* Firmware context backing-store tables and their element counts. */
struct bnxt_qplib_ctx {
	u32				qpc_count;
	struct bnxt_qplib_hwq		qpc_tbl;
	u32				mrw_count;
	struct bnxt_qplib_hwq		mrw_tbl;
	u32				srqc_count;
	struct bnxt_qplib_hwq		srqc_tbl;
	u32				cq_count;
	struct bnxt_qplib_hwq		cq_tbl;
	struct bnxt_qplib_hwq		tim_tbl;
	struct bnxt_qplib_tqm_ctx	tqm_ctx;
	struct bnxt_qplib_stats		stats;
	struct bnxt_qplib_vf_res	vf_res;
};

/* Top-level per-device resource container for the qplib layer. */
struct bnxt_qplib_res {
	struct pci_dev			*pdev;
	struct bnxt_qplib_chip_ctx	*cctx;
	struct bnxt_qplib_dev_attr	*dattr;
	struct net_device		*netdev;
	struct bnxt_qplib_rcfw		*rcfw;
	struct bnxt_qplib_pd_tbl	pd_tbl;
	/* To protect the pd table bit map */
	struct mutex			pd_tbl_lock;
	struct bnxt_qplib_sgid_tbl	sgid_tbl;
	struct bnxt_qplib_dpi_tbl	dpi_tbl;
	/* To protect the dpi table bit map */
	struct mutex			dpi_tbl_lock;
	bool				prio;
	bool				is_vf;
	struct bnxt_qplib_db_pacing_data *pacing_data;
};

/* True for the P7 chip generation (57608/58818). */
static inline bool bnxt_qplib_is_chip_gen_p7(struct bnxt_qplib_chip_ctx *cctx)
{
	return (cctx->chip_num == CHIP_NUM_58818 ||
		cctx->chip_num == CHIP_NUM_57608);
}

/* True for the P5 chip generation (57508/57504/57502). */
static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
{
	return (cctx->chip_num == CHIP_NUM_57508 ||
		cctx->chip_num == CHIP_NUM_57504 ||
		cctx->chip_num == CHIP_NUM_57502);
}

/* True for P5 or P7; these generations share the 64-bit doorbell format. */
static inline bool bnxt_qplib_is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx)
{
	return bnxt_qplib_is_chip_gen_p5(cctx) || bnxt_qplib_is_chip_gen_p7(cctx);
}

/* HWQ type for NQs: generic queue on P5/P7, L2 completion ring otherwise. */
static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
{
	return bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
	       HWQ_TYPE_QUEUE : HWQ_TYPE_L2_CMPL;
}

/* HWRM ring type to request for the notification queue. */
static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
{
	return bnxt_qplib_is_chip_gen_p5_p7(cctx) ?
	       RING_ALLOC_REQ_RING_TYPE_NQ :
	       RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
}

/*
 * Translate the level-0 PBL page size of @hwq into its HWRM encoding.
 * Unrecognized sizes fall back to the 4K encoding.
 */
static inline u8 bnxt_qplib_base_pg_size(struct bnxt_qplib_hwq *hwq)
{
	u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
	struct bnxt_qplib_pbl *pbl;

	pbl = &hwq->pbl[PBL_LVL_0];
	switch (pbl->pg_size) {
	case ROCE_PG_SIZE_4K:
		pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
		break;
	case ROCE_PG_SIZE_8K:
		pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K;
		break;
	case ROCE_PG_SIZE_64K:
		pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K;
		break;
	case ROCE_PG_SIZE_2M:
		pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M;
		break;
	case ROCE_PG_SIZE_8M:
		pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M;
		break;
	case ROCE_PG_SIZE_1G:
		pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G;
		break;
	default:
		break;
	}

	return pg_size;
}

/*
 * Return a pointer to queue element @indx of @hwq.  If @pg is non-NULL,
 * also return (as an integer) the address of the page-pointer slot that
 * holds this element's page.
 */
static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq,
				      u32 indx, u64 *pg)
{
	u32 pg_num, pg_idx;

	pg_num = (indx / hwq->qe_ppg);
	pg_idx = (indx % hwq->qe_ppg);
	if (pg)
		*pg = (u64)&hwq->pbl_ptr[pg_num];
	return (void *)(hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);
}

/* Element @idx slots ahead of the current producer, wrapping at depth. */
static inline void *bnxt_qplib_get_prod_qe(struct bnxt_qplib_hwq *hwq, u32 idx)
{
	idx += hwq->prod;
	if (idx >= hwq->depth)
		idx -= hwq->depth;
	return bnxt_qplib_get_qe(hwq, idx, NULL);
}

#define to_bnxt_qplib(ptr, type, member)	\
	container_of(ptr, type, member)

struct bnxt_qplib_pd;
struct bnxt_qplib_dev_attr;

void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_hwq *hwq);
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_hwq_attr *hwq_attr);
int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res,
			struct bnxt_qplib_pd *pd);
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pd_tbl,
			  struct bnxt_qplib_pd *pd);
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_dpi *dpi,
			 void *app, u8 type);
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi *dpi);
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr);
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5);
int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res);
void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);

int bnxt_qplib_determine_atomics(struct pci_dev *dev);

static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo,
					    struct bnxt_qplib_hwq *hwq, u32 cnt)
{
	/* move prod and update toggle/epoch if wrap around */
	hwq->prod += cnt;
	if (hwq->prod >= hwq->depth) {
		hwq->prod %= hwq->depth;
		dbinfo->flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT;
	}
}

static inline void bnxt_qplib_hwq_incr_cons(u32 max_elements, u32 *cons, u32 cnt,
					    u32 *dbinfo_flags)
{
	/* move cons and update toggle/epoch if wrap around */
	*cons += cnt;
	if (*cons >= max_elements) {
		*cons %= max_elements;
		*dbinfo_flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT;
	}
}

/*
 * Legacy 32-bit completion doorbell: write the current consumer index;
 * keep interrupts masked unless @arm is set.
 */
static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info,
					bool arm)
{
	u32 key = 0;

	key |= info->hwq->cons | (CMPL_DOORBELL_IDX_VALID |
		(CMPL_DOORBELL_KEY_CMPL & CMPL_DOORBELL_KEY_MASK));
	if (!arm)
		key |= CMPL_DOORBELL_MASK;
	writel(key, info->db);
}

/*
 * Compose the 64-bit doorbell value: high 32 bits carry xid, path,
 * type and the VALID bit; low 32 bits carry the index with the toggle
 * bit at BNXT_QPLIB_DBR_TOGGLE_SHIFT.
 */
#define BNXT_QPLIB_INIT_DBHDR(xid, type, indx, toggle)	\
	(((u64)(((xid) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE |	\
	 (type) | BNXT_QPLIB_DBR_VALID) << 32) | (indx) |		\
	 (((u32)(toggle)) << (BNXT_QPLIB_DBR_TOGGLE_SHIFT)))

/*
 * Ring a consumer-side 64-bit doorbell: consumer index plus the CONS
 * epoch bit; the toggle bit is applied only for CQ ARMALL/ARMSE.
 */
static inline void bnxt_qplib_ring_db(struct bnxt_qplib_db_info *info,
				      u32 type)
{
	u64 key = 0;
	u32 indx;
	u8 toggle = 0;

	if (type == DBC_DBC_TYPE_CQ_ARMALL ||
	    type == DBC_DBC_TYPE_CQ_ARMSE)
		toggle = info->toggle;

	indx = (info->hwq->cons & DBC_DBC_INDEX_MASK) |
	       ((info->flags & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK) <<
		BNXT_QPLIB_DB_EPOCH_CONS_SHIFT);

	key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, toggle);
	writeq(key, info->db);
}

/*
 * Ring a producer-side 64-bit doorbell: producer index in units of
 * max_slot slots, plus the PROD epoch bit.
 */
static inline void bnxt_qplib_ring_prod_db(struct bnxt_qplib_db_info *info,
					   u32 type)
{
	u64 key = 0;
	u32 indx;

	indx = (((info->hwq->prod / info->max_slot) & DBC_DBC_INDEX_MASK) |
		((info->flags & BNXT_QPLIB_FLAG_EPOCH_PROD_MASK) <<
		 BNXT_QPLIB_DB_EPOCH_PROD_SHIFT));
	key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, 0);
	writeq(key, info->db);
}

/*
 * Arm-enable doorbell, written to the privileged doorbell page; the
 * toggle bit is applied only for the CQ/SRQ ARMENA types.
 */
static inline void bnxt_qplib_armen_db(struct bnxt_qplib_db_info *info,
				       u32 type)
{
	u64 key = 0;
	u8 toggle = 0;

	if (type == DBC_DBC_TYPE_CQ_ARMENA || type == DBC_DBC_TYPE_SRQ_ARMENA)
		toggle = info->toggle;
	/* Index always at 0 */
	key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, 0, toggle);
	writeq(key, info->priv_db);
}

/* Arm the SRQ at limit threshold @th via the privileged doorbell page. */
static inline void bnxt_qplib_srq_arm_db(struct bnxt_qplib_db_info *info,
					 u32 th)
{
	u64 key = 0;

	key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_SRQ_ARM, th, info->toggle);
	writeq(key, info->priv_db);
}

/* NQ doorbell: 64-bit format on P5/P7 chips, legacy 32-bit otherwise. */
static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
					 struct bnxt_qplib_chip_ctx *cctx,
					 bool arm)
{
	u32 type;

	type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
	if (bnxt_qplib_is_chip_gen_p5_p7(cctx))
		bnxt_qplib_ring_db(info, type);
	else
		bnxt_qplib_ring_db32(info, arm);
}

/* Firmware reports extended statistics support in the query-func caps. */
static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
{
	return dev_cap_flags &
		CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
}

/* Hardware retransmission is supported when either RETX cap bit is set. */
static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
{
	return dev_cap_flags &
		(CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED |
		 CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED);
}

#define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a))

/* Whether doorbell pacing mode is enabled for this chip context. */
static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
{
	return cctx->modes.dbr_pacing;
}

#endif /* __BNXT_QPLIB_RES_H__ */