/*
 * Copyright (c) 2015-2024, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: main (header)
 */

#ifndef __BNXT_RE_H__
#define __BNXT_RE_H__

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <linux/pci.h>

#include "bnxt.h"
#include "bnxt_ulp.h"
#include "hsi_struct_def.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "ib_verbs.h"
#include "stats.h"

#define ROCE_DRV_MODULE_NAME		"bnxt_re"
#define ROCE_DRV_MODULE_VERSION		"230.0.133.0"
#define ROCE_DRV_MODULE_RELDATE		"April 22, 2024"

#define BNXT_RE_REF_WAIT_COUNT		20
#define BNXT_RE_ROCE_V1_ETH_TYPE	0x8915
#define BNXT_RE_ROCE_V2_PORT_NO		4791
#define BNXT_RE_RES_FREE_WAIT_COUNT	1000

#define BNXT_RE_PAGE_SHIFT_4K		(12)
#define BNXT_RE_PAGE_SHIFT_8K		(13)
#define BNXT_RE_PAGE_SHIFT_64K		(16)
#define BNXT_RE_PAGE_SHIFT_2M		(21)
#define BNXT_RE_PAGE_SHIFT_8M		(23)
#define BNXT_RE_PAGE_SHIFT_1G		(30)

#define BNXT_RE_PAGE_SIZE_4K		BIT(BNXT_RE_PAGE_SHIFT_4K)
#define BNXT_RE_PAGE_SIZE_8K		BIT(BNXT_RE_PAGE_SHIFT_8K)
#define BNXT_RE_PAGE_SIZE_64K		BIT(BNXT_RE_PAGE_SHIFT_64K)
#define BNXT_RE_PAGE_SIZE_2M		BIT(BNXT_RE_PAGE_SHIFT_2M)
#define BNXT_RE_PAGE_SIZE_8M		BIT(BNXT_RE_PAGE_SHIFT_8M)
#define BNXT_RE_PAGE_SIZE_1G		BIT(BNXT_RE_PAGE_SHIFT_1G)

#define BNXT_RE_MAX_MR_SIZE_LOW		BIT(BNXT_RE_PAGE_SHIFT_1G)
#define BNXT_RE_MAX_MR_SIZE_HIGH	BIT(39)
#define BNXT_RE_MAX_MR_SIZE		BNXT_RE_MAX_MR_SIZE_HIGH
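/*
 * Note (worked values, for reference only): with the shifts above,
 * BIT(12) = 4 KB, BIT(13) = 8 KB, BIT(16) = 64 KB, BIT(21) = 2 MB,
 * BIT(23) = 8 MB and BIT(30) = 1 GB.  BNXT_RE_MAX_MR_SIZE_HIGH is
 * BIT(39) = 512 GB, so BNXT_RE_MAX_MR_SIZE allows MRs up to 512 GB,
 * while BNXT_RE_MAX_MR_SIZE_LOW caps them at 1 GB.
 */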
/* Number of MRs to reserve for PF, leaving remainder for VFs */
#define BNXT_RE_RESVD_MR_FOR_PF		(32 * 1024)
#define BNXT_RE_MAX_GID_PER_VF		128

#define BNXT_RE_MAX_VF_QPS_PER_PF	(6 * 1024)

/**
 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
 * @x: value1
 * @y: value2
 */
#ifndef min_not_zero
#define min_not_zero(x, y) ({				\
	typeof(x) __x = (x);				\
	typeof(y) __y = (y);				\
	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
#endif

struct ib_mr_init_attr {
	int	max_reg_descriptors;
	u32	flags;
};

struct bnxt_re_dev;

int bnxt_re_register_netdevice_notifier(struct notifier_block *nb);
int bnxt_re_unregister_netdevice_notifier(struct notifier_block *nb);
int ib_register_device_compat(struct bnxt_re_dev *rdev);

#ifndef __struct_group
#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
	union { \
		struct { MEMBERS } ATTRS; \
		struct TAG { MEMBERS } ATTRS NAME; \
	}
#endif /* __struct_group */
#ifndef struct_group_attr
#define struct_group_attr(NAME, ATTRS, MEMBERS...) \
	__struct_group(/* no tag */, NAME, ATTRS, MEMBERS)
#endif /* struct_group_attr */

/*
 * Percentage of resources of each type reserved for PF.
 * Remaining resources are divided equally among VFs.
 * [0, 100]
 */

#define BNXT_RE_RQ_WQE_THRESHOLD	32
#define BNXT_RE_UD_QP_HW_STALL		0x400000

/*
 * Setting the default ack delay value to 16, which means
 * the default timeout is approx. 260ms (4 usec * 2^(timeout))
 */
#define BNXT_RE_DEFAULT_ACK_DELAY	16
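/*
 * Worked example (informational): per the formula above, an ack delay
 * of 16 gives a local ACK timeout of roughly
 * 4 usec * 2^16 = 262,144 usec, i.e. ~260 ms.
 */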
#define BNXT_RE_BOND_PF_MAX		2

#define BNXT_RE_STATS_CTX_UPDATE_TIMER	250
#define BNXT_RE_30SEC_MSEC		(30 * 1000)

#define BNXT_RE_BOND_RESCHED_CNT	10

#define BNXT_RE_CHIP_NUM_57454		0xC454
#define BNXT_RE_CHIP_NUM_57452		0xC452

#define BNXT_RE_CHIP_NUM_5745X(chip_num)		\
	((chip_num) == BNXT_RE_CHIP_NUM_57454 ||	\
	 (chip_num) == BNXT_RE_CHIP_NUM_57452)

#define BNXT_RE_MIN_KERNEL_QP_TX_DEPTH	4096
#define BNXT_RE_STOP_QPS_BUDGET		200

#define BNXT_RE_HWRM_CMD_TIMEOUT(rdev) \
	((rdev)->chip_ctx->hwrm_cmd_max_timeout * 1000)

extern unsigned int min_tx_depth;
extern struct mutex bnxt_re_dev_lock;
extern struct mutex bnxt_re_mutex;
extern struct list_head bnxt_re_dev_list;

struct bnxt_re_ring_attr {
	dma_addr_t	*dma_arr;
	int		pages;
	int		type;
	u32		depth;
	u32		lrid; /* Logical ring id */
	u16		flags;
	u8		mode;
	u8		rsvd;
};

#define BNXT_RE_MAX_DEVICES		256
#define BNXT_RE_MSIX_FROM_MOD_PARAM	-1
#define BNXT_RE_MIN_MSIX		2
#define BNXT_RE_MAX_MSIX_VF		2
#define BNXT_RE_MAX_MSIX_PF		9
#define BNXT_RE_MAX_MSIX_NPAR_PF	5
#define BNXT_RE_MAX_MSIX		64
#define BNXT_RE_MAX_MSIX_GEN_P5_PF	BNXT_RE_MAX_MSIX
#define BNXT_RE_GEN_P5_MAX_VF		64

struct bnxt_re_nq_record {
	struct bnxt_msix_entry	msix_entries[BNXT_RE_MAX_MSIX];
	/* FP Notification Queue (CQ & SRQ) */
	struct bnxt_qplib_nq	nq[BNXT_RE_MAX_MSIX];
	int			num_msix;
	int			max_init;
	struct mutex		load_lock;
};

struct bnxt_re_work {
	struct work_struct	work;
	unsigned long		event;
	struct bnxt_re_dev	*rdev;
	struct ifnet		*vlan_dev;
	bool			do_lag;

	/* netdev where we received the event */
	struct ifnet		*netdev;
	struct auxiliary_device	*adev;
};

/*
 * Data structure and defines to handle
 * recovery
 */
#define BNXT_RE_RECOVERY_IB_UNINIT_WAIT_RETRY	20
#define BNXT_RE_RECOVERY_IB_UNINIT_WAIT_TIME_MS	30000 /* 30sec timeout */
#define BNXT_RE_PRE_RECOVERY_REMOVE	0x1
#define BNXT_RE_COMPLETE_REMOVE		0x2
#define BNXT_RE_POST_RECOVERY_INIT	0x4
#define BNXT_RE_COMPLETE_INIT		0x8
#define BNXT_RE_COMPLETE_SHUTDOWN	0x10

/* QP1 SQ entry data structure */
struct bnxt_re_sqp_entries {
	u64			wrid;
	struct bnxt_qplib_sge	sge;
	/* For storing the actual qp1 cqe */
	struct bnxt_qplib_cqe	cqe;
	struct bnxt_re_qp	*qp1_qp;
};

/* GSI QP mode enum */
enum bnxt_re_gsi_mode {
	BNXT_RE_GSI_MODE_INVALID = 0,
	BNXT_RE_GSI_MODE_ALL = 1,
	BNXT_RE_GSI_MODE_ROCE_V1,
	BNXT_RE_GSI_MODE_ROCE_V2_IPV4,
	BNXT_RE_GSI_MODE_ROCE_V2_IPV6,
	BNXT_RE_GSI_MODE_UD
};

enum bnxt_re_roce_cap {
	BNXT_RE_FLAG_ROCEV1_CAP = 1,
	BNXT_RE_FLAG_ROCEV2_CAP,
	BNXT_RE_FLAG_ROCEV1_V2_CAP,
};

#define BNXT_RE_MAX_GSI_SQP_ENTRIES	1024
struct bnxt_re_gsi_context {
	u8	gsi_qp_mode;
	bool	first_cq_created;
	/* Start: used only in gsi_mode_all */
	struct bnxt_re_qp		*gsi_qp;
	struct bnxt_re_qp		*gsi_sqp;
	struct bnxt_re_ah		*gsi_sah;
	struct bnxt_re_sqp_entries	*sqp_tbl;
	/* End: used only in gsi_mode_all */
};

struct bnxt_re_tc_rec {
	u8	cos_id_roce;
	u8	tc_roce;
	u8	cos_id_cnp;
	u8	tc_cnp;
	u8	tc_def;
	u8	cos_id_def;
	u8	max_tc;
	u8	roce_prio;
	u8	cnp_prio;
	u8	roce_dscp;
	u8	cnp_dscp;
	u8	prio_valid;
	u8	dscp_valid;
	bool	ecn_enabled;
	bool	serv_type_enabled;
	u64	cnp_dscp_bv;
	u64	roce_dscp_bv;
};

struct bnxt_re_dscp2pri {
	u8	dscp;
	u8	mask;
	u8	pri;
};

struct bnxt_re_cos2bw_cfg {
	u8	pad[3];
	struct_group_attr(cfg, __packed,
		u8	queue_id;
		__le32	min_bw;
		__le32	max_bw;
		u8	tsa;
		u8	pri_lvl;
		u8	bw_weight;
	);
	u8	unused;
};

#define BNXT_RE_AEQ_IDX			0
#define BNXT_RE_MAX_SGID_ENTRIES	256

#define BNXT_RE_DBGFS_FILE_MEM		65536
enum {
	BNXT_RE_STATS_QUERY = 1,
	BNXT_RE_QP_QUERY = 2,
	BNXT_RE_SERVICE_FN_QUERY = 3,
};

struct bnxt_re_dbg_file {
	struct bnxt_re_dev *rdev;
	u32 type;
	union {
		struct bnxt_qplib_query_stats_info sinfo;
		struct bnxt_qplib_query_fn_info fninfo;
	} params;
	char dbg_buf[BNXT_RE_DBGFS_FILE_MEM];
};

struct bnxt_re_debug_entries {
	/* Dir entries */
	struct dentry *qpinfo_dir;
	struct dentry *service_fn_dir;
	/* file entries */
	struct dentry *stat_query;
	struct bnxt_re_dbg_file stat_file;
	struct dentry *qplist_query;
	struct bnxt_re_dbg_file qp_file;
	struct dentry *service_fn_query;
	struct bnxt_re_dbg_file service_fn_file;
};

struct bnxt_re_en_dev_info {
	struct list_head en_list;
	struct bnxt_en_dev *en_dev;
	struct bnxt_re_dev *rdev;
	unsigned long flags;
#define BNXT_RE_FLAG_EN_DEV_NETDEV_REG		0
#define BNXT_RE_FLAG_EN_DEV_PRIMARY_DEV		1
#define BNXT_RE_FLAG_EN_DEV_SECONDARY_DEV	2
	u8 wqe_mode;
	u8 gsi_mode;
	bool te_bypass;
	bool ib_uninit_done;
	u32 num_msix_requested;
	wait_queue_head_t waitq;
};

#define BNXT_RE_DB_FIFO_ROOM_MASK_P5	0x1FFF8000
#define BNXT_RE_MAX_FIFO_DEPTH_P5	0x2c00
#define BNXT_RE_DB_FIFO_ROOM_SHIFT	15

#define BNXT_RE_DB_FIFO_ROOM_MASK_P7	0x3FFF8000
#define BNXT_RE_MAX_FIFO_DEPTH_P7	0x8000

#define BNXT_RE_DB_FIFO_ROOM_MASK(ctx)	\
	(_is_chip_p7((ctx)) ?		\
	 BNXT_RE_DB_FIFO_ROOM_MASK_P7 :	\
	 BNXT_RE_DB_FIFO_ROOM_MASK_P5)
#define BNXT_RE_MAX_FIFO_DEPTH(ctx)	\
	(_is_chip_p7((ctx)) ?		\
	 BNXT_RE_MAX_FIFO_DEPTH_P7 :	\
	 BNXT_RE_MAX_FIFO_DEPTH_P5)
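/*
 * Informational sketch (not part of the original header): the DB FIFO
 * occupancy used by the pacing logic can be derived from the throttling
 * register with the mask/shift/depth values above, along the lines of:
 *
 *	val   = <read of the DB FIFO register at dbr_db_fifo_reg_off>;
 *	room  = (val & BNXT_RE_DB_FIFO_ROOM_MASK(rdev->chip_ctx)) >>
 *		BNXT_RE_DB_FIFO_ROOM_SHIFT;
 *	occup = BNXT_RE_MAX_FIFO_DEPTH(rdev->chip_ctx) - room;
 *
 * The exact register access path is implementation specific; this only
 * illustrates how the mask, shift and depth macros relate.
 */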
struct bnxt_dbq_nq_list {
	int num_nql_entries;
	u16 nq_id[16];
};

#define BNXT_RE_ASYNC_ERR_REP_BASE(_type)				\
	(ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_##_type)

#define BNXT_RE_ASYNC_ERR_DBR_TRESH(_type)				\
	(ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_##_type)

#define BNXT_RE_EVENT_DBR_EPOCH(data)					\
	(((data) &							\
	  BNXT_RE_ASYNC_ERR_DBR_TRESH(EVENT_DATA1_EPOCH_MASK)) >>	\
	 BNXT_RE_ASYNC_ERR_DBR_TRESH(EVENT_DATA1_EPOCH_SFT))

#define BNXT_RE_EVENT_ERROR_REPORT_TYPE(data1)				\
	(((data1) &							\
	  BNXT_RE_ASYNC_ERR_REP_BASE(TYPE_MASK)) >>			\
	 BNXT_RE_ASYNC_ERR_REP_BASE(TYPE_SFT))

#define BNXT_RE_DBR_LIST_ADD(_rdev, _res, _type)			\
{									\
	spin_lock(&(_rdev)->res_list[_type].lock);			\
	list_add_tail(&(_res)->dbr_list,				\
		      &(_rdev)->res_list[_type].head);			\
	spin_unlock(&(_rdev)->res_list[_type].lock);			\
}

#define BNXT_RE_DBR_LIST_DEL(_rdev, _res, _type)			\
{									\
	spin_lock(&(_rdev)->res_list[_type].lock);			\
	list_del(&(_res)->dbr_list);					\
	spin_unlock(&(_rdev)->res_list[_type].lock);			\
}

#define BNXT_RE_CQ_PAGE_LIST_ADD(_uctx, _cq)				\
{									\
	mutex_lock(&(_uctx)->cq_lock);					\
	list_add_tail(&(_cq)->cq_list, &(_uctx)->cq_list);		\
	mutex_unlock(&(_uctx)->cq_lock);				\
}

#define BNXT_RE_CQ_PAGE_LIST_DEL(_uctx, _cq)				\
{									\
	mutex_lock(&(_uctx)->cq_lock);					\
	list_del(&(_cq)->cq_list);					\
	mutex_unlock(&(_uctx)->cq_lock);				\
}

#define BNXT_RE_NETDEV_EVENT(event, x)					\
	do {								\
		if ((event) == (x))					\
			return #x;					\
	} while (0)
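/*
 * Usage sketch (illustrative only): BNXT_RE_NETDEV_EVENT() is meant to
 * be expanded inside a helper that maps an event code to its name, e.g.
 *
 *	static const char *bnxt_re_netevent(unsigned long event)
 *	{
 *		BNXT_RE_NETDEV_EVENT(event, NETDEV_UP);
 *		BNXT_RE_NETDEV_EVENT(event, NETDEV_DOWN);
 *		BNXT_RE_NETDEV_EVENT(event, NETDEV_CHANGE);
 *		return "Unknown";
 *	}
 *
 * The event names shown are assumed examples; the macro simply returns
 * the stringified constant when the code matches.
 */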
/* Do not change the seq of this enum which is followed by dbr recov */
enum {
	BNXT_RE_RES_TYPE_CQ = 0,
	BNXT_RE_RES_TYPE_UCTX,
	BNXT_RE_RES_TYPE_QP,
	BNXT_RE_RES_TYPE_SRQ,
	BNXT_RE_RES_TYPE_MAX
};

struct bnxt_re_dbr_res_list {
	struct list_head head;
	spinlock_t lock;
};

struct bnxt_re_dbr_drop_recov_work {
	struct work_struct work;
	struct bnxt_re_dev *rdev;
	u32 curr_epoch;
};

struct bnxt_re_aer_work {
	struct work_struct work;
	struct bnxt_re_dev *rdev;
};

struct bnxt_re_dbq_stats {
	u64 fifo_occup_slab_1;
	u64 fifo_occup_slab_2;
	u64 fifo_occup_slab_3;
	u64 fifo_occup_slab_4;
	u64 fifo_occup_water_mark;
	u64 do_pacing_slab_1;
	u64 do_pacing_slab_2;
	u64 do_pacing_slab_3;
	u64 do_pacing_slab_4;
	u64 do_pacing_slab_5;
	u64 do_pacing_water_mark;
};

/* Device debug statistics */
struct bnxt_re_drv_dbg_stats {
	struct bnxt_re_dbq_stats dbq;
};

/* DB pacing counters */
struct bnxt_re_dbr_sw_stats {
	u64 dbq_int_recv;
	u64 dbq_int_en;
	u64 dbq_pacing_resched;
	u64 dbq_pacing_complete;
	u64 dbq_pacing_alerts;
	u64 dbr_drop_recov_events;
	u64 dbr_drop_recov_timeouts;
	u64 dbr_drop_recov_timeout_users;
	u64 dbr_drop_recov_event_skips;
};

struct bnxt_re_dev {
	struct ib_device	ibdev;
	struct list_head	list;
	atomic_t		ref_count;
	atomic_t		sched_count;
	unsigned long		flags;
#define BNXT_RE_FLAG_NETDEV_REGISTERED		0
#define BNXT_RE_FLAG_IBDEV_REGISTERED		1
#define BNXT_RE_FLAG_GOT_MSIX			2
#define BNXT_RE_FLAG_HAVE_L2_REF		3
#define BNXT_RE_FLAG_ALLOC_RCFW			4
#define BNXT_RE_FLAG_NET_RING_ALLOC		5
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN		6
#define BNXT_RE_FLAG_ALLOC_CTX			7
#define BNXT_RE_FLAG_STATS_CTX_ALLOC		8
#define BNXT_RE_FLAG_STATS_CTX2_ALLOC		9
#define BNXT_RE_FLAG_RCFW_CHANNEL_INIT		10
#define BNXT_RE_FLAG_WORKER_REG			11
#define BNXT_RE_FLAG_TBLS_ALLOCINIT		12
#define BNXT_RE_FLAG_SETUP_NQ			13
#define BNXT_RE_FLAG_BOND_DEV_REGISTERED	14
#define BNXT_RE_FLAG_PER_PORT_DEBUG_INFO	15
#define BNXT_RE_FLAG_DEV_LIST_INITIALIZED	16
#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED	17
#define BNXT_RE_FLAG_INIT_DCBX_CC_PARAM		18
#define BNXT_RE_FLAG_STOP_IN_PROGRESS		20
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS		29
#define BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS	30
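/*
 * Note (informational): the BNXT_RE_FLAG_* values above are bit numbers
 * within the flags word, intended for test_bit()/set_bit()/clear_bit(),
 * e.g. test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags) as used
 * by bnxt_re_set_pacing_dev_state() near the end of this header.
 */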
	struct ifnet			*netdev;
	struct auxiliary_device		*adev;
	struct bnxt_qplib_chip_ctx	*chip_ctx;
	struct bnxt_en_dev		*en_dev;
	struct bnxt_re_nq_record	nqr;
	int				id;
	struct delayed_work		worker;
	u16				worker_30s;
	struct bnxt_re_tc_rec		tc_rec[2];
	u8				cur_prio_map;
	/* RCFW Channel */
	struct bnxt_qplib_rcfw		rcfw;
	/* Device Resources */
	struct bnxt_qplib_dev_attr	*dev_attr;
	struct bnxt_qplib_res		qplib_res;
	struct bnxt_qplib_dpi		dpi_privileged;
	struct bnxt_qplib_cc_param	cc_param;
	struct mutex			cc_lock;
	struct mutex			qp_lock;
	struct list_head		qp_list;
	u8				roce_mode;

	/* Max of 2 lossless traffic classes supported per port */
	u16				cosq[2];
	/* Start: QP for handling QP1 packets */
	struct bnxt_re_gsi_context	gsi_ctx;
	/* End: QP for handling QP1 packets */
	bool				is_virtfn;
	u32				num_vfs;
	u32				espeed;
	/*
	 * For storing the speed of slave interfaces.
	 * Same as espeed when bond is not configured
	 */
	u32				sl_espeed;
	/* To be used for a workaround for ISER stack */
	u32				min_tx_depth;
	/* To enable qp debug info. Disabled during driver load */
	u32				en_qp_dbg;
	/* Array to handle gid mapping */
	char				*gid_map;

	struct bnxt_re_device_stats	stats;
	struct bnxt_re_drv_dbg_stats	*dbg_stats;
	/* debugfs to expose per port information */
	struct dentry			*port_debug_dir;
	struct dentry			*info;
	struct dentry			*drv_dbg_stats;
	struct dentry			*sp_perf_stats;
	struct dentry			*pdev_debug_dir;
	struct dentry			*pdev_qpinfo_dir;
	struct bnxt_re_debug_entries	*dbg_ent;
	struct workqueue_struct		*resolve_wq;
	struct list_head		mac_wq_list;
	struct workqueue_struct		*dcb_wq;
	struct workqueue_struct		*aer_wq;
	u32				event_bitmap[3];
	bool				unreg_sched;
	u64				dbr_throttling_reg_off;
	u64				dbr_aeq_arm_reg_off;
	u64				dbr_db_fifo_reg_off;
	void				*dbr_page;
	u64				dbr_bar_addr;
	u32				pacing_algo_th;
	u32				pacing_en_int_th;
	u32				do_pacing_save;
	struct workqueue_struct		*dbq_wq;
	struct workqueue_struct		*dbr_drop_recov_wq;
	struct work_struct		dbq_fifo_check_work;
	struct delayed_work		dbq_pacing_work;
	/* protect DB pacing */
	struct mutex			dbq_lock;
	/* Control DBR pacing feature. Set if enabled */
	bool				dbr_pacing;
	/* Control DBR recovery feature. Set if enabled */
	bool				dbr_drop_recov;
	bool				user_dbr_drop_recov;
	/* DBR recovery feature. Set if running */
	bool				dbr_recovery_on;
	u32				user_dbr_drop_recov_timeout;
	/*
	 * Value used for pacing algo when pacing is active
	 */
#define BNXT_RE_MAX_DBR_DO_PACING	0xFFFF
	u32				dbr_do_pacing;
	u32				dbq_watermark;	/* Current watermark set in HW registers */
	u32				dbq_nq_id;	/* Current NQ ID for DBQ events */
	u32				dbq_pacing_time; /* ms */
	u32				dbr_def_do_pacing; /* do_pacing when no congestion */
	u32				dbr_evt_curr_epoch;
	bool				dbq_int_disable;

	bool				mod_exit;
	struct bnxt_re_dbr_sw_stats	*dbr_sw_stats;
	struct bnxt_re_dbr_res_list	res_list[BNXT_RE_RES_TYPE_MAX];
	struct bnxt_dbq_nq_list		nq_list;
	char				dev_name[IB_DEVICE_NAME_MAX];
	atomic_t			dbq_intr_running;
	u32				num_msix_requested;
	unsigned char			*dev_addr; /* For netdev->dev_addr */
};

#define BNXT_RE_RESOLVE_RETRY_COUNT_US	5000000 /* 5 sec */
struct bnxt_re_resolve_dmac_work {
	struct work_struct	work;
	struct list_head	list;
	struct bnxt_re_dev	*rdev;
	struct ib_ah_attr	*ah_attr;
	struct bnxt_re_ah_info	*ah_info;
	atomic_t		status_wait;
};

static inline u8 bnxt_re_get_prio(u8 prio_map)
{
	u8 prio = 0xFF;

	for (prio = 0; prio < 8; prio++)
		if (prio_map & (1UL << prio))
			break;
	return prio;
}
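/*
 * Example (informational): bnxt_re_get_prio() returns the lowest set
 * priority bit, e.g. a prio_map of 0x30 (priorities 4 and 5) yields 4.
 * If no bit is set the loop runs to completion and 8 is returned.
 */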
/* This should be called with bnxt_re_dev_lock mutex held */
static inline bool __bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev)
{
	struct bnxt_re_dev *tmp_rdev;

	list_for_each_entry(tmp_rdev, &bnxt_re_dev_list, list) {
		if (rdev == tmp_rdev)
			return true;
	}
	return false;
}

static inline bool bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev)
{
	struct bnxt_re_dev *tmp_rdev;

	mutex_lock(&bnxt_re_dev_lock);
	list_for_each_entry(tmp_rdev, &bnxt_re_dev_list, list) {
		if (rdev == tmp_rdev) {
			mutex_unlock(&bnxt_re_dev_lock);
			return true;
		}
	}
	mutex_unlock(&bnxt_re_dev_lock);

	pr_debug("bnxt_re: %s : Invalid rdev received rdev = %p\n",
		 __func__, rdev);
	return false;
}

int bnxt_re_send_hwrm_cmd(struct bnxt_re_dev *rdev, void *cmd,
			  int cmdlen);
void bnxt_re_stopqps_and_ib_uninit(struct bnxt_re_dev *rdev);
int bnxt_re_set_hwrm_dscp2pri(struct bnxt_re_dev *rdev,
			      struct bnxt_re_dscp2pri *d2p, u16 count,
			      u16 target_id);
int bnxt_re_query_hwrm_dscp2pri(struct bnxt_re_dev *rdev,
				struct bnxt_re_dscp2pri *d2p, u16 *count,
				u16 target_id);
int bnxt_re_query_hwrm_qportcfg(struct bnxt_re_dev *rdev,
				struct bnxt_re_tc_rec *cnprec, u16 tid);
int bnxt_re_hwrm_cos2bw_qcfg(struct bnxt_re_dev *rdev, u16 target_id,
			     struct bnxt_re_cos2bw_cfg *cfg);
int bnxt_re_hwrm_cos2bw_cfg(struct bnxt_re_dev *rdev, u16 target_id,
			    struct bnxt_re_cos2bw_cfg *cfg);
int bnxt_re_hwrm_pri2cos_cfg(struct bnxt_re_dev *rdev,
			     u16 target_id, u16 port_id,
			     u8 *cos_id_map, u8 pri_map);
int bnxt_re_prio_vlan_tx_update(struct bnxt_re_dev *rdev);
int bnxt_re_get_slot_pf_count(struct bnxt_re_dev *rdev);
struct bnxt_re_dev *bnxt_re_get_peer_pf(struct bnxt_re_dev *rdev);
struct bnxt_re_dev *bnxt_re_from_netdev(struct ifnet *netdev);
u8 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev, u8 selector);
struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev);
void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq);

#define to_bnxt_re(ptr, type, member)	\
	container_of(ptr, type, member)

#define to_bnxt_re_dev(ptr, member)	\
	container_of((ptr), struct bnxt_re_dev, member)

/* Even number functions from port 0 and odd number from port 1 */
#define BNXT_RE_IS_PORT0(rdev)	(!(rdev->en_dev->pdev->devfn & 1))

#define BNXT_RE_ROCE_V1_PACKET		0
#define BNXT_RE_ROCEV2_IPV4_PACKET	2
#define BNXT_RE_ROCEV2_IPV6_PACKET	3
#define BNXT_RE_ACTIVE_MAP_PORT1	0x1  /* port-1 active */
#define BNXT_RE_ACTIVE_MAP_PORT2	0x2  /* port-2 active */

#define BNXT_RE_MEMBER_PORT_MAP		(BNXT_RE_ACTIVE_MAP_PORT1 | \
					 BNXT_RE_ACTIVE_MAP_PORT2)

#define rdev_to_dev(rdev)	((rdev) ? (&(rdev)->ibdev.dev) : NULL)

void bnxt_re_set_dma_device(struct ib_device *ibdev, struct bnxt_re_dev *rdev);
bool bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev);

#define bnxt_re_rdev_ready(rdev)	(bnxt_re_is_rdev_valid(rdev) && \
					 (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)))
#define BNXT_RE_SRIOV_CFG_TIMEOUT	6

int bnxt_re_get_device_stats(struct bnxt_re_dev *rdev);
void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 removal_type,
			   struct auxiliary_device *aux_dev);
void bnxt_re_destroy_lag(struct bnxt_re_dev **rdev);
int bnxt_re_add_device(struct bnxt_re_dev **rdev,
		       struct ifnet *netdev,
		       u8 qp_mode, u8 op_type, u8 wqe_mode, u32 num_msix_requested,
		       struct auxiliary_device *aux_dev);
void bnxt_re_create_base_interface(bool primary);
int bnxt_re_schedule_work(struct bnxt_re_dev *rdev, unsigned long event,
			  struct ifnet *vlan_dev,
			  struct ifnet *netdev,
			  struct auxiliary_device *aux_dev);
void bnxt_re_get_link_speed(struct bnxt_re_dev *rdev);
int _bnxt_re_ib_init(struct bnxt_re_dev *rdev);
int _bnxt_re_ib_init2(struct bnxt_re_dev *rdev);
void bnxt_re_init_resolve_wq(struct bnxt_re_dev *rdev);
void bnxt_re_uninit_resolve_wq(struct bnxt_re_dev *rdev);

/* The rdev ref_count is to protect against premature removal of the device */
static inline void bnxt_re_hold(struct bnxt_re_dev *rdev)
{
	atomic_inc(&rdev->ref_count);
	dev_dbg(rdev_to_dev(rdev),
		"Hold ref_count = 0x%x", atomic_read(&rdev->ref_count));
}

static inline void bnxt_re_put(struct bnxt_re_dev *rdev)
{
	atomic_dec(&rdev->ref_count);
	dev_dbg(rdev_to_dev(rdev),
		"Put ref_count = 0x%x", atomic_read(&rdev->ref_count));
}
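/*
 * Usage note (informational): callers that hand an rdev pointer to
 * deferred work are expected to bracket it with bnxt_re_hold() before
 * scheduling and bnxt_re_put() when the work completes, so the
 * ref_count keeps the device from being removed underneath them.
 */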
/*
 * Responder Error reason codes
 * FIXME: Remove these when the defs
 * are properly included in hsi header
 */
enum res_err_state_reason {
	/* No error. */
	CFCQ_RES_ERR_STATE_REASON_NO_ERROR = 0,
	/*
	 * Incoming Send, RDMA write, or RDMA read exceeds the maximum
	 * transfer length. Detected on RX first and only packets for
	 * write. Detected on RX request for read. This is an RX
	 * Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_EXCEED_MAX,
	/*
	 * RDMA write payload size does not match write length. Detected
	 * when total write payload is not equal to the RDMA write
	 * length that was given in the first or only packet of the
	 * request. This is an RX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH,
	/*
	 * Send payload exceeds RQ/SRQ WQE buffer capacity. The total
	 * send payload that arrived is more than the size of the WQE
	 * buffer that was fetched from the RQ/SRQ. This is an RX
	 * Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE,
	/*
	 * Responder detected opcode error. First, only, middle, or last
	 * packets of incoming requests are improperly ordered with
	 * respect to the previous (PSN) packet, or a first or middle
	 * packet is not full MTU size. This is an RX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_OPCODE_ERROR,
	/*
	 * PSN sequence error retry limit exceeded. The responder
	 * encountered a PSN sequence error for the same PSN too many
	 * times. This can occur via implicit or explicit NAK. This is
	 * an RX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT,
	/*
	 * Invalid R_Key. An incoming request contained an R_Key that
	 * did not reference a valid MR/MW. This error may be detected
	 * by the RX engine for RDMA write or by the TX engine for RDMA
	 * read (detected while servicing IRRQ). This is an RX Detected
	 * Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY,
	/*
	 * Domain error. An incoming request specified an R_Key which
	 * referenced a MR/MW that was not in the same PD as the QP on
	 * which the request arrived. This is an RX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR,
	/*
	 * No permission. An incoming request contained an R_Key that
	 * referenced a MR/MW which did not have the access permission
	 * needed for the operation. This is an RX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION,
	/*
	 * Range error. An incoming request had a combination of R_Key,
	 * VA, and length that was out of bounds of the associated
	 * MR/MW. This is an RX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR,
	/*
	 * Invalid R_Key. An incoming request contained an R_Key that
	 * did not reference a valid MR/MW. This error may be detected
	 * by the RX engine for RDMA write or by the TX engine for RDMA
	 * read (detected while servicing IRRQ). This is a TX Detected
	 * Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY,
	/*
	 * Domain error. An incoming request specified an R_Key which
	 * referenced a MR/MW that was not in the same PD as the QP on
	 * which the request arrived. This is a TX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR,
	/*
	 * No permission. An incoming request contained an R_Key that
	 * referenced a MR/MW which did not have the access permission
	 * needed for the operation. This is a TX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION,
	/*
	 * Range error. An incoming request had a combination of R_Key,
	 * VA, and length that was out of bounds of the associated
	 * MR/MW. This is a TX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR,
	/*
	 * IRRQ overflow. The peer sent us more RDMA read or atomic
	 * requests than the negotiated maximum. This is an RX Detected
	 * Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW,
	/*
	 * Unsupported opcode. The peer sent us a request with an opcode
	 * for a request type that is not supported on this QP. This is
	 * an RX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE,
	/*
	 * Unaligned atomic operation. The VA of an atomic request is on
	 * a memory boundary that prevents atomic execution. This is an
	 * RX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC,
	/*
	 * Remote invalidate error. A send with invalidate request
	 * arrived in which the R_Key to invalidate did not describe a
	 * MR/MW which could be invalidated. RQ WQE completes with error
	 * status. This error is only reported if the send operation did
	 * not fail. If the send operation failed then the remote
	 * invalidate error is not reported. This is an RX Detected
	 * Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_REM_INVALIDATE,
	/*
	 * Local memory error. An RQ/SRQ SGE described an inaccessible
	 * memory. This is an RX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_MEMORY_ERROR,
	/*
	 * SRQ in error. The QP is moving to error state because it
	 * found the SRQ it uses in error. This is an RX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_SRQ_ERROR,
	/*
	 * Completion error. No CQE space available on queue or CQ not
	 * in VALID state. This is a Completion Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_CMP_ERROR,
	/*
	 * Invalid R_Key while resending responses to duplicate request.
	 * This is a TX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_IVALID_DUP_RKEY,
	/*
	 * Problem was found in the format of a WQE in the RQ/SRQ. This
	 * is an RX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR,
	/*
	 * A load error occurred on an attempt to load the CQ Context.
	 * This is a Completion Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR = 0x18,
	/*
	 * A load error occurred on an attempt to load the SRQ Context.
	 * This is an RX Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR,
	/*
	 * A fatal error was detected on an attempt to read from or
	 * write to PCIe on the transmit side. This error is detected by
	 * the TX side, but has the priority of a Completion Detected
	 * Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR = 0x1b,
	/*
	 * A fatal error was detected on an attempt to read from or
	 * write to PCIe on the receive side. This error is detected by
	 * the RX side (or CAGR), but has the priority of a Completion
	 * Detected Error.
	 */
	CFCQ_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR = 0x1c
};
int bnxt_re_host_pf_id_query(struct bnxt_re_dev *rdev,
			     struct bnxt_qplib_query_fn_info *fn_info,
			     u32 *pf_mask, u32 *first_pf);

/* Default DCBx and CC values */
#define BNXT_RE_DEFAULT_CNP_DSCP	48
#define BNXT_RE_DEFAULT_CNP_PRI		7
#define BNXT_RE_DEFAULT_ROCE_DSCP	26
#define BNXT_RE_DEFAULT_ROCE_PRI	3

#define BNXT_RE_DEFAULT_L2_BW		50
#define BNXT_RE_DEFAULT_ROCE_BW		50

#define ROCE_PRIO_VALID			0x0
#define CNP_PRIO_VALID			0x1
#define ROCE_DSCP_VALID			0x0
#define CNP_DSCP_VALID			0x1

int bnxt_re_get_pri_dscp_settings(struct bnxt_re_dev *rdev,
				  u16 target_id,
				  struct bnxt_re_tc_rec *tc_rec);

int bnxt_re_setup_dscp(struct bnxt_re_dev *rdev);
int bnxt_re_clear_dscp(struct bnxt_re_dev *rdev);
int bnxt_re_setup_cnp_cos(struct bnxt_re_dev *rdev, bool reset);

static inline enum ib_port_state bnxt_re_get_link_state(struct bnxt_re_dev *rdev)
{
	if (rdev->netdev->if_drv_flags & IFF_DRV_RUNNING &&
	    rdev->netdev->if_link_state == LINK_STATE_UP)
		return IB_PORT_ACTIVE;
	return IB_PORT_DOWN;
}

static inline int bnxt_re_link_state(struct bnxt_re_dev *rdev)
{
	return bnxt_re_get_link_state(rdev) == IB_PORT_ACTIVE ? 1 : 0;
}

static inline int is_cc_enabled(struct bnxt_re_dev *rdev)
{
	return rdev->cc_param.enable;
}

static inline void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev,
					 struct input *hdr, u16 opcd,
					 u16 crid, u16 trid)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(crid);
	hdr->target_id = cpu_to_le16(trid);
}

static inline void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg,
				       void *msg, int msg_len, void *resp,
				       int resp_max_len, int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}
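/*
 * Typical command flow (illustrative sketch, not part of the original
 * header): a caller builds a request, stamps the HWRM header with
 * bnxt_re_init_hwrm_hdr() and hands it to bnxt_re_send_hwrm_cmd(),
 * roughly:
 *
 *	struct hwrm_func_qcfg_input req = {};
 *	int rc;
 *
 *	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_FUNC_QCFG, -1, -1);
 *	rc = bnxt_re_send_hwrm_cmd(rdev, &req, sizeof(req));
 *
 * The request/opcode shown is only an example; real callers use the
 * HWRM structures from hsi_struct_def.h that match their command, and
 * may wrap the buffers with bnxt_re_fill_fw_msg() where needed.
 */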
static inline bool is_qport_service_type_supported(struct bnxt_re_dev *rdev)
{
	return rdev->tc_rec[0].serv_type_enabled;
}

static inline bool is_bnxt_roce_queue(struct bnxt_re_dev *rdev, u8 ser_prof, u8 prof_type)
{
	if (is_qport_service_type_supported(rdev))
		return (prof_type & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE);
	else
		return (ser_prof == HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE);
}

static inline bool is_bnxt_cnp_queue(struct bnxt_re_dev *rdev, u8 ser_prof, u8 prof_type)
{
	if (is_qport_service_type_supported(rdev))
		return (prof_type & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP);
	else
		return (ser_prof == HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP);
}

#define BNXT_RE_MAP_SH_PAGE		0x0
#define BNXT_RE_MAP_WC			0x1
#define BNXT_RE_DBR_PAGE		0x2
#define BNXT_RE_MAP_DB_RECOVERY_PAGE	0x3

#define BNXT_RE_DBR_RECOV_USERLAND_TIMEOUT	(20)	/* 20 ms */
#define BNXT_RE_DBR_INT_TIME			5	/* ms */
#define BNXT_RE_PACING_EN_INT_THRESHOLD		50	/* Entries in DB FIFO */
#define BNXT_RE_PACING_ALGO_THRESHOLD		250	/* Entries in DB FIFO */
/* Percentage of DB FIFO depth */
#define BNXT_RE_PACING_DBQ_THRESHOLD		BNXT_RE_PACING_DBQ_HIGH_WATERMARK

#define BNXT_RE_PACING_ALARM_TH_MULTIPLE(ctx)	(_is_chip_p7(ctx) ? 0 : 2)

/*
 * Maximum percentage of the configurable DB FIFO depth.
 * The doorbell FIFO depth is 0x2c00, but the DBR_REG_DB_THROTTLING
 * register has only 12 bits to program the high watermark, so the user
 * can configure at most 36% (4095/11264).
 */
#define BNXT_RE_PACING_DBQ_HIGH_WATERMARK	36
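/*
 * Worked numbers (informational): 0x2c00 is 11264 FIFO entries and a
 * 12-bit watermark field maxes out at 4095, so 4095 / 11264 = ~36.3%,
 * hence the 36% cap above.
 */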
/* Default do_pacing value when there is no congestion */
#define BNXT_RE_DBR_DO_PACING_NO_CONGESTION	0x7F /* 1 in 512 probability */

enum {
	BNXT_RE_DBQ_EVENT_SCHED = 0,
	BNXT_RE_DBR_PACING_EVENT = 1,
	BNXT_RE_DBR_NQ_PACING_NOTIFICATION = 2,
};

struct bnxt_re_dbq_work {
	struct work_struct		work;
	struct bnxt_re_dev		*rdev;
	struct hwrm_async_event_cmpl	cmpl;
	u32				event;
};

int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
int bnxt_re_enable_dbr_pacing(struct bnxt_re_dev *rdev);
int bnxt_re_disable_dbr_pacing(struct bnxt_re_dev *rdev);
int bnxt_re_set_dbq_throttling_reg(struct bnxt_re_dev *rdev,
				   u16 nq_id, u32 throttle);
void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev);
int bnxt_re_hwrm_pri2cos_qcfg(struct bnxt_re_dev *rdev, struct bnxt_re_tc_rec *tc_rec,
			      u16 target_id);
void writel_fbsd(struct bnxt_softc *bp, u32, u8, u32);
u32 readl_fbsd(struct bnxt_softc *bp, u32, u8);

static inline unsigned int bnxt_re_get_total_mr_mw_count(struct bnxt_re_dev *rdev)
{
	return (atomic_read(&rdev->stats.rsors.mr_count) +
		atomic_read(&rdev->stats.rsors.mw_count));
}

static inline void bnxt_re_set_def_pacing_threshold(struct bnxt_re_dev *rdev)
{
	rdev->qplib_res.pacing_data->pacing_th = rdev->pacing_algo_th;
	rdev->qplib_res.pacing_data->alarm_th =
		rdev->pacing_algo_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE(rdev->chip_ctx);
}

static inline void bnxt_re_set_def_do_pacing(struct bnxt_re_dev *rdev)
{
	rdev->qplib_res.pacing_data->do_pacing = rdev->dbr_def_do_pacing;
}

static inline void bnxt_re_set_pacing_dev_state(struct bnxt_re_dev *rdev)
{
	rdev->qplib_res.pacing_data->dev_err_state =
		test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
}
#endif