/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Statistics
 *
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "hw_counters.h"
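
/*
 * Names exported for each counter through the rdma_hw_stats interface,
 * indexed by the BNXT_RE_* counter enum values.
 */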
static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
	[BNXT_RE_ACTIVE_PD].name = "active_pds",
	[BNXT_RE_ACTIVE_AH].name = "active_ahs",
	[BNXT_RE_ACTIVE_QP].name = "active_qps",
	[BNXT_RE_ACTIVE_RC_QP].name = "active_rc_qps",
	[BNXT_RE_ACTIVE_UD_QP].name = "active_ud_qps",
	[BNXT_RE_ACTIVE_SRQ].name = "active_srqs",
	[BNXT_RE_ACTIVE_CQ].name = "active_cqs",
	[BNXT_RE_ACTIVE_MR].name = "active_mrs",
	[BNXT_RE_ACTIVE_MW].name = "active_mws",
	[BNXT_RE_WATERMARK_PD].name = "watermark_pds",
	[BNXT_RE_WATERMARK_AH].name = "watermark_ahs",
	[BNXT_RE_WATERMARK_QP].name = "watermark_qps",
	[BNXT_RE_WATERMARK_RC_QP].name = "watermark_rc_qps",
	[BNXT_RE_WATERMARK_UD_QP].name = "watermark_ud_qps",
	[BNXT_RE_WATERMARK_SRQ].name = "watermark_srqs",
	[BNXT_RE_WATERMARK_CQ].name = "watermark_cqs",
	[BNXT_RE_WATERMARK_MR].name = "watermark_mrs",
	[BNXT_RE_WATERMARK_MW].name = "watermark_mws",
	[BNXT_RE_RESIZE_CQ_CNT].name = "resize_cq_cnt",
	[BNXT_RE_RX_PKTS].name = "rx_pkts",
	[BNXT_RE_RX_BYTES].name = "rx_bytes",
	[BNXT_RE_TX_PKTS].name = "tx_pkts",
	[BNXT_RE_TX_BYTES].name = "tx_bytes",
	[BNXT_RE_RECOVERABLE_ERRORS].name = "recoverable_errors",
	[BNXT_RE_TX_ERRORS].name = "tx_roce_errors",
	[BNXT_RE_TX_DISCARDS].name = "tx_roce_discards",
	[BNXT_RE_RX_ERRORS].name = "rx_roce_errors",
	[BNXT_RE_RX_DISCARDS].name = "rx_roce_discards",
	[BNXT_RE_TO_RETRANSMITS].name = "to_retransmits",
	[BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "seq_err_naks_rcvd",
	[BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
	[BNXT_RE_RNR_NAKS_RCVD].name = "rnr_naks_rcvd",
	[BNXT_RE_MISSING_RESP].name = "missing_resp",
	[BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err",
	[BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err",
	[BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err",
	[BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err",
	[BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err",
	[BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "remote_invalid_req_err",
	[BNXT_RE_REMOTE_ACCESS_ERR].name = "remote_access_err",
	[BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err",
	[BNXT_RE_DUP_REQ].name = "dup_req",
	[BNXT_RE_RES_EXCEED_MAX].name = "res_exceed_max",
	[BNXT_RE_RES_LENGTH_MISMATCH].name = "res_length_mismatch",
	[BNXT_RE_RES_EXCEEDS_WQE].name = "res_exceeds_wqe",
	[BNXT_RE_RES_OPCODE_ERR].name = "res_opcode_err",
	[BNXT_RE_RES_RX_INVALID_RKEY].name = "res_rx_invalid_rkey",
	[BNXT_RE_RES_RX_DOMAIN_ERR].name = "res_rx_domain_err",
	[BNXT_RE_RES_RX_NO_PERM].name = "res_rx_no_perm",
	[BNXT_RE_RES_RX_RANGE_ERR].name = "res_rx_range_err",
	[BNXT_RE_RES_TX_INVALID_RKEY].name = "res_tx_invalid_rkey",
	[BNXT_RE_RES_TX_DOMAIN_ERR].name = "res_tx_domain_err",
	[BNXT_RE_RES_TX_NO_PERM].name = "res_tx_no_perm",
	[BNXT_RE_RES_TX_RANGE_ERR].name = "res_tx_range_err",
	[BNXT_RE_RES_IRRQ_OFLOW].name = "res_irrq_oflow",
	[BNXT_RE_RES_UNSUP_OPCODE].name = "res_unsup_opcode",
	[BNXT_RE_RES_UNALIGNED_ATOMIC].name = "res_unaligned_atomic",
	[BNXT_RE_RES_REM_INV_ERR].name = "res_rem_inv_err",
	[BNXT_RE_RES_MEM_ERROR].name = "res_mem_err",
	[BNXT_RE_RES_SRQ_ERR].name = "res_srq_err",
	[BNXT_RE_RES_CMP_ERR].name = "res_cmp_err",
	[BNXT_RE_RES_INVALID_DUP_RKEY].name = "res_invalid_dup_rkey",
	[BNXT_RE_RES_WQE_FORMAT_ERR].name = "res_wqe_format_err",
	[BNXT_RE_RES_CQ_LOAD_ERR].name = "res_cq_load_err",
	[BNXT_RE_RES_SRQ_LOAD_ERR].name = "res_srq_load_err",
	[BNXT_RE_RES_TX_PCI_ERR].name = "res_tx_pci_err",
	[BNXT_RE_RES_RX_PCI_ERR].name = "res_rx_pci_err",
	[BNXT_RE_OUT_OF_SEQ_ERR].name = "oos_drop_count",
	[BNXT_RE_TX_ATOMIC_REQ].name = "tx_atomic_req",
	[BNXT_RE_TX_READ_REQ].name = "tx_read_req",
	[BNXT_RE_TX_READ_RES].name = "tx_read_resp",
	[BNXT_RE_TX_WRITE_REQ].name = "tx_write_req",
	[BNXT_RE_TX_SEND_REQ].name = "tx_send_req",
	[BNXT_RE_TX_ROCE_PKTS].name = "tx_roce_only_pkts",
	[BNXT_RE_TX_ROCE_BYTES].name = "tx_roce_only_bytes",
	[BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_req",
	[BNXT_RE_RX_READ_REQ].name = "rx_read_req",
	[BNXT_RE_RX_READ_RESP].name = "rx_read_resp",
	[BNXT_RE_RX_WRITE_REQ].name = "rx_write_req",
	[BNXT_RE_RX_SEND_REQ].name = "rx_send_req",
	[BNXT_RE_RX_ROCE_PKTS].name = "rx_roce_only_pkts",
	[BNXT_RE_RX_ROCE_BYTES].name = "rx_roce_only_bytes",
	[BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts",
	[BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes",
	[BNXT_RE_OOB].name = "rx_out_of_buffer",
	[BNXT_RE_TX_CNP].name = "tx_cnp_pkts",
	[BNXT_RE_RX_CNP].name = "rx_cnp_pkts",
	[BNXT_RE_RX_ECN].name = "rx_ecn_marked_pkts",
	[BNXT_RE_PACING_RESCHED].name = "pacing_reschedule",
	[BNXT_RE_PACING_CMPL].name = "pacing_complete",
	[BNXT_RE_PACING_ALERT].name = "pacing_alerts",
	[BNXT_RE_DB_FIFO_REG].name = "db_fifo_register",
};
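
/*
 * Copy the extended per-function RoCE statistics reported by firmware into
 * the rdma_hw_stats value array.
 */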
static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
				   struct rdma_hw_stats *stats,
				   struct bnxt_qplib_ext_stat *s)
{
	stats->value[BNXT_RE_TX_ATOMIC_REQ] = s->tx_atomic_req;
	stats->value[BNXT_RE_TX_READ_REQ] = s->tx_read_req;
	stats->value[BNXT_RE_TX_READ_RES] = s->tx_read_res;
	stats->value[BNXT_RE_TX_WRITE_REQ] = s->tx_write_req;
	stats->value[BNXT_RE_TX_SEND_REQ] = s->tx_send_req;
	stats->value[BNXT_RE_TX_ROCE_PKTS] = s->tx_roce_pkts;
	stats->value[BNXT_RE_TX_ROCE_BYTES] = s->tx_roce_bytes;
	stats->value[BNXT_RE_RX_ATOMIC_REQ] = s->rx_atomic_req;
	stats->value[BNXT_RE_RX_READ_REQ] = s->rx_read_req;
	stats->value[BNXT_RE_RX_READ_RESP] = s->rx_read_res;
	stats->value[BNXT_RE_RX_WRITE_REQ] = s->rx_write_req;
	stats->value[BNXT_RE_RX_SEND_REQ] = s->rx_send_req;
	stats->value[BNXT_RE_RX_ROCE_PKTS] = s->rx_roce_pkts;
	stats->value[BNXT_RE_RX_ROCE_BYTES] = s->rx_roce_bytes;
	stats->value[BNXT_RE_RX_ROCE_GOOD_PKTS] = s->rx_roce_good_pkts;
	stats->value[BNXT_RE_RX_ROCE_GOOD_BYTES] = s->rx_roce_good_bytes;
	stats->value[BNXT_RE_OOB] = s->rx_out_of_buffer;
	stats->value[BNXT_RE_TX_CNP] = s->tx_cnp;
	stats->value[BNXT_RE_RX_CNP] = s->rx_cnp;
	stats->value[BNXT_RE_RX_ECN] = s->rx_ecn_marked;
	stats->value[BNXT_RE_OUT_OF_SEQ_ERR] = s->rx_out_of_sequence;
}

static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev,
				struct rdma_hw_stats *stats)
{
	struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
	u32 fid;
	int rc;

	fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
	rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
	if (rc)
		goto done;
	bnxt_re_copy_ext_stats(rdev, stats, estat);

done:
	return rc;
}
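
/*
 * Copy the RoCE error counters queried from firmware into the
 * rdma_hw_stats value array.
 */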
static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
				   struct rdma_hw_stats *stats,
				   struct bnxt_qplib_roce_stats *err_s)
{
	stats->value[BNXT_RE_TO_RETRANSMITS] =
				err_s->to_retransmits;
	stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] =
				err_s->seq_err_naks_rcvd;
	stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] =
				err_s->max_retry_exceeded;
	stats->value[BNXT_RE_RNR_NAKS_RCVD] =
				err_s->rnr_naks_rcvd;
	stats->value[BNXT_RE_MISSING_RESP] =
				err_s->missing_resp;
	stats->value[BNXT_RE_UNRECOVERABLE_ERR] =
				err_s->unrecoverable_err;
	stats->value[BNXT_RE_BAD_RESP_ERR] =
				err_s->bad_resp_err;
	stats->value[BNXT_RE_LOCAL_QP_OP_ERR] =
				err_s->local_qp_op_err;
	stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] =
				err_s->local_protection_err;
	stats->value[BNXT_RE_MEM_MGMT_OP_ERR] =
				err_s->mem_mgmt_op_err;
	stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] =
				err_s->remote_invalid_req_err;
	stats->value[BNXT_RE_REMOTE_ACCESS_ERR] =
				err_s->remote_access_err;
	stats->value[BNXT_RE_REMOTE_OP_ERR] =
				err_s->remote_op_err;
	stats->value[BNXT_RE_DUP_REQ] =
				err_s->dup_req;
	stats->value[BNXT_RE_RES_EXCEED_MAX] =
				err_s->res_exceed_max;
	stats->value[BNXT_RE_RES_LENGTH_MISMATCH] =
				err_s->res_length_mismatch;
	stats->value[BNXT_RE_RES_EXCEEDS_WQE] =
				err_s->res_exceeds_wqe;
	stats->value[BNXT_RE_RES_OPCODE_ERR] =
				err_s->res_opcode_err;
	stats->value[BNXT_RE_RES_RX_INVALID_RKEY] =
				err_s->res_rx_invalid_rkey;
	stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] =
				err_s->res_rx_domain_err;
	stats->value[BNXT_RE_RES_RX_NO_PERM] =
				err_s->res_rx_no_perm;
	stats->value[BNXT_RE_RES_RX_RANGE_ERR] =
				err_s->res_rx_range_err;
	stats->value[BNXT_RE_RES_TX_INVALID_RKEY] =
				err_s->res_tx_invalid_rkey;
	stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] =
				err_s->res_tx_domain_err;
	stats->value[BNXT_RE_RES_TX_NO_PERM] =
				err_s->res_tx_no_perm;
	stats->value[BNXT_RE_RES_TX_RANGE_ERR] =
				err_s->res_tx_range_err;
	stats->value[BNXT_RE_RES_IRRQ_OFLOW] =
				err_s->res_irrq_oflow;
	stats->value[BNXT_RE_RES_UNSUP_OPCODE] =
				err_s->res_unsup_opcode;
	stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] =
				err_s->res_unaligned_atomic;
	stats->value[BNXT_RE_RES_REM_INV_ERR] =
				err_s->res_rem_inv_err;
	stats->value[BNXT_RE_RES_MEM_ERROR] =
				err_s->res_mem_error;
	stats->value[BNXT_RE_RES_SRQ_ERR] =
				err_s->res_srq_err;
	stats->value[BNXT_RE_RES_CMP_ERR] =
				err_s->res_cmp_err;
	stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] =
				err_s->res_invalid_dup_rkey;
	stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] =
				err_s->res_wqe_format_err;
	stats->value[BNXT_RE_RES_CQ_LOAD_ERR] =
				err_s->res_cq_load_err;
	stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] =
				err_s->res_srq_load_err;
	stats->value[BNXT_RE_RES_TX_PCI_ERR] =
				err_s->res_tx_pci_err;
	stats->value[BNXT_RE_RES_RX_PCI_ERR] =
				err_s->res_rx_pci_err;
	stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
				err_s->res_oos_drop_count;
}

static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
					 struct rdma_hw_stats *stats)
{
	struct bnxt_re_db_pacing_stats *pacing_s = &rdev->stats.pacing;

	stats->value[BNXT_RE_PACING_RESCHED] = pacing_s->resched;
	stats->value[BNXT_RE_PACING_CMPL] = pacing_s->complete;
	stats->value[BNXT_RE_PACING_ALERT] = pacing_s->alerts;
	stats->value[BNXT_RE_DB_FIFO_REG] =
		readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
}
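
/**
 * bnxt_re_ib_get_hw_stats - Refresh the counters exposed for a port
 * @ibdev: IB device
 * @stats: counter array to fill in
 * @port: port number, must be non-zero
 * @index: counter index (unused)
 *
 * Fills in the software resource counters, the per-function hardware
 * statistics block and, where supported, the RoCE error and extended
 * counters queried from firmware.
 *
 * Return: the number of counters in @stats, or -EINVAL on bad arguments.
 */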
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
			    struct rdma_hw_stats *stats,
			    u32 port, int index)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
	struct bnxt_qplib_roce_stats *err_s = NULL;
	struct ctx_hw_stats *hw_stats = NULL;
	int rc = 0;

	hw_stats = rdev->qplib_ctx.stats.dma;
	if (!port || !stats)
		return -EINVAL;

	stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&res_s->qp_count);
	stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&res_s->rc_qp_count);
	stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&res_s->ud_qp_count);
	stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&res_s->srq_count);
	stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&res_s->cq_count);
	stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&res_s->mr_count);
	stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&res_s->mw_count);
	stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&res_s->pd_count);
	stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&res_s->ah_count);
	stats->value[BNXT_RE_WATERMARK_QP] = res_s->qp_watermark;
	stats->value[BNXT_RE_WATERMARK_RC_QP] = res_s->rc_qp_watermark;
	stats->value[BNXT_RE_WATERMARK_UD_QP] = res_s->ud_qp_watermark;
	stats->value[BNXT_RE_WATERMARK_SRQ] = res_s->srq_watermark;
	stats->value[BNXT_RE_WATERMARK_CQ] = res_s->cq_watermark;
	stats->value[BNXT_RE_WATERMARK_MR] = res_s->mr_watermark;
	stats->value[BNXT_RE_WATERMARK_MW] = res_s->mw_watermark;
	stats->value[BNXT_RE_WATERMARK_PD] = res_s->pd_watermark;
	stats->value[BNXT_RE_WATERMARK_AH] = res_s->ah_watermark;
	stats->value[BNXT_RE_RESIZE_CQ_CNT] = atomic_read(&res_s->resize_count);

	if (hw_stats) {
		stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
			le64_to_cpu(hw_stats->tx_bcast_pkts);
		stats->value[BNXT_RE_TX_DISCARDS] =
			le64_to_cpu(hw_stats->tx_discard_pkts);
		stats->value[BNXT_RE_TX_ERRORS] =
			le64_to_cpu(hw_stats->tx_error_pkts);
		stats->value[BNXT_RE_RX_ERRORS] =
			le64_to_cpu(hw_stats->rx_error_pkts);
		stats->value[BNXT_RE_RX_DISCARDS] =
			le64_to_cpu(hw_stats->rx_discard_pkts);
		stats->value[BNXT_RE_RX_PKTS] =
			le64_to_cpu(hw_stats->rx_ucast_pkts);
		stats->value[BNXT_RE_RX_BYTES] =
			le64_to_cpu(hw_stats->rx_ucast_bytes);
		stats->value[BNXT_RE_TX_PKTS] =
			le64_to_cpu(hw_stats->tx_ucast_pkts);
		stats->value[BNXT_RE_TX_BYTES] =
			le64_to_cpu(hw_stats->tx_ucast_bytes);
	}
	err_s = &rdev->stats.rstat.errs;
	if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
		rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, err_s);
		if (rc) {
			clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
				  &rdev->flags);
			goto done;
		}
		bnxt_re_copy_err_stats(rdev, stats, err_s);
		if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) &&
		    !rdev->is_virtfn) {
			rc = bnxt_re_get_ext_stat(rdev, stats);
			if (rc) {
				clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
					  &rdev->flags);
				goto done;
			}
		}
		if (rdev->pacing.dbr_pacing &&
		    bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
			bnxt_re_copy_db_pacing_stats(rdev, stats);
	}

done:
	return bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
		BNXT_RE_NUM_EXT_COUNTERS : BNXT_RE_NUM_STD_COUNTERS;
}
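
/**
 * bnxt_re_ib_alloc_hw_port_stats - Allocate the per-port rdma_hw_stats
 * @ibdev: IB device
 * @port_num: port number
 *
 * Gen P5/P7 chips expose the extended counter set; other chips expose only
 * the standard set.
 */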
struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
						     u32 port_num)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int num_counters = 0;

	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		num_counters = BNXT_RE_NUM_EXT_COUNTERS;
	else
		num_counters = BNXT_RE_NUM_STD_COUNTERS;

	return rdma_alloc_hw_stats_struct(bnxt_re_stat_descs, num_counters,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}