/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Statistics
 *
 */

#include <linux/types.h>
#include <linux/pci.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_pma.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "hw_counters.h"

static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
	[BNXT_RE_ACTIVE_PD].name = "active_pds",
	[BNXT_RE_ACTIVE_AH].name = "active_ahs",
	[BNXT_RE_ACTIVE_QP].name = "active_qps",
	[BNXT_RE_ACTIVE_RC_QP].name = "active_rc_qps",
	[BNXT_RE_ACTIVE_UD_QP].name = "active_ud_qps",
	[BNXT_RE_ACTIVE_SRQ].name = "active_srqs",
	[BNXT_RE_ACTIVE_CQ].name = "active_cqs",
	[BNXT_RE_ACTIVE_MR].name = "active_mrs",
	[BNXT_RE_ACTIVE_MW].name = "active_mws",
	[BNXT_RE_WATERMARK_PD].name = "watermark_pds",
	[BNXT_RE_WATERMARK_AH].name = "watermark_ahs",
	[BNXT_RE_WATERMARK_QP].name = "watermark_qps",
	[BNXT_RE_WATERMARK_RC_QP].name = "watermark_rc_qps",
	[BNXT_RE_WATERMARK_UD_QP].name = "watermark_ud_qps",
	[BNXT_RE_WATERMARK_SRQ].name = "watermark_srqs",
	[BNXT_RE_WATERMARK_CQ].name = "watermark_cqs",
	[BNXT_RE_WATERMARK_MR].name = "watermark_mrs",
	[BNXT_RE_WATERMARK_MW].name = "watermark_mws",
	[BNXT_RE_RESIZE_CQ_CNT].name = "resize_cq_cnt",
	[BNXT_RE_RX_PKTS].name = "rx_pkts",
	[BNXT_RE_RX_BYTES].name = "rx_bytes",
	[BNXT_RE_TX_PKTS].name = "tx_pkts",
	[BNXT_RE_TX_BYTES].name = "tx_bytes",
	[BNXT_RE_RECOVERABLE_ERRORS].name = "recoverable_errors",
	[BNXT_RE_TX_ERRORS].name = "tx_roce_errors",
	[BNXT_RE_TX_DISCARDS].name = "tx_roce_discards",
	[BNXT_RE_RX_ERRORS].name = "rx_roce_errors",
	[BNXT_RE_RX_DISCARDS].name = "rx_roce_discards",
	[BNXT_RE_TO_RETRANSMITS].name = "to_retransmits",
	[BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "seq_err_naks_rcvd",
	[BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
	[BNXT_RE_RNR_NAKS_RCVD].name = "rnr_naks_rcvd",
	[BNXT_RE_MISSING_RESP].name = "missing_resp",
	[BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err",
	[BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err",
	[BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err",
	[BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err",
	[BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err",
	[BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "remote_invalid_req_err",
	[BNXT_RE_REMOTE_ACCESS_ERR].name = "remote_access_err",
	[BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err",
	[BNXT_RE_DUP_REQ].name = "dup_req",
	[BNXT_RE_RES_EXCEED_MAX].name = "res_exceed_max",
	[BNXT_RE_RES_LENGTH_MISMATCH].name = "res_length_mismatch",
	[BNXT_RE_RES_EXCEEDS_WQE].name = "res_exceeds_wqe",
	[BNXT_RE_RES_OPCODE_ERR].name = "res_opcode_err",
	[BNXT_RE_RES_RX_INVALID_RKEY].name = "res_rx_invalid_rkey",
	[BNXT_RE_RES_RX_DOMAIN_ERR].name = "res_rx_domain_err",
	[BNXT_RE_RES_RX_NO_PERM].name = "res_rx_no_perm",
	[BNXT_RE_RES_RX_RANGE_ERR].name = "res_rx_range_err",
	[BNXT_RE_RES_TX_INVALID_RKEY].name = "res_tx_invalid_rkey",
	[BNXT_RE_RES_TX_DOMAIN_ERR].name = "res_tx_domain_err",
	[BNXT_RE_RES_TX_NO_PERM].name = "res_tx_no_perm",
	[BNXT_RE_RES_TX_RANGE_ERR].name = "res_tx_range_err",
	[BNXT_RE_RES_IRRQ_OFLOW].name = "res_irrq_oflow",
	[BNXT_RE_RES_UNSUP_OPCODE].name = "res_unsup_opcode",
	[BNXT_RE_RES_UNALIGNED_ATOMIC].name = "res_unaligned_atomic",
	[BNXT_RE_RES_REM_INV_ERR].name = "res_rem_inv_err",
	[BNXT_RE_RES_MEM_ERROR].name = "res_mem_err",
	[BNXT_RE_RES_SRQ_ERR].name = "res_srq_err",
	[BNXT_RE_RES_CMP_ERR].name = "res_cmp_err",
	[BNXT_RE_RES_INVALID_DUP_RKEY].name = "res_invalid_dup_rkey",
	[BNXT_RE_RES_WQE_FORMAT_ERR].name = "res_wqe_format_err",
	[BNXT_RE_RES_CQ_LOAD_ERR].name = "res_cq_load_err",
	[BNXT_RE_RES_SRQ_LOAD_ERR].name = "res_srq_load_err",
	[BNXT_RE_RES_TX_PCI_ERR].name = "res_tx_pci_err",
	[BNXT_RE_RES_RX_PCI_ERR].name = "res_rx_pci_err",
	[BNXT_RE_OUT_OF_SEQ_ERR].name = "oos_drop_count",
	[BNXT_RE_TX_ATOMIC_REQ].name = "tx_atomic_req",
	[BNXT_RE_TX_READ_REQ].name = "tx_read_req",
	[BNXT_RE_TX_READ_RES].name = "tx_read_resp",
	[BNXT_RE_TX_WRITE_REQ].name = "tx_write_req",
	[BNXT_RE_TX_SEND_REQ].name = "tx_send_req",
	[BNXT_RE_TX_ROCE_PKTS].name = "tx_roce_only_pkts",
	[BNXT_RE_TX_ROCE_BYTES].name = "tx_roce_only_bytes",
	[BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_req",
	[BNXT_RE_RX_READ_REQ].name = "rx_read_req",
	[BNXT_RE_RX_READ_RESP].name = "rx_read_resp",
	[BNXT_RE_RX_WRITE_REQ].name = "rx_write_req",
	[BNXT_RE_RX_SEND_REQ].name = "rx_send_req",
	[BNXT_RE_RX_ROCE_PKTS].name = "rx_roce_only_pkts",
	[BNXT_RE_RX_ROCE_BYTES].name = "rx_roce_only_bytes",
	[BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts",
	[BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes",
	[BNXT_RE_OOB].name = "rx_out_of_buffer",
	[BNXT_RE_TX_CNP].name = "tx_cnp_pkts",
	[BNXT_RE_RX_CNP].name = "rx_cnp_pkts",
	[BNXT_RE_RX_ECN].name = "rx_ecn_marked_pkts",
	[BNXT_RE_PACING_RESCHED].name = "pacing_reschedule",
	[BNXT_RE_PACING_CMPL].name = "pacing_complete",
	[BNXT_RE_PACING_ALERT].name = "pacing_alerts",
	[BNXT_RE_DB_FIFO_REG].name = "db_fifo_register",
};
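
/*
 * Transfer the firmware-reported extended statistics into the
 * rdma_hw_stats value array, using the counter indices named above.
 */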
static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
				   struct rdma_hw_stats *stats,
				   struct bnxt_qplib_ext_stat *s)
{
	stats->value[BNXT_RE_TX_ATOMIC_REQ] = s->tx_atomic_req;
	stats->value[BNXT_RE_TX_READ_REQ] = s->tx_read_req;
	stats->value[BNXT_RE_TX_READ_RES] = s->tx_read_res;
	stats->value[BNXT_RE_TX_WRITE_REQ] = s->tx_write_req;
	stats->value[BNXT_RE_TX_SEND_REQ] = s->tx_send_req;
	stats->value[BNXT_RE_TX_ROCE_PKTS] = s->tx_roce_pkts;
	stats->value[BNXT_RE_TX_ROCE_BYTES] = s->tx_roce_bytes;
	stats->value[BNXT_RE_RX_ATOMIC_REQ] = s->rx_atomic_req;
	stats->value[BNXT_RE_RX_READ_REQ] = s->rx_read_req;
	stats->value[BNXT_RE_RX_READ_RESP] = s->rx_read_res;
	stats->value[BNXT_RE_RX_WRITE_REQ] = s->rx_write_req;
	stats->value[BNXT_RE_RX_SEND_REQ] = s->rx_send_req;
	stats->value[BNXT_RE_RX_ROCE_PKTS] = s->rx_roce_pkts;
	stats->value[BNXT_RE_RX_ROCE_BYTES] = s->rx_roce_bytes;
	stats->value[BNXT_RE_RX_ROCE_GOOD_PKTS] = s->rx_roce_good_pkts;
	stats->value[BNXT_RE_RX_ROCE_GOOD_BYTES] = s->rx_roce_good_bytes;
	stats->value[BNXT_RE_OOB] = s->rx_out_of_buffer;
	stats->value[BNXT_RE_TX_CNP] = s->tx_cnp;
	stats->value[BNXT_RE_RX_CNP] = s->rx_cnp;
	stats->value[BNXT_RE_RX_ECN] = s->rx_ecn_marked;
	stats->value[BNXT_RE_OUT_OF_SEQ_ERR] = s->rx_out_of_sequence;
}

static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev,
				struct rdma_hw_stats *stats)
{
	struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
	u32 fid;
	int rc;

	fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
	rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
	if (rc)
		goto done;
	bnxt_re_copy_ext_stats(rdev, stats, estat);

done:
	return rc;
}
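
/*
 * Copy the per-function RoCE error counters returned by the firmware
 * into the rdma_hw_stats value array.
 */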
static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
				   struct rdma_hw_stats *stats,
				   struct bnxt_qplib_roce_stats *err_s)
{
	stats->value[BNXT_RE_TO_RETRANSMITS] = err_s->to_retransmits;
	stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] = err_s->seq_err_naks_rcvd;
	stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] = err_s->max_retry_exceeded;
	stats->value[BNXT_RE_RNR_NAKS_RCVD] = err_s->rnr_naks_rcvd;
	stats->value[BNXT_RE_MISSING_RESP] = err_s->missing_resp;
	stats->value[BNXT_RE_UNRECOVERABLE_ERR] = err_s->unrecoverable_err;
	stats->value[BNXT_RE_BAD_RESP_ERR] = err_s->bad_resp_err;
	stats->value[BNXT_RE_LOCAL_QP_OP_ERR] = err_s->local_qp_op_err;
	stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] = err_s->local_protection_err;
	stats->value[BNXT_RE_MEM_MGMT_OP_ERR] = err_s->mem_mgmt_op_err;
	stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] = err_s->remote_invalid_req_err;
	stats->value[BNXT_RE_REMOTE_ACCESS_ERR] = err_s->remote_access_err;
	stats->value[BNXT_RE_REMOTE_OP_ERR] = err_s->remote_op_err;
	stats->value[BNXT_RE_DUP_REQ] = err_s->dup_req;
	stats->value[BNXT_RE_RES_EXCEED_MAX] = err_s->res_exceed_max;
	stats->value[BNXT_RE_RES_LENGTH_MISMATCH] = err_s->res_length_mismatch;
	stats->value[BNXT_RE_RES_EXCEEDS_WQE] = err_s->res_exceeds_wqe;
	stats->value[BNXT_RE_RES_OPCODE_ERR] = err_s->res_opcode_err;
	stats->value[BNXT_RE_RES_RX_INVALID_RKEY] = err_s->res_rx_invalid_rkey;
	stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] = err_s->res_rx_domain_err;
	stats->value[BNXT_RE_RES_RX_NO_PERM] = err_s->res_rx_no_perm;
	stats->value[BNXT_RE_RES_RX_RANGE_ERR] = err_s->res_rx_range_err;
	stats->value[BNXT_RE_RES_TX_INVALID_RKEY] = err_s->res_tx_invalid_rkey;
	stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] = err_s->res_tx_domain_err;
	stats->value[BNXT_RE_RES_TX_NO_PERM] = err_s->res_tx_no_perm;
	stats->value[BNXT_RE_RES_TX_RANGE_ERR] = err_s->res_tx_range_err;
	stats->value[BNXT_RE_RES_IRRQ_OFLOW] = err_s->res_irrq_oflow;
	stats->value[BNXT_RE_RES_UNSUP_OPCODE] = err_s->res_unsup_opcode;
	stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] = err_s->res_unaligned_atomic;
	stats->value[BNXT_RE_RES_REM_INV_ERR] = err_s->res_rem_inv_err;
	stats->value[BNXT_RE_RES_MEM_ERROR] = err_s->res_mem_error;
	stats->value[BNXT_RE_RES_SRQ_ERR] = err_s->res_srq_err;
	stats->value[BNXT_RE_RES_CMP_ERR] = err_s->res_cmp_err;
	stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] = err_s->res_invalid_dup_rkey;
	stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] = err_s->res_wqe_format_err;
	stats->value[BNXT_RE_RES_CQ_LOAD_ERR] = err_s->res_cq_load_err;
	stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] = err_s->res_srq_load_err;
	stats->value[BNXT_RE_RES_TX_PCI_ERR] = err_s->res_tx_pci_err;
	stats->value[BNXT_RE_RES_RX_PCI_ERR] = err_s->res_rx_pci_err;
	stats->value[BNXT_RE_OUT_OF_SEQ_ERR] = err_s->res_oos_drop_count;
}

static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
					 struct rdma_hw_stats *stats)
{
	struct bnxt_re_db_pacing_stats *pacing_s = &rdev->stats.pacing;

	stats->value[BNXT_RE_PACING_RESCHED] = pacing_s->resched;
	stats->value[BNXT_RE_PACING_CMPL] = pacing_s->complete;
	stats->value[BNXT_RE_PACING_ALERT] = pacing_s->alerts;
	stats->value[BNXT_RE_DB_FIFO_REG] =
		readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
}
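
/*
 * Fill the IB PMA PortCountersExt attribute (64-bit counters) that lives
 * at out_mad->data + 40. PortXmitData/PortRcvData are defined by the PMA
 * in units of 4-byte words, hence the division by 4. When the chip is not
 * gen P5, or the function is a gen-P5 VF, the L2 unicast counters from the
 * DMA'ed stats block are reported; otherwise the RoCE-only extended
 * statistics queried from firmware are used.
 */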
int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev,
					 struct ib_mad *out_mad)
{
	struct ib_pma_portcounters_ext *pma_cnt_ext;
	struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
	struct ctx_hw_stats *hw_stats = NULL;
	int rc;

	hw_stats = rdev->qplib_ctx.stats.dma;

	pma_cnt_ext = (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
	if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags)) {
		u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);

		rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
		if (rc)
			return rc;
	}

	if ((bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) && rdev->is_virtfn) ||
	    !bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
		pma_cnt_ext->port_xmit_data =
			cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_bytes) / 4);
		pma_cnt_ext->port_rcv_data =
			cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_bytes) / 4);
		pma_cnt_ext->port_xmit_packets =
			cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
		pma_cnt_ext->port_rcv_packets =
			cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
		pma_cnt_ext->port_unicast_rcv_packets =
			cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
		pma_cnt_ext->port_unicast_xmit_packets =
			cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
	} else {
		pma_cnt_ext->port_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
		pma_cnt_ext->port_rcv_data = cpu_to_be64(estat->rx_roce_good_bytes / 4);
		pma_cnt_ext->port_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
		pma_cnt_ext->port_xmit_data = cpu_to_be64(estat->tx_roce_bytes / 4);
		pma_cnt_ext->port_unicast_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
		pma_cnt_ext->port_unicast_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
	}
	return 0;
}
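
/*
 * Fill the classic 32-bit IB PMA PortCounters attribute. The 64-bit
 * hardware counters are truncated to the widths the attribute defines
 * (32-bit data/packet counters, 16-bit error/discard counters, 8-bit
 * constraint counters); the data counters are again in 4-byte-word units.
 */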
int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
{
	struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
	struct ib_pma_portcounters *pma_cnt;
	struct ctx_hw_stats *hw_stats = NULL;
	int rc;

	hw_stats = rdev->qplib_ctx.stats.dma;

	pma_cnt = (struct ib_pma_portcounters *)(out_mad->data + 40);
	if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags)) {
		u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);

		rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
		if (rc)
			return rc;
	}
	if ((bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) && rdev->is_virtfn) ||
	    !bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
		pma_cnt->port_rcv_packets =
			cpu_to_be32((u32)(le64_to_cpu(hw_stats->rx_ucast_pkts)) & 0xFFFFFFFF);
		pma_cnt->port_rcv_data =
			cpu_to_be32((u32)((le64_to_cpu(hw_stats->rx_ucast_bytes) & 0xFFFFFFFF) / 4));
		pma_cnt->port_xmit_packets =
			cpu_to_be32((u32)(le64_to_cpu(hw_stats->tx_ucast_pkts)) & 0xFFFFFFFF);
		pma_cnt->port_xmit_data =
			cpu_to_be32((u32)((le64_to_cpu(hw_stats->tx_ucast_bytes) & 0xFFFFFFFF) / 4));
	} else {
		pma_cnt->port_rcv_packets = cpu_to_be32(estat->rx_roce_good_pkts);
		pma_cnt->port_rcv_data = cpu_to_be32(estat->rx_roce_good_bytes / 4);
		pma_cnt->port_xmit_packets = cpu_to_be32(estat->tx_roce_pkts);
		pma_cnt->port_xmit_data = cpu_to_be32(estat->tx_roce_bytes / 4);
	}
	pma_cnt->port_rcv_constraint_errors =
		(u8)(le64_to_cpu(hw_stats->rx_discard_pkts) & 0xFF);
	pma_cnt->port_rcv_errors =
		cpu_to_be16((u16)(le64_to_cpu(hw_stats->rx_error_pkts) & 0xFFFF));
	pma_cnt->port_xmit_constraint_errors =
		(u8)(le64_to_cpu(hw_stats->tx_error_pkts) & 0xFF);
	pma_cnt->port_xmit_discards =
		cpu_to_be16((u16)(le64_to_cpu(hw_stats->tx_discard_pkts) & 0xFFFF));

	return 0;
}

int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
			    struct rdma_hw_stats *stats,
			    u32 port, int index)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
	struct bnxt_qplib_roce_stats *err_s = NULL;
	struct ctx_hw_stats *hw_stats = NULL;
	int rc = 0;

	hw_stats = rdev->qplib_ctx.stats.dma;
	if (!port || !stats)
		return -EINVAL;

	stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&res_s->qp_count);
	stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&res_s->rc_qp_count);
	stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&res_s->ud_qp_count);
	stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&res_s->srq_count);
	stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&res_s->cq_count);
	stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&res_s->mr_count);
	stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&res_s->mw_count);
	stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&res_s->pd_count);
	stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&res_s->ah_count);
	stats->value[BNXT_RE_WATERMARK_QP] = res_s->qp_watermark;
	stats->value[BNXT_RE_WATERMARK_RC_QP] = res_s->rc_qp_watermark;
	stats->value[BNXT_RE_WATERMARK_UD_QP] = res_s->ud_qp_watermark;
	stats->value[BNXT_RE_WATERMARK_SRQ] = res_s->srq_watermark;
	stats->value[BNXT_RE_WATERMARK_CQ] = res_s->cq_watermark;
	stats->value[BNXT_RE_WATERMARK_MR] = res_s->mr_watermark;
	stats->value[BNXT_RE_WATERMARK_MW] = res_s->mw_watermark;
	stats->value[BNXT_RE_WATERMARK_PD] = res_s->pd_watermark;
	stats->value[BNXT_RE_WATERMARK_AH] = res_s->ah_watermark;
	stats->value[BNXT_RE_RESIZE_CQ_CNT] = atomic_read(&res_s->resize_count);

	if (hw_stats) {
		stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
			le64_to_cpu(hw_stats->tx_bcast_pkts);
		stats->value[BNXT_RE_TX_DISCARDS] =
			le64_to_cpu(hw_stats->tx_discard_pkts);
		stats->value[BNXT_RE_TX_ERRORS] =
			le64_to_cpu(hw_stats->tx_error_pkts);
		stats->value[BNXT_RE_RX_ERRORS] =
			le64_to_cpu(hw_stats->rx_error_pkts);
		stats->value[BNXT_RE_RX_DISCARDS] =
			le64_to_cpu(hw_stats->rx_discard_pkts);
		stats->value[BNXT_RE_RX_PKTS] =
			le64_to_cpu(hw_stats->rx_ucast_pkts);
		stats->value[BNXT_RE_RX_BYTES] =
			le64_to_cpu(hw_stats->rx_ucast_bytes);
		stats->value[BNXT_RE_TX_PKTS] =
			le64_to_cpu(hw_stats->tx_ucast_pkts);
		stats->value[BNXT_RE_TX_BYTES] =
			le64_to_cpu(hw_stats->tx_ucast_bytes);
	}
	err_s = &rdev->stats.rstat.errs;
	if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
		rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, err_s);
		if (rc) {
			clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
			goto done;
		}
		bnxt_re_copy_err_stats(rdev, stats, err_s);
		if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
					     rdev->is_virtfn)) {
			rc = bnxt_re_get_ext_stat(rdev, stats);
			if (rc) {
				clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
				goto done;
			}
		}
		if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
			bnxt_re_copy_db_pacing_stats(rdev, stats);
	}

done:
	return bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
	       BNXT_RE_NUM_EXT_COUNTERS : BNXT_RE_NUM_STD_COUNTERS;
}
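
/*
 * Allocate the per-port rdma_hw_stats structure. Gen P5/P7 devices expose
 * the extended counter set; older devices expose only the standard set.
 */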
struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
						     u32 port_num)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int num_counters = 0;

	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		num_counters = BNXT_RE_NUM_EXT_COUNTERS;
	else
		num_counters = BNXT_RE_NUM_STD_COUNTERS;

	return rdma_alloc_hw_stats_struct(bnxt_re_stat_descs, num_counters,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}