xref: /linux/drivers/infiniband/hw/bnxt_re/hw_counters.c (revision 8d765af51a099884bab37a51e211c7047f67f1f3)
1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: Statistics
37  *
38  */
39 
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <rdma/ib_mad.h>
43 #include <rdma/ib_pma.h>
44 
45 #include "roce_hsi.h"
46 #include "qplib_res.h"
47 #include "qplib_sp.h"
48 #include "qplib_fp.h"
49 #include "qplib_rcfw.h"
50 #include "bnxt_re.h"
51 #include "hw_counters.h"
52 
/*
 * Descriptor table for all hw counters exported through the rdma stats
 * interface.  Indexed by the BNXT_RE_* enum from hw_counters.h; the name
 * strings are what user space sees under sysfs/rdmatool.  Entries past
 * BNXT_RE_NUM_STD_COUNTERS are only reported on gen P5/P7 chips (see
 * bnxt_re_ib_alloc_hw_port_stats()).
 */
static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
	/* Resource usage: current counts and high watermarks */
	[BNXT_RE_ACTIVE_PD].name		=  "active_pds",
	[BNXT_RE_ACTIVE_AH].name		=  "active_ahs",
	[BNXT_RE_ACTIVE_QP].name		=  "active_qps",
	[BNXT_RE_ACTIVE_RC_QP].name             =  "active_rc_qps",
	[BNXT_RE_ACTIVE_UD_QP].name             =  "active_ud_qps",
	[BNXT_RE_ACTIVE_SRQ].name		=  "active_srqs",
	[BNXT_RE_ACTIVE_CQ].name		=  "active_cqs",
	[BNXT_RE_ACTIVE_MR].name		=  "active_mrs",
	[BNXT_RE_ACTIVE_MW].name		=  "active_mws",
	[BNXT_RE_WATERMARK_PD].name             =  "watermark_pds",
	[BNXT_RE_WATERMARK_AH].name             =  "watermark_ahs",
	[BNXT_RE_WATERMARK_QP].name             =  "watermark_qps",
	[BNXT_RE_WATERMARK_RC_QP].name          =  "watermark_rc_qps",
	[BNXT_RE_WATERMARK_UD_QP].name          =  "watermark_ud_qps",
	[BNXT_RE_WATERMARK_SRQ].name            =  "watermark_srqs",
	[BNXT_RE_WATERMARK_CQ].name             =  "watermark_cqs",
	[BNXT_RE_WATERMARK_MR].name             =  "watermark_mrs",
	[BNXT_RE_WATERMARK_MW].name             =  "watermark_mws",
	[BNXT_RE_RESIZE_CQ_CNT].name            =  "resize_cq_cnt",
	/* L2 traffic counters sourced from the DMA'ed ctx_hw_stats block */
	[BNXT_RE_RX_PKTS].name		=  "rx_pkts",
	[BNXT_RE_RX_BYTES].name		=  "rx_bytes",
	[BNXT_RE_TX_PKTS].name		=  "tx_pkts",
	[BNXT_RE_TX_BYTES].name		=  "tx_bytes",
	[BNXT_RE_RECOVERABLE_ERRORS].name	=  "recoverable_errors",
	[BNXT_RE_TX_ERRORS].name                =  "tx_roce_errors",
	[BNXT_RE_TX_DISCARDS].name              =  "tx_roce_discards",
	[BNXT_RE_RX_ERRORS].name		=  "rx_roce_errors",
	[BNXT_RE_RX_DISCARDS].name		=  "rx_roce_discards",
	/* RoCE protocol error counters from firmware roce_stats */
	[BNXT_RE_TO_RETRANSMITS].name           =  "local_ack_timeout_err",
	[BNXT_RE_SEQ_ERR_NAKS_RCVD].name        =  "packet_seq_err",
	[BNXT_RE_MAX_RETRY_EXCEEDED].name	=  "max_retry_exceeded",
	[BNXT_RE_RNR_NAKS_RCVD].name            =  "rnr_nak_retry_err",
	[BNXT_RE_MISSING_RESP].name             =  "implied_nak_seq_err",
	[BNXT_RE_UNRECOVERABLE_ERR].name     = "unrecoverable_err",
	[BNXT_RE_BAD_RESP_ERR].name          = "bad_resp_err",
	[BNXT_RE_LOCAL_QP_OP_ERR].name       = "local_qp_op_err",
	[BNXT_RE_LOCAL_PROTECTION_ERR].name  = "local_protection_err",
	[BNXT_RE_MEM_MGMT_OP_ERR].name       = "mem_mgmt_op_err",
	[BNXT_RE_REMOTE_INVALID_REQ_ERR].name   = "req_remote_invalid_request",
	[BNXT_RE_REMOTE_ACCESS_ERR].name        = "req_remote_access_errors",
	[BNXT_RE_REMOTE_OP_ERR].name         = "remote_op_err",
	[BNXT_RE_DUP_REQ].name               = "duplicate_request",
	[BNXT_RE_RES_EXCEED_MAX].name        = "res_exceed_max",
	[BNXT_RE_RES_LENGTH_MISMATCH].name   = "resp_local_length_error",
	[BNXT_RE_RES_EXCEEDS_WQE].name       = "res_exceeds_wqe",
	[BNXT_RE_RES_OPCODE_ERR].name        = "res_opcode_err",
	[BNXT_RE_RES_RX_INVALID_RKEY].name   = "res_rx_invalid_rkey",
	[BNXT_RE_RES_RX_DOMAIN_ERR].name     = "res_rx_domain_err",
	[BNXT_RE_RES_RX_NO_PERM].name        = "res_rx_no_perm",
	[BNXT_RE_RES_RX_RANGE_ERR].name      = "res_rx_range_err",
	[BNXT_RE_RES_TX_INVALID_RKEY].name   = "res_tx_invalid_rkey",
	[BNXT_RE_RES_TX_DOMAIN_ERR].name     = "res_tx_domain_err",
	[BNXT_RE_RES_TX_NO_PERM].name        = "res_tx_no_perm",
	[BNXT_RE_RES_TX_RANGE_ERR].name      = "res_tx_range_err",
	[BNXT_RE_RES_IRRQ_OFLOW].name        = "res_irrq_oflow",
	[BNXT_RE_RES_UNSUP_OPCODE].name      = "res_unsup_opcode",
	[BNXT_RE_RES_UNALIGNED_ATOMIC].name  = "res_unaligned_atomic",
	[BNXT_RE_RES_REM_INV_ERR].name       = "res_rem_inv_err",
	[BNXT_RE_RES_MEM_ERROR].name         = "res_mem_err",
	[BNXT_RE_RES_SRQ_ERR].name           = "res_srq_err",
	[BNXT_RE_RES_CMP_ERR].name           = "res_cmp_err",
	[BNXT_RE_RES_INVALID_DUP_RKEY].name  = "res_invalid_dup_rkey",
	[BNXT_RE_RES_WQE_FORMAT_ERR].name    = "res_wqe_format_err",
	[BNXT_RE_RES_CQ_LOAD_ERR].name       = "res_cq_load_err",
	[BNXT_RE_RES_SRQ_LOAD_ERR].name      = "res_srq_load_err",
	[BNXT_RE_RES_TX_PCI_ERR].name        = "res_tx_pci_err",
	[BNXT_RE_RES_RX_PCI_ERR].name        = "res_rx_pci_err",
	[BNXT_RE_OUT_OF_SEQ_ERR].name        = "out_of_sequence",
	/* Extended per-function stats (bnxt_qplib_qext_stat) */
	[BNXT_RE_TX_ATOMIC_REQ].name	     = "tx_atomic_req",
	[BNXT_RE_TX_READ_REQ].name	     = "tx_read_req",
	[BNXT_RE_TX_READ_RES].name	     = "tx_read_resp",
	[BNXT_RE_TX_WRITE_REQ].name	     = "tx_write_req",
	[BNXT_RE_TX_SEND_REQ].name	     = "tx_send_req",
	[BNXT_RE_TX_ROCE_PKTS].name          = "tx_roce_only_pkts",
	[BNXT_RE_TX_ROCE_BYTES].name         = "tx_roce_only_bytes",
	[BNXT_RE_RX_ATOMIC_REQ].name	     = "rx_atomic_requests",
	[BNXT_RE_RX_READ_REQ].name	     = "rx_read_requests",
	[BNXT_RE_RX_READ_RESP].name	     = "rx_read_resp",
	[BNXT_RE_RX_WRITE_REQ].name	     = "rx_write_requests",
	[BNXT_RE_RX_SEND_REQ].name	     = "rx_send_req",
	[BNXT_RE_RX_ROCE_PKTS].name          = "rx_roce_only_pkts",
	[BNXT_RE_RX_ROCE_BYTES].name         = "rx_roce_only_bytes",
	[BNXT_RE_RX_ROCE_GOOD_PKTS].name     = "rx_roce_good_pkts",
	[BNXT_RE_RX_ROCE_GOOD_BYTES].name    = "rx_roce_good_bytes",
	[BNXT_RE_OOB].name		     = "out_of_buffer",
	/* Congestion notification (CNP/ECN) counters */
	[BNXT_RE_TX_CNP].name                = "np_cnp_pkts",
	[BNXT_RE_RX_CNP].name                = "rp_cnp_handled",
	[BNXT_RE_RX_ECN].name                = "np_ecn_marked_roce_packets",
	/* Doorbell pacing counters (gen P5/P7 with pacing enabled) */
	[BNXT_RE_PACING_RESCHED].name        = "pacing_reschedule",
	[BNXT_RE_PACING_CMPL].name           = "pacing_complete",
	[BNXT_RE_PACING_ALERT].name          = "pacing_alerts",
	[BNXT_RE_DB_FIFO_REG].name           = "db_fifo_register",
	/* Aggregates derived in bnxt_re_copy_err_stats() */
	[BNXT_RE_REQ_CQE_ERROR].name            = "req_cqe_error",
	[BNXT_RE_RESP_CQE_ERROR].name           = "resp_cqe_error",
	[BNXT_RE_RESP_REMOTE_ACCESS_ERRS].name  = "resp_remote_access_errors",
};
150 
151 static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
152 				   struct rdma_hw_stats *stats,
153 				   struct bnxt_qplib_ext_stat *s)
154 {
155 	stats->value[BNXT_RE_TX_ATOMIC_REQ] = s->tx_atomic_req;
156 	stats->value[BNXT_RE_TX_READ_REQ]   = s->tx_read_req;
157 	stats->value[BNXT_RE_TX_READ_RES]   = s->tx_read_res;
158 	stats->value[BNXT_RE_TX_WRITE_REQ]  = s->tx_write_req;
159 	stats->value[BNXT_RE_TX_SEND_REQ]   = s->tx_send_req;
160 	stats->value[BNXT_RE_TX_ROCE_PKTS]  = s->tx_roce_pkts;
161 	stats->value[BNXT_RE_TX_ROCE_BYTES] = s->tx_roce_bytes;
162 	stats->value[BNXT_RE_RX_ATOMIC_REQ] = s->rx_atomic_req;
163 	stats->value[BNXT_RE_RX_READ_REQ]   = s->rx_read_req;
164 	stats->value[BNXT_RE_RX_READ_RESP]  = s->rx_read_res;
165 	stats->value[BNXT_RE_RX_WRITE_REQ]  = s->rx_write_req;
166 	stats->value[BNXT_RE_RX_SEND_REQ]   = s->rx_send_req;
167 	stats->value[BNXT_RE_RX_ROCE_PKTS]  = s->rx_roce_pkts;
168 	stats->value[BNXT_RE_RX_ROCE_BYTES] = s->rx_roce_bytes;
169 	stats->value[BNXT_RE_RX_ROCE_GOOD_PKTS] = s->rx_roce_good_pkts;
170 	stats->value[BNXT_RE_RX_ROCE_GOOD_BYTES] = s->rx_roce_good_bytes;
171 	stats->value[BNXT_RE_OOB] = s->rx_out_of_buffer;
172 	stats->value[BNXT_RE_TX_CNP] = s->tx_cnp;
173 	stats->value[BNXT_RE_RX_CNP] = s->rx_cnp;
174 	stats->value[BNXT_RE_RX_ECN] = s->rx_ecn_marked;
175 	stats->value[BNXT_RE_OUT_OF_SEQ_ERR] = s->rx_out_of_sequence;
176 }
177 
178 static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev,
179 				struct rdma_hw_stats *stats)
180 {
181 	struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
182 	u32 fid;
183 	int rc;
184 
185 	fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
186 	rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
187 	if (rc)
188 		goto done;
189 	bnxt_re_copy_ext_stats(rdev, stats, estat);
190 
191 done:
192 	return rc;
193 }
194 
195 static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
196 				   struct rdma_hw_stats *stats,
197 				   struct bnxt_qplib_roce_stats *err_s)
198 {
199 	stats->value[BNXT_RE_TO_RETRANSMITS] =
200 				err_s->to_retransmits;
201 	stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] =
202 				err_s->seq_err_naks_rcvd;
203 	stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] =
204 				err_s->max_retry_exceeded;
205 	stats->value[BNXT_RE_RNR_NAKS_RCVD] =
206 				err_s->rnr_naks_rcvd;
207 	stats->value[BNXT_RE_MISSING_RESP] =
208 				err_s->missing_resp;
209 	stats->value[BNXT_RE_UNRECOVERABLE_ERR] =
210 				err_s->unrecoverable_err;
211 	stats->value[BNXT_RE_BAD_RESP_ERR] =
212 				err_s->bad_resp_err;
213 	stats->value[BNXT_RE_LOCAL_QP_OP_ERR]	=
214 			err_s->local_qp_op_err;
215 	stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] =
216 			err_s->local_protection_err;
217 	stats->value[BNXT_RE_MEM_MGMT_OP_ERR] =
218 			err_s->mem_mgmt_op_err;
219 	stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] =
220 			err_s->remote_invalid_req_err;
221 	stats->value[BNXT_RE_REMOTE_ACCESS_ERR] =
222 			err_s->remote_access_err;
223 	stats->value[BNXT_RE_REMOTE_OP_ERR] =
224 			err_s->remote_op_err;
225 	stats->value[BNXT_RE_DUP_REQ] =
226 			err_s->dup_req;
227 	stats->value[BNXT_RE_RES_EXCEED_MAX] =
228 			err_s->res_exceed_max;
229 	stats->value[BNXT_RE_RES_LENGTH_MISMATCH] =
230 			err_s->res_length_mismatch;
231 	stats->value[BNXT_RE_RES_EXCEEDS_WQE] =
232 			err_s->res_exceeds_wqe;
233 	stats->value[BNXT_RE_RES_OPCODE_ERR] =
234 			err_s->res_opcode_err;
235 	stats->value[BNXT_RE_RES_RX_INVALID_RKEY] =
236 			err_s->res_rx_invalid_rkey;
237 	stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] =
238 			err_s->res_rx_domain_err;
239 	stats->value[BNXT_RE_RES_RX_NO_PERM] =
240 			err_s->res_rx_no_perm;
241 	stats->value[BNXT_RE_RES_RX_RANGE_ERR]  =
242 			err_s->res_rx_range_err;
243 	stats->value[BNXT_RE_RES_TX_INVALID_RKEY] =
244 			err_s->res_tx_invalid_rkey;
245 	stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] =
246 			err_s->res_tx_domain_err;
247 	stats->value[BNXT_RE_RES_TX_NO_PERM] =
248 			err_s->res_tx_no_perm;
249 	stats->value[BNXT_RE_RES_TX_RANGE_ERR]  =
250 			err_s->res_tx_range_err;
251 	stats->value[BNXT_RE_RES_IRRQ_OFLOW] =
252 			err_s->res_irrq_oflow;
253 	stats->value[BNXT_RE_RES_UNSUP_OPCODE]  =
254 			err_s->res_unsup_opcode;
255 	stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] =
256 			err_s->res_unaligned_atomic;
257 	stats->value[BNXT_RE_RES_REM_INV_ERR]   =
258 			err_s->res_rem_inv_err;
259 	stats->value[BNXT_RE_RES_MEM_ERROR] =
260 			err_s->res_mem_error;
261 	stats->value[BNXT_RE_RES_SRQ_ERR] =
262 			err_s->res_srq_err;
263 	stats->value[BNXT_RE_RES_CMP_ERR] =
264 			err_s->res_cmp_err;
265 	stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] =
266 			err_s->res_invalid_dup_rkey;
267 	stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] =
268 			err_s->res_wqe_format_err;
269 	stats->value[BNXT_RE_RES_CQ_LOAD_ERR]   =
270 			err_s->res_cq_load_err;
271 	stats->value[BNXT_RE_RES_SRQ_LOAD_ERR]  =
272 			err_s->res_srq_load_err;
273 	stats->value[BNXT_RE_RES_TX_PCI_ERR]    =
274 			err_s->res_tx_pci_err;
275 	stats->value[BNXT_RE_RES_RX_PCI_ERR]    =
276 			err_s->res_rx_pci_err;
277 	stats->value[BNXT_RE_OUT_OF_SEQ_ERR]    =
278 			err_s->res_oos_drop_count;
279 	stats->value[BNXT_RE_REQ_CQE_ERROR]     =
280 			err_s->bad_resp_err +
281 			err_s->local_qp_op_err +
282 			err_s->local_protection_err +
283 			err_s->mem_mgmt_op_err +
284 			err_s->remote_invalid_req_err +
285 			err_s->remote_access_err +
286 			err_s->remote_op_err;
287 	stats->value[BNXT_RE_RESP_CQE_ERROR] =
288 			err_s->res_cmp_err +
289 			err_s->res_cq_load_err;
290 	stats->value[BNXT_RE_RESP_REMOTE_ACCESS_ERRS] =
291 			err_s->res_rx_no_perm +
292 			err_s->res_tx_no_perm;
293 }
294 
295 static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
296 					 struct rdma_hw_stats *stats)
297 {
298 	struct bnxt_re_db_pacing_stats *pacing_s =  &rdev->stats.pacing;
299 
300 	stats->value[BNXT_RE_PACING_RESCHED] = pacing_s->resched;
301 	stats->value[BNXT_RE_PACING_CMPL] = pacing_s->complete;
302 	stats->value[BNXT_RE_PACING_ALERT] = pacing_s->alerts;
303 	stats->value[BNXT_RE_DB_FIFO_REG] =
304 		readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
305 }
306 
307 int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
308 {
309 	struct ib_pma_portcounters_ext *pma_cnt_ext;
310 	struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
311 	struct ctx_hw_stats *hw_stats = NULL;
312 	int rc;
313 
314 	hw_stats = rdev->qplib_ctx.stats.dma;
315 
316 	pma_cnt_ext = (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
317 	if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags)) {
318 		u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
319 
320 		rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
321 		if (rc)
322 			return rc;
323 	}
324 
325 	pma_cnt_ext = (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
326 	if ((bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) && rdev->is_virtfn) ||
327 	    !bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
328 		pma_cnt_ext->port_xmit_data =
329 			cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_bytes) / 4);
330 		pma_cnt_ext->port_rcv_data =
331 			cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_bytes) / 4);
332 		pma_cnt_ext->port_xmit_packets =
333 			cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
334 		pma_cnt_ext->port_rcv_packets =
335 			cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
336 		pma_cnt_ext->port_unicast_rcv_packets =
337 			cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
338 		pma_cnt_ext->port_unicast_xmit_packets =
339 			cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
340 
341 	} else {
342 		pma_cnt_ext->port_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
343 		pma_cnt_ext->port_rcv_data = cpu_to_be64(estat->rx_roce_good_bytes / 4);
344 		pma_cnt_ext->port_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
345 		pma_cnt_ext->port_xmit_data = cpu_to_be64(estat->tx_roce_bytes / 4);
346 		pma_cnt_ext->port_unicast_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
347 		pma_cnt_ext->port_unicast_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
348 	}
349 	return 0;
350 }
351 
352 int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
353 {
354 	struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
355 	struct ib_pma_portcounters *pma_cnt;
356 	struct ctx_hw_stats *hw_stats = NULL;
357 	int rc;
358 
359 	hw_stats = rdev->qplib_ctx.stats.dma;
360 
361 	pma_cnt = (struct ib_pma_portcounters *)(out_mad->data + 40);
362 	if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags)) {
363 		u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
364 
365 		rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
366 		if (rc)
367 			return rc;
368 	}
369 	if ((bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) && rdev->is_virtfn) ||
370 	    !bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
371 		pma_cnt->port_rcv_packets =
372 			cpu_to_be32((u32)(le64_to_cpu(hw_stats->rx_ucast_pkts)) & 0xFFFFFFFF);
373 		pma_cnt->port_rcv_data =
374 			cpu_to_be32((u32)((le64_to_cpu(hw_stats->rx_ucast_bytes) &
375 					   0xFFFFFFFF) / 4));
376 		pma_cnt->port_xmit_packets =
377 			cpu_to_be32((u32)(le64_to_cpu(hw_stats->tx_ucast_pkts)) & 0xFFFFFFFF);
378 		pma_cnt->port_xmit_data =
379 			cpu_to_be32((u32)((le64_to_cpu(hw_stats->tx_ucast_bytes)
380 					   & 0xFFFFFFFF) / 4));
381 	} else {
382 		pma_cnt->port_rcv_packets = cpu_to_be32(estat->rx_roce_good_pkts);
383 		pma_cnt->port_rcv_data = cpu_to_be32((estat->rx_roce_good_bytes / 4));
384 		pma_cnt->port_xmit_packets = cpu_to_be32(estat->tx_roce_pkts);
385 		pma_cnt->port_xmit_data = cpu_to_be32((estat->tx_roce_bytes / 4));
386 	}
387 	pma_cnt->port_rcv_constraint_errors = (u8)(le64_to_cpu(hw_stats->rx_discard_pkts) & 0xFF);
388 	pma_cnt->port_rcv_errors = cpu_to_be16((u16)(le64_to_cpu(hw_stats->rx_error_pkts)
389 						     & 0xFFFF));
390 	pma_cnt->port_xmit_constraint_errors = (u8)(le64_to_cpu(hw_stats->tx_error_pkts) & 0xFF);
391 	pma_cnt->port_xmit_discards = cpu_to_be16((u16)(le64_to_cpu(hw_stats->tx_discard_pkts)
392 							& 0xFFFF));
393 
394 	return 0;
395 }
396 
397 int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
398 			    struct rdma_hw_stats *stats,
399 			    u32 port, int index)
400 {
401 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
402 	struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
403 	struct bnxt_qplib_roce_stats *err_s = NULL;
404 	struct ctx_hw_stats *hw_stats = NULL;
405 	int rc  = 0;
406 
407 	hw_stats = rdev->qplib_ctx.stats.dma;
408 	if (!port || !stats)
409 		return -EINVAL;
410 
411 	stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&res_s->qp_count);
412 	stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&res_s->rc_qp_count);
413 	stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&res_s->ud_qp_count);
414 	stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&res_s->srq_count);
415 	stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&res_s->cq_count);
416 	stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&res_s->mr_count);
417 	stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&res_s->mw_count);
418 	stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&res_s->pd_count);
419 	stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&res_s->ah_count);
420 	stats->value[BNXT_RE_WATERMARK_QP] = res_s->qp_watermark;
421 	stats->value[BNXT_RE_WATERMARK_RC_QP] = res_s->rc_qp_watermark;
422 	stats->value[BNXT_RE_WATERMARK_UD_QP] = res_s->ud_qp_watermark;
423 	stats->value[BNXT_RE_WATERMARK_SRQ] = res_s->srq_watermark;
424 	stats->value[BNXT_RE_WATERMARK_CQ] = res_s->cq_watermark;
425 	stats->value[BNXT_RE_WATERMARK_MR] = res_s->mr_watermark;
426 	stats->value[BNXT_RE_WATERMARK_MW] = res_s->mw_watermark;
427 	stats->value[BNXT_RE_WATERMARK_PD] = res_s->pd_watermark;
428 	stats->value[BNXT_RE_WATERMARK_AH] = res_s->ah_watermark;
429 	stats->value[BNXT_RE_RESIZE_CQ_CNT] = atomic_read(&res_s->resize_count);
430 
431 	if (hw_stats) {
432 		stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
433 			le64_to_cpu(hw_stats->tx_bcast_pkts);
434 		stats->value[BNXT_RE_TX_DISCARDS] =
435 			le64_to_cpu(hw_stats->tx_discard_pkts);
436 		stats->value[BNXT_RE_TX_ERRORS] =
437 			le64_to_cpu(hw_stats->tx_error_pkts);
438 		stats->value[BNXT_RE_RX_ERRORS] =
439 			le64_to_cpu(hw_stats->rx_error_pkts);
440 		stats->value[BNXT_RE_RX_DISCARDS] =
441 			le64_to_cpu(hw_stats->rx_discard_pkts);
442 		stats->value[BNXT_RE_RX_PKTS] =
443 			le64_to_cpu(hw_stats->rx_ucast_pkts);
444 		stats->value[BNXT_RE_RX_BYTES] =
445 			le64_to_cpu(hw_stats->rx_ucast_bytes);
446 		stats->value[BNXT_RE_TX_PKTS] =
447 			le64_to_cpu(hw_stats->tx_ucast_pkts);
448 		stats->value[BNXT_RE_TX_BYTES] =
449 			le64_to_cpu(hw_stats->tx_ucast_bytes);
450 	}
451 	err_s = &rdev->stats.rstat.errs;
452 	if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
453 		rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, err_s);
454 		if (rc) {
455 			clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
456 				  &rdev->flags);
457 			goto done;
458 		}
459 		bnxt_re_copy_err_stats(rdev, stats, err_s);
460 		if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
461 					     rdev->is_virtfn)) {
462 			rc = bnxt_re_get_ext_stat(rdev, stats);
463 			if (rc) {
464 				clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
465 					  &rdev->flags);
466 				goto done;
467 			}
468 		}
469 		if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
470 			bnxt_re_copy_db_pacing_stats(rdev, stats);
471 	}
472 
473 done:
474 	return bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
475 		BNXT_RE_NUM_EXT_COUNTERS : BNXT_RE_NUM_STD_COUNTERS;
476 }
477 
478 struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
479 						     u32 port_num)
480 {
481 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
482 	int num_counters = 0;
483 
484 	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
485 		num_counters = BNXT_RE_NUM_EXT_COUNTERS;
486 	else
487 		num_counters = BNXT_RE_NUM_STD_COUNTERS;
488 
489 	return rdma_alloc_hw_stats_struct(bnxt_re_stat_descs, num_counters,
490 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
491 }
492