/*
 * Copyright (c) 2015-2024, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: statistics related functions
 */

#include "bnxt_re.h"
#include "bnxt.h"

int bnxt_re_get_flow_stats_from_service_pf(struct bnxt_re_dev *rdev,
					   struct bnxt_re_flow_counters *stats,
					   struct bnxt_qplib_query_stats_info *sinfo)
{
	struct hwrm_cfa_flow_stats_output resp = {};
	struct hwrm_cfa_flow_stats_input req = {};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg = {};
	u16 target_id;
	int rc = 0;

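	/*
	 * A target_id of -1 selects the default (local) function;
	 * otherwise address the HWRM target by the 1-based function ID
	 * derived from sinfo.
	 */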
	if (sinfo->function_id == 0xFFFFFFFF)
		target_id = -1;
	else
		target_id = sinfo->function_id + 1;

	/* Issue HWRM cmd to read flow counters for CNP, RoCEv1 and RoCEv2 (tx and rx) */
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_CFA_FLOW_STATS, -1, target_id);
	req.num_flows = cpu_to_le16(6);
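	/*
	 * Six flow slots: 0/1 = CNP tx/rx, 2/3 = RoCEv1 tx/rx,
	 * 4/5 = RoCEv2 tx/rx.  resp.packet_N/byte_N in the response
	 * correspond to flow_handle_N.
	 */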
	req.flow_handle_0 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT);
	req.flow_handle_1 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT |
					HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
	req.flow_handle_2 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT);
	req.flow_handle_3 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT |
					HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
	req.flow_handle_4 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT);
	req.flow_handle_5 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT |
					HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
	bnxt_re_fill_fw_msg(&fw_msg, &req, sizeof(req), &resp,
			    sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to get CFA Flow stats : rc = 0x%x\n", rc);
		return rc;
	}

	stats->cnp_stats.cnp_tx_pkts = le64_to_cpu(resp.packet_0);
	stats->cnp_stats.cnp_tx_bytes = le64_to_cpu(resp.byte_0);
	stats->cnp_stats.cnp_rx_pkts = le64_to_cpu(resp.packet_1);
	stats->cnp_stats.cnp_rx_bytes = le64_to_cpu(resp.byte_1);

	stats->ro_stats.tx_pkts = le64_to_cpu(resp.packet_2) +
		le64_to_cpu(resp.packet_4);
	stats->ro_stats.tx_bytes = le64_to_cpu(resp.byte_2) +
		le64_to_cpu(resp.byte_4);
	stats->ro_stats.rx_pkts = le64_to_cpu(resp.packet_3) +
		le64_to_cpu(resp.packet_5);
	stats->ro_stats.rx_bytes = le64_to_cpu(resp.byte_3) +
		le64_to_cpu(resp.byte_5);

	return 0;
}

int bnxt_re_get_qos_stats(struct bnxt_re_dev *rdev)
{
	struct bnxt_re_ro_counters roce_only_tmp[2] = {{}, {}};
	struct bnxt_re_cnp_counters tmp_counters[2] = {{}, {}};
	struct hwrm_cfa_flow_stats_output resp = {};
	struct hwrm_cfa_flow_stats_input req = {};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg = {};
	struct bnxt_re_cc_stat *cnps;
	struct bnxt_re_rstat *dstat;
	int rc = 0;
	u64 bytes;
	u64 pkts;

	/* Issue HWRM cmd to read flow counters for CNP, RoCEv1 and RoCEv2 (tx and rx) */
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_CFA_FLOW_STATS, -1, -1);
	req.num_flows = cpu_to_le16(6);
	req.flow_handle_0 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT);
	req.flow_handle_1 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT |
					HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
	req.flow_handle_2 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT);
	req.flow_handle_3 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT |
					HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
	req.flow_handle_4 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT);
	req.flow_handle_5 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT |
					HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to get CFA Flow stats : rc = 0x%x\n", rc);
		goto done;
	}

	tmp_counters[0].cnp_tx_pkts = le64_to_cpu(resp.packet_0);
	tmp_counters[0].cnp_tx_bytes = le64_to_cpu(resp.byte_0);
	tmp_counters[0].cnp_rx_pkts = le64_to_cpu(resp.packet_1);
	tmp_counters[0].cnp_rx_bytes = le64_to_cpu(resp.byte_1);

	roce_only_tmp[0].tx_pkts = le64_to_cpu(resp.packet_2) +
				   le64_to_cpu(resp.packet_4);
	roce_only_tmp[0].tx_bytes = le64_to_cpu(resp.byte_2) +
				    le64_to_cpu(resp.byte_4);
	roce_only_tmp[0].rx_pkts = le64_to_cpu(resp.packet_3) +
				   le64_to_cpu(resp.packet_5);
	roce_only_tmp[0].rx_bytes = le64_to_cpu(resp.byte_3) +
				    le64_to_cpu(resp.byte_5);
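	/*
	 * Note: only index 0 is populated from this response; index 1
	 * (apparently the second port in bonded configurations) stays
	 * zero here.
	 */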

	cnps = &rdev->stats.cnps;
	dstat = &rdev->stats.dstat;
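	/*
	 * First pass: record the baseline only.  Later passes accumulate
	 * the wrap-adjusted deltas into the cur[] counters.
	 */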
	if (!cnps->is_first) {
		/* First query: just save the baseline */
		cnps->is_first = true;
		cnps->prev[0].cnp_tx_pkts = tmp_counters[0].cnp_tx_pkts;
		cnps->prev[0].cnp_tx_bytes = tmp_counters[0].cnp_tx_bytes;
		cnps->prev[0].cnp_rx_pkts = tmp_counters[0].cnp_rx_pkts;
		cnps->prev[0].cnp_rx_bytes = tmp_counters[0].cnp_rx_bytes;

		cnps->prev[1].cnp_tx_pkts = tmp_counters[1].cnp_tx_pkts;
		cnps->prev[1].cnp_tx_bytes = tmp_counters[1].cnp_tx_bytes;
		cnps->prev[1].cnp_rx_pkts = tmp_counters[1].cnp_rx_pkts;
		cnps->prev[1].cnp_rx_bytes = tmp_counters[1].cnp_rx_bytes;

		dstat->prev[0].tx_pkts = roce_only_tmp[0].tx_pkts;
		dstat->prev[0].tx_bytes = roce_only_tmp[0].tx_bytes;
		dstat->prev[0].rx_pkts = roce_only_tmp[0].rx_pkts;
		dstat->prev[0].rx_bytes = roce_only_tmp[0].rx_bytes;

		dstat->prev[1].tx_pkts = roce_only_tmp[1].tx_pkts;
		dstat->prev[1].tx_bytes = roce_only_tmp[1].tx_bytes;
		dstat->prev[1].rx_pkts = roce_only_tmp[1].rx_pkts;
		dstat->prev[1].rx_bytes = roce_only_tmp[1].rx_bytes;
	} else {
		u64 byte_mask, pkts_mask;
		u64 diff;

		byte_mask = bnxt_re_get_cfa_stat_mask(rdev->chip_ctx,
						      BYTE_MASK);
		pkts_mask = bnxt_re_get_cfa_stat_mask(rdev->chip_ctx,
						      PKTS_MASK);
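		/*
		 * Assumption on bnxt_re_stat_diff(): it returns the
		 * wrap-safe delta (new - *prev) & mask and updates *prev.
		 * E.g. with a 32-bit mask, prev = 0xFFFFFFF0 and
		 * new = 0x10 give a delta of 0x20.  The mask width is
		 * chip dependent (see bnxt_re_get_cfa_stat_mask()).
		 */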
		/*
		 * Calculate the number of CNP packets and use the value
		 * to derive the CRC overhead: multiply pkts by 4 and add
		 * the result to the total bytes.
		 */
		pkts = bnxt_re_stat_diff(tmp_counters[0].cnp_tx_pkts,
					 &cnps->prev[0].cnp_tx_pkts,
					 pkts_mask);
		cnps->cur[0].cnp_tx_pkts += pkts;
		diff = bnxt_re_stat_diff(tmp_counters[0].cnp_tx_bytes,
					 &cnps->prev[0].cnp_tx_bytes,
					 byte_mask);
		bytes = diff + pkts * 4;
		cnps->cur[0].cnp_tx_bytes += bytes;
		pkts = bnxt_re_stat_diff(tmp_counters[0].cnp_rx_pkts,
					 &cnps->prev[0].cnp_rx_pkts,
					 pkts_mask);
		cnps->cur[0].cnp_rx_pkts += pkts;
		bytes = bnxt_re_stat_diff(tmp_counters[0].cnp_rx_bytes,
					  &cnps->prev[0].cnp_rx_bytes,
					  byte_mask);
		cnps->cur[0].cnp_rx_bytes += bytes;

		/* Same CRC adjustment for the second set of counters */
		pkts = bnxt_re_stat_diff(tmp_counters[1].cnp_tx_pkts,
					 &cnps->prev[1].cnp_tx_pkts,
					 pkts_mask);
		cnps->cur[1].cnp_tx_pkts += pkts;
		diff = bnxt_re_stat_diff(tmp_counters[1].cnp_tx_bytes,
					 &cnps->prev[1].cnp_tx_bytes,
					 byte_mask);
		cnps->cur[1].cnp_tx_bytes += diff + pkts * 4;
		pkts = bnxt_re_stat_diff(tmp_counters[1].cnp_rx_pkts,
					 &cnps->prev[1].cnp_rx_pkts,
					 pkts_mask);
		cnps->cur[1].cnp_rx_pkts += pkts;
		bytes = bnxt_re_stat_diff(tmp_counters[1].cnp_rx_bytes,
					  &cnps->prev[1].cnp_rx_bytes,
					  byte_mask);
		cnps->cur[1].cnp_rx_bytes += bytes;

		pkts = bnxt_re_stat_diff(roce_only_tmp[0].tx_pkts,
					 &dstat->prev[0].tx_pkts,
					 pkts_mask);
		dstat->cur[0].tx_pkts += pkts;
		diff = bnxt_re_stat_diff(roce_only_tmp[0].tx_bytes,
					 &dstat->prev[0].tx_bytes,
					 byte_mask);
		dstat->cur[0].tx_bytes += diff + pkts * 4;
		pkts = bnxt_re_stat_diff(roce_only_tmp[0].rx_pkts,
					 &dstat->prev[0].rx_pkts,
					 pkts_mask);
		dstat->cur[0].rx_pkts += pkts;

		bytes = bnxt_re_stat_diff(roce_only_tmp[0].rx_bytes,
					  &dstat->prev[0].rx_bytes,
					  byte_mask);
		dstat->cur[0].rx_bytes += bytes;
		pkts = bnxt_re_stat_diff(roce_only_tmp[1].tx_pkts,
					 &dstat->prev[1].tx_pkts,
					 pkts_mask);
		dstat->cur[1].tx_pkts += pkts;
		diff = bnxt_re_stat_diff(roce_only_tmp[1].tx_bytes,
					 &dstat->prev[1].tx_bytes,
					 byte_mask);
		dstat->cur[1].tx_bytes += diff + pkts * 4;
		pkts = bnxt_re_stat_diff(roce_only_tmp[1].rx_pkts,
					 &dstat->prev[1].rx_pkts,
					 pkts_mask);
		dstat->cur[1].rx_pkts += pkts;
		bytes = bnxt_re_stat_diff(roce_only_tmp[1].rx_bytes,
					  &dstat->prev[1].rx_bytes,
					  byte_mask);
		dstat->cur[1].rx_bytes += bytes;
	}
done:
	return rc;
}

static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
				   u8 indx, struct bnxt_qplib_ext_stat *s)
{
	struct bnxt_re_ext_roce_stats *e_errs;
	struct bnxt_re_cnp_counters *cnp;
	struct bnxt_re_ext_rstat *ext_d;
	struct bnxt_re_ro_counters *ro;

	cnp = &rdev->stats.cnps.cur[indx];
	ro = &rdev->stats.dstat.cur[indx];
	ext_d = &rdev->stats.dstat.ext_rstat[indx];
	e_errs = &rdev->stats.dstat.e_errs;

	cnp->cnp_tx_pkts = s->tx_cnp;
	cnp->cnp_rx_pkts = s->rx_cnp;
	/* In bonding mode do not duplicate other stats */
	if (indx)
		return;
	cnp->ecn_marked = s->rx_ecn_marked;

	ro->tx_pkts = s->tx_roce_pkts;
	ro->tx_bytes = s->tx_roce_bytes;
	ro->rx_pkts = s->rx_roce_pkts;
	ro->rx_bytes = s->rx_roce_bytes;

	ext_d->tx.atomic_req = s->tx_atomic_req;
	ext_d->tx.read_req = s->tx_read_req;
	ext_d->tx.read_resp = s->tx_read_res;
	ext_d->tx.write_req = s->tx_write_req;
	ext_d->tx.send_req = s->tx_send_req;
	ext_d->rx.atomic_req = s->rx_atomic_req;
	ext_d->rx.read_req = s->rx_read_req;
	ext_d->rx.read_resp = s->rx_read_res;
	ext_d->rx.write_req = s->rx_write_req;
	ext_d->rx.send_req = s->rx_send_req;
	ext_d->grx.rx_pkts = s->rx_roce_good_pkts;
	ext_d->grx.rx_bytes = s->rx_roce_good_bytes;
	ext_d->rx_dcn_payload_cut = s->rx_dcn_payload_cut;
	ext_d->te_bypassed = s->te_bypassed;
	e_errs->oob = s->rx_out_of_buffer;
	e_errs->oos = s->rx_out_of_sequence;
	e_errs->seq_err_naks_rcvd = s->seq_err_naks_rcvd;
	e_errs->rnr_naks_rcvd = s->rnr_naks_rcvd;
	e_errs->missing_resp = s->missing_resp;
	e_errs->to_retransmits = s->to_retransmits;
	e_errs->dup_req = s->dup_req;
}

static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_ext_stat estat[2] = {{}, {}};
	struct bnxt_qplib_query_stats_info sinfo;
	u32 fid;
	int rc;

	fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
	/* Set default values for sinfo */
	sinfo.function_id = 0xFFFFFFFF;
	sinfo.collection_id = 0xFF;
	sinfo.vf_valid = false;
	rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, &estat[0], &sinfo);
	if (rc)
		goto done;
	bnxt_re_copy_ext_stats(rdev, 0, &estat[0]);

done:
	return rc;
}

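/*
 * Copy L2 counters out of the DMA-mapped hardware stats context,
 * converting from the firmware's little-endian layout.
 */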
static void bnxt_re_copy_rstat(struct bnxt_re_rdata_counters *d,
			       struct ctx_hw_stats_ext *s,
			       bool is_thor)
{
	d->tx_ucast_pkts = le64_to_cpu(s->tx_ucast_pkts);
	d->tx_mcast_pkts = le64_to_cpu(s->tx_mcast_pkts);
	d->tx_bcast_pkts = le64_to_cpu(s->tx_bcast_pkts);
	d->tx_discard_pkts = le64_to_cpu(s->tx_discard_pkts);
	d->tx_error_pkts = le64_to_cpu(s->tx_error_pkts);
	d->tx_ucast_bytes = le64_to_cpu(s->tx_ucast_bytes);
	/* Add four CRC bytes per packet */
	d->tx_ucast_bytes += d->tx_ucast_pkts * 4;
	d->tx_mcast_bytes = le64_to_cpu(s->tx_mcast_bytes);
	d->tx_bcast_bytes = le64_to_cpu(s->tx_bcast_bytes);
	d->rx_ucast_pkts = le64_to_cpu(s->rx_ucast_pkts);
	d->rx_mcast_pkts = le64_to_cpu(s->rx_mcast_pkts);
	d->rx_bcast_pkts = le64_to_cpu(s->rx_bcast_pkts);
	d->rx_discard_pkts = le64_to_cpu(s->rx_discard_pkts);
	d->rx_error_pkts = le64_to_cpu(s->rx_error_pkts);
	d->rx_ucast_bytes = le64_to_cpu(s->rx_ucast_bytes);
	d->rx_mcast_bytes = le64_to_cpu(s->rx_mcast_bytes);
	d->rx_bcast_bytes = le64_to_cpu(s->rx_bcast_bytes);
	if (is_thor) {
		d->rx_agg_pkts = le64_to_cpu(s->rx_tpa_pkt);
		d->rx_agg_bytes = le64_to_cpu(s->rx_tpa_bytes);
		d->rx_agg_events = le64_to_cpu(s->rx_tpa_events);
		d->rx_agg_aborts = le64_to_cpu(s->rx_tpa_errors);
	}
}

static void bnxt_re_get_roce_data_stats(struct bnxt_re_dev *rdev)
{
	bool is_thor = _is_chip_gen_p5_p7(rdev->chip_ctx);
	struct bnxt_re_rdata_counters *rstat;

	rstat = &rdev->stats.dstat.rstat[0];
	bnxt_re_copy_rstat(rstat, rdev->qplib_res.hctx->stats.dma, is_thor);
}

int bnxt_re_get_device_stats(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_query_stats_info sinfo;
	int rc = 0;

	/* Stats are in 1s cadence */
	if (test_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS, &rdev->flags)) {
		if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
					     rdev->is_virtfn))
			rc = bnxt_re_get_ext_stat(rdev);
		else
			rc = bnxt_re_get_qos_stats(rdev);

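		/* Any error other than -ENOMEM disables further flow-stat queries */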
		if (rc && rc != -ENOMEM)
			clear_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS,
				  &rdev->flags);
	}

	if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
		bnxt_re_get_roce_data_stats(rdev);

		/* Set default values for sinfo */
		sinfo.function_id = 0xFFFFFFFF;
		sinfo.collection_id = 0xFF;
		sinfo.vf_valid = false;
		rc = bnxt_qplib_get_roce_error_stats(&rdev->rcfw,
						     &rdev->stats.dstat.errs,
						     &sinfo);
		if (rc && rc != -ENOMEM)
			clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
				  &rdev->flags);
	}

	return rc;
}

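/*
 * Counter names exported to the RDMA core.  The order must match the
 * BNXT_RE_* counter indices used throughout this file.
 */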
static const char * const bnxt_re_stat_descs[] = {
	"link_state",
	"max_qp",
	"max_srq",
	"max_cq",
	"max_mr",
	"max_mw",
	"max_ah",
	"max_pd",
	"active_qp",
	"active_rc_qp",
	"active_ud_qp",
	"active_srq",
	"active_cq",
	"active_mr",
	"active_mw",
	"active_ah",
	"active_pd",
	"qp_watermark",
	"rc_qp_watermark",
	"ud_qp_watermark",
	"srq_watermark",
	"cq_watermark",
	"mr_watermark",
	"mw_watermark",
	"ah_watermark",
	"pd_watermark",
	"resize_cq_count",
	"hw_retransmission",
	"recoverable_errors",
	"rx_pkts",
	"rx_bytes",
	"tx_pkts",
	"tx_bytes",
	"cnp_tx_pkts",
	"cnp_tx_bytes",
	"cnp_rx_pkts",
	"cnp_rx_bytes",
	"roce_only_rx_pkts",
	"roce_only_rx_bytes",
	"roce_only_tx_pkts",
	"roce_only_tx_bytes",
	"rx_roce_error_pkts",
	"rx_roce_discard_pkts",
	"tx_roce_error_pkts",
	"tx_roce_discards_pkts",
	"res_oob_drop_count",
	"tx_atomic_req",
	"rx_atomic_req",
	"tx_read_req",
	"tx_read_resp",
	"rx_read_req",
	"rx_read_resp",
	"tx_write_req",
	"rx_write_req",
	"tx_send_req",
	"rx_send_req",
	"rx_good_pkts",
	"rx_good_bytes",
	"rx_dcn_payload_cut",
	"te_bypassed",
	"rx_ecn_marked_pkts",
	"max_retry_exceeded",
	"to_retransmits",
	"seq_err_naks_rcvd",
	"rnr_naks_rcvd",
	"missing_resp",
	"dup_reqs",
	"unrecoverable_err",
	"bad_resp_err",
	"local_qp_op_err",
	"local_protection_err",
	"mem_mgmt_op_err",
	"remote_invalid_req_err",
	"remote_access_err",
	"remote_op_err",
	"res_exceed_max",
	"res_length_mismatch",
	"res_exceeds_wqe",
	"res_opcode_err",
	"res_rx_invalid_rkey",
	"res_rx_domain_err",
	"res_rx_no_perm",
	"res_rx_range_err",
	"res_tx_invalid_rkey",
	"res_tx_domain_err",
	"res_tx_no_perm",
	"res_tx_range_err",
	"res_irrq_oflow",
	"res_unsup_opcode",
	"res_unaligned_atomic",
	"res_rem_inv_err",
	"res_mem_error64",
	"res_srq_err",
	"res_cmp_err",
	"res_invalid_dup_rkey",
	"res_wqe_format_err",
	"res_cq_load_err",
	"res_srq_load_err",
	"res_tx_pci_err",
	"res_rx_pci_err",
	"res_oos_drop_count",
	"num_irq_started",
	"num_irq_stopped",
	"poll_in_intr_en",
	"poll_in_intr_dis",
	"cmdq_full_dbg_cnt",
	"fw_service_prof_type_sup",
	"dbq_int_recv",
	"dbq_int_en",
	"dbq_pacing_resched",
	"dbq_pacing_complete",
	"dbq_pacing_alerts",
	"dbq_dbr_fifo_reg"
};

static void bnxt_re_print_ext_stat(struct bnxt_re_dev *rdev,
				   struct rdma_hw_stats *stats)
{
	struct bnxt_re_cnp_counters *cnp;
	struct bnxt_re_ext_rstat *ext_s;

	ext_s = &rdev->stats.dstat.ext_rstat[0];
	cnp = &rdev->stats.cnps.cur[0];

	stats->value[BNXT_RE_TX_ATOMIC_REQ] = ext_s->tx.atomic_req;
	stats->value[BNXT_RE_RX_ATOMIC_REQ] = ext_s->rx.atomic_req;
	stats->value[BNXT_RE_TX_READ_REQ] = ext_s->tx.read_req;
	stats->value[BNXT_RE_TX_READ_RESP] = ext_s->tx.read_resp;
	stats->value[BNXT_RE_RX_READ_REQ] = ext_s->rx.read_req;
	stats->value[BNXT_RE_RX_READ_RESP] = ext_s->rx.read_resp;
	stats->value[BNXT_RE_TX_WRITE_REQ] = ext_s->tx.write_req;
	stats->value[BNXT_RE_RX_WRITE_REQ] = ext_s->rx.write_req;
	stats->value[BNXT_RE_TX_SEND_REQ] = ext_s->tx.send_req;
	stats->value[BNXT_RE_RX_SEND_REQ] = ext_s->rx.send_req;
	stats->value[BNXT_RE_RX_GOOD_PKTS] = ext_s->grx.rx_pkts;
	stats->value[BNXT_RE_RX_GOOD_BYTES] = ext_s->grx.rx_bytes;
	if (_is_chip_p7(rdev->chip_ctx)) {
		stats->value[BNXT_RE_RX_DCN_PAYLOAD_CUT] = ext_s->rx_dcn_payload_cut;
		stats->value[BNXT_RE_TE_BYPASSED] = ext_s->te_bypassed;
	}
	stats->value[BNXT_RE_RX_ECN_MARKED_PKTS] = cnp->ecn_marked;
}

static void bnxt_re_print_roce_only_counters(struct bnxt_re_dev *rdev,
					     struct rdma_hw_stats *stats)
{
	struct bnxt_re_ro_counters *roce_only = &rdev->stats.dstat.cur[0];

	stats->value[BNXT_RE_ROCE_ONLY_RX_PKTS] = roce_only->rx_pkts;
	stats->value[BNXT_RE_ROCE_ONLY_RX_BYTES] = roce_only->rx_bytes;
	stats->value[BNXT_RE_ROCE_ONLY_TX_PKTS] = roce_only->tx_pkts;
	stats->value[BNXT_RE_ROCE_ONLY_TX_BYTES] = roce_only->tx_bytes;
}

static void bnxt_re_print_normal_total_counters(struct bnxt_re_dev *rdev,
						struct rdma_hw_stats *stats)
{
	struct bnxt_re_ro_counters *roce_only;
	struct bnxt_re_cc_stat *cnps;

	cnps = &rdev->stats.cnps;
	roce_only = &rdev->stats.dstat.cur[0];

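	/* Totals are CNP traffic plus RoCE-only traffic */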
	stats->value[BNXT_RE_RX_PKTS] = cnps->cur[0].cnp_rx_pkts + roce_only->rx_pkts;
	stats->value[BNXT_RE_RX_BYTES] = cnps->cur[0].cnp_rx_bytes + roce_only->rx_bytes;
	stats->value[BNXT_RE_TX_PKTS] = cnps->cur[0].cnp_tx_pkts + roce_only->tx_pkts;
	stats->value[BNXT_RE_TX_BYTES] = cnps->cur[0].cnp_tx_bytes + roce_only->tx_bytes;
}

static void bnxt_re_print_normal_counters(struct bnxt_re_dev *rdev,
					  struct rdma_hw_stats *rstats)
{
	struct bnxt_re_rdata_counters *stats;
	struct bnxt_re_cc_stat *cnps;
	bool en_disp;

	stats = &rdev->stats.dstat.rstat[0];
	cnps = &rdev->stats.cnps;
	en_disp = !_is_chip_gen_p5_p7(rdev->chip_ctx);

	bnxt_re_print_normal_total_counters(rdev, rstats);
	if (!rdev->is_virtfn) {
		rstats->value[BNXT_RE_CNP_TX_PKTS] = cnps->cur[0].cnp_tx_pkts;
		if (en_disp)
			rstats->value[BNXT_RE_CNP_TX_BYTES] = cnps->cur[0].cnp_tx_bytes;
		rstats->value[BNXT_RE_CNP_RX_PKTS] = cnps->cur[0].cnp_rx_pkts;
		if (en_disp)
			rstats->value[BNXT_RE_CNP_RX_BYTES] = cnps->cur[0].cnp_rx_bytes;
	}
	/* Print RoCE-only bytes; the CNP counters include RoCE packets also */
	bnxt_re_print_roce_only_counters(rdev, rstats);

	rstats->value[BNXT_RE_RX_ROCE_ERROR_PKTS] = stats ? stats->rx_error_pkts : 0;
	rstats->value[BNXT_RE_RX_ROCE_DISCARD_PKTS] = stats ? stats->rx_discard_pkts : 0;
	if (!en_disp) {
		rstats->value[BNXT_RE_TX_ROCE_ERROR_PKTS] = stats ? stats->tx_error_pkts : 0;
		rstats->value[BNXT_RE_TX_ROCE_DISCARDS_PKTS] = stats ? stats->tx_discard_pkts : 0;
	}

	if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
				     rdev->is_virtfn)) {
		rstats->value[BNXT_RE_RES_OOB_DROP_COUNT] = rdev->stats.dstat.e_errs.oob;
		bnxt_re_print_ext_stat(rdev, rstats);
	}
}

static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
					 struct rdma_hw_stats *stats)
{
	struct bnxt_re_dbr_sw_stats *dbr_sw_stats = rdev->dbr_sw_stats;

	stats->value[BNXT_RE_DBQ_PACING_RESCHED] = dbr_sw_stats->dbq_pacing_resched;
	stats->value[BNXT_RE_DBQ_PACING_CMPL] = dbr_sw_stats->dbq_pacing_complete;
	stats->value[BNXT_RE_DBQ_PACING_ALERT] = dbr_sw_stats->dbq_pacing_alerts;
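	/* Read the doorbell FIFO register directly from the device */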
	stats->value[BNXT_RE_DBQ_DBR_FIFO_REG] = readl_fbsd(rdev->en_dev->softc,
						       rdev->dbr_db_fifo_reg_off, 0);
}

int bnxt_re_get_hw_stats(struct ib_device *ibdev,
			 struct rdma_hw_stats *stats,
			 u8 port, int index)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ext_roce_stats *e_errs;
	struct bnxt_re_rdata_counters *rstat;
	struct bnxt_qplib_roce_stats *errs;
	unsigned long tstamp_diff;
	struct pci_dev *pdev;
	int sched_msec;
	int rc = 0;

	if (!port || !stats)
		return -EINVAL;

	if (!rdev)
		return -ENODEV;

	if (!__bnxt_re_is_rdev_valid(rdev))
		return -ENODEV;

	pdev = rdev->en_dev->pdev;
	errs = &rdev->stats.dstat.errs;
	rstat = &rdev->stats.dstat.rstat[0];
	e_errs = &rdev->stats.dstat.e_errs;
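	/*
	 * Rate-limit device queries: if the last read was less than
	 * BNXT_RE_STATS_CTX_UPDATE_TIMER msec ago, serve the cached
	 * values instead of re-querying the firmware.
	 */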
#define BNXT_RE_STATS_CTX_UPDATE_TIMER	250
	sched_msec = BNXT_RE_STATS_CTX_UPDATE_TIMER;
	tstamp_diff = jiffies - rdev->stats.read_tstamp;
	if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
		if (/* restrict_stats && */ tstamp_diff < msecs_to_jiffies(sched_msec))
			goto skip_query;
		rc = bnxt_re_get_device_stats(rdev);
		if (rc)
			dev_err(rdev_to_dev(rdev),
				"Failed to query device stats\n");
		rdev->stats.read_tstamp = jiffies;
	}

	if (rdev->dbr_pacing)
		bnxt_re_copy_db_pacing_stats(rdev, stats);

skip_query:

	if (rdev->netdev)
		stats->value[BNXT_RE_LINK_STATE] = bnxt_re_link_state(rdev);
	stats->value[BNXT_RE_MAX_QP] = rdev->dev_attr->max_qp;
	stats->value[BNXT_RE_MAX_SRQ] = rdev->dev_attr->max_srq;
	stats->value[BNXT_RE_MAX_CQ] = rdev->dev_attr->max_cq;
	stats->value[BNXT_RE_MAX_MR] = rdev->dev_attr->max_mr;
	stats->value[BNXT_RE_MAX_MW] = rdev->dev_attr->max_mw;
	stats->value[BNXT_RE_MAX_AH] = rdev->dev_attr->max_ah;
	stats->value[BNXT_RE_MAX_PD] = rdev->dev_attr->max_pd;
	stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&rdev->stats.rsors.qp_count);
	stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&rdev->stats.rsors.rc_qp_count);
	stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&rdev->stats.rsors.ud_qp_count);
	stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&rdev->stats.rsors.srq_count);
	stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&rdev->stats.rsors.cq_count);
	stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&rdev->stats.rsors.mr_count);
	stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&rdev->stats.rsors.mw_count);
	stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&rdev->stats.rsors.ah_count);
	stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&rdev->stats.rsors.pd_count);
	stats->value[BNXT_RE_QP_WATERMARK] = atomic_read(&rdev->stats.rsors.max_qp_count);
	stats->value[BNXT_RE_RC_QP_WATERMARK] = atomic_read(&rdev->stats.rsors.max_rc_qp_count);
	stats->value[BNXT_RE_UD_QP_WATERMARK] = atomic_read(&rdev->stats.rsors.max_ud_qp_count);
	stats->value[BNXT_RE_SRQ_WATERMARK] = atomic_read(&rdev->stats.rsors.max_srq_count);
	stats->value[BNXT_RE_CQ_WATERMARK] = atomic_read(&rdev->stats.rsors.max_cq_count);
	stats->value[BNXT_RE_MR_WATERMARK] = atomic_read(&rdev->stats.rsors.max_mr_count);
	stats->value[BNXT_RE_MW_WATERMARK] = atomic_read(&rdev->stats.rsors.max_mw_count);
	stats->value[BNXT_RE_AH_WATERMARK] = atomic_read(&rdev->stats.rsors.max_ah_count);
	stats->value[BNXT_RE_PD_WATERMARK] = atomic_read(&rdev->stats.rsors.max_pd_count);
	stats->value[BNXT_RE_RESIZE_CQ_COUNT] = atomic_read(&rdev->stats.rsors.resize_count);
	stats->value[BNXT_RE_HW_RETRANSMISSION] = BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags) ? 1 : 0;
	stats->value[BNXT_RE_RECOVERABLE_ERRORS] = rstat ? rstat->tx_bcast_pkts : 0;

	bnxt_re_print_normal_counters(rdev, stats);

	stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] = errs->max_retry_exceeded;
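	/*
	 * With extended stats and hardware retransmission support the
	 * error counters come from the extended (e_errs) set; otherwise
	 * fall back to the legacy per-function counters.
	 */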
	if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
				     rdev->is_virtfn) &&
	    _is_hw_retx_supported(rdev->dev_attr->dev_cap_flags)) {
		stats->value[BNXT_RE_TO_RETRANSMITS] = e_errs->to_retransmits;
		stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] = e_errs->seq_err_naks_rcvd;
		stats->value[BNXT_RE_RNR_NAKS_RCVD] = e_errs->rnr_naks_rcvd;
		stats->value[BNXT_RE_MISSING_RESP] = e_errs->missing_resp;
		stats->value[BNXT_RE_DUP_REQS] = e_errs->dup_req;
	} else {
		stats->value[BNXT_RE_TO_RETRANSMITS] = errs->to_retransmits;
		stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] = errs->seq_err_naks_rcvd;
		stats->value[BNXT_RE_RNR_NAKS_RCVD] = errs->rnr_naks_rcvd;
		stats->value[BNXT_RE_MISSING_RESP] = errs->missing_resp;
		stats->value[BNXT_RE_DUP_REQS] = errs->dup_req;
	}

	stats->value[BNXT_RE_UNRECOVERABLE_ERR] = errs->unrecoverable_err;
	stats->value[BNXT_RE_BAD_RESP_ERR] = errs->bad_resp_err;
	stats->value[BNXT_RE_LOCAL_QP_OP_ERR] = errs->local_qp_op_err;
	stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] = errs->local_protection_err;
	stats->value[BNXT_RE_MEM_MGMT_OP_ERR] = errs->mem_mgmt_op_err;
	stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] = errs->remote_invalid_req_err;
	stats->value[BNXT_RE_REMOTE_ACCESS_ERR] = errs->remote_access_err;
	stats->value[BNXT_RE_REMOTE_OP_ERR] = errs->remote_op_err;
	stats->value[BNXT_RE_RES_EXCEED_MAX] = errs->res_exceed_max;
	stats->value[BNXT_RE_RES_LENGTH_MISMATCH] = errs->res_length_mismatch;
	stats->value[BNXT_RE_RES_EXCEEDS_WQE] = errs->res_exceeds_wqe;
	stats->value[BNXT_RE_RES_OPCODE_ERR] = errs->res_opcode_err;
	stats->value[BNXT_RE_RES_RX_INVALID_RKEY] = errs->res_rx_invalid_rkey;
	stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] = errs->res_rx_domain_err;
	stats->value[BNXT_RE_RES_RX_NO_PERM] = errs->res_rx_no_perm;
	stats->value[BNXT_RE_RES_RX_RANGE_ERR] = errs->res_rx_range_err;
	stats->value[BNXT_RE_RES_TX_INVALID_RKEY] = errs->res_tx_invalid_rkey;
	stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] = errs->res_tx_domain_err;
	stats->value[BNXT_RE_RES_TX_NO_PERM] = errs->res_tx_no_perm;
	stats->value[BNXT_RE_RES_TX_RANGE_ERR] = errs->res_tx_range_err;
	stats->value[BNXT_RE_RES_IRRQ_OFLOW] = errs->res_irrq_oflow;
	stats->value[BNXT_RE_RES_UNSUP_OPCODE] = errs->res_unsup_opcode;
	stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] = errs->res_unaligned_atomic;
	stats->value[BNXT_RE_RES_REM_INV_ERR] = errs->res_rem_inv_err;
	stats->value[BNXT_RE_RES_MEM_ERROR64] = errs->res_mem_error;
	stats->value[BNXT_RE_RES_SRQ_ERR] = errs->res_srq_err;
	stats->value[BNXT_RE_RES_CMP_ERR] = errs->res_cmp_err;
	stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] = errs->res_invalid_dup_rkey;
	stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] = errs->res_wqe_format_err;
	stats->value[BNXT_RE_RES_CQ_LOAD_ERR] = errs->res_cq_load_err;
	stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] = errs->res_srq_load_err;
	stats->value[BNXT_RE_RES_TX_PCI_ERR] = errs->res_tx_pci_err;
	stats->value[BNXT_RE_RES_RX_PCI_ERR] = errs->res_rx_pci_err;

	if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
				     rdev->is_virtfn)) {
		stats->value[BNXT_RE_RES_OOS_DROP_COUNT] = e_errs->oos;
	} else {
		/* Display on function 0 as OOS counters are chip-wide */
		if (PCI_FUNC(pdev->devfn) == 0)
			stats->value[BNXT_RE_RES_OOS_DROP_COUNT] = errs->res_oos_drop_count;
	}
	stats->value[BNXT_RE_NUM_IRQ_STARTED] = rdev->rcfw.num_irq_started;
	stats->value[BNXT_RE_NUM_IRQ_STOPPED] = rdev->rcfw.num_irq_stopped;
	stats->value[BNXT_RE_POLL_IN_INTR_EN] = rdev->rcfw.poll_in_intr_en;
	stats->value[BNXT_RE_POLL_IN_INTR_DIS] = rdev->rcfw.poll_in_intr_dis;
	stats->value[BNXT_RE_CMDQ_FULL_DBG_CNT] = rdev->rcfw.cmdq_full_dbg;
	if (!rdev->is_virtfn)
		stats->value[BNXT_RE_FW_SERVICE_PROF_TYPE_SUP] = is_qport_service_type_supported(rdev);

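	/* The verbs layer expects the number of counters filled in */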
	return ARRAY_SIZE(bnxt_re_stat_descs);
}

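/*
 * With RDMA_HW_STATS_DEFAULT_LIFESPAN the core caches counter values
 * briefly between reads before calling back into the driver.
 */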
struct rdma_hw_stats *bnxt_re_alloc_hw_port_stats(struct ib_device *ibdev,
						  u8 port_num)
{
	return rdma_alloc_hw_stats_struct(bnxt_re_stat_descs,
					  ARRAY_SIZE(bnxt_re_stat_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}