/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "lib/events.h"
#include "en.h"
#include "en_accel/ktls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
#include "en/port.h"

#include <net/page_pool/helpers.h>

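/* Write one counter value at the ethtool data cursor and advance the cursor. */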
void mlx5e_ethtool_put_stat(u64 **data, u64 val)
{
	*(*data)++ = val;
}

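/* A profile may leave the stats_grps_num callback unset; treat that as zero
 * stats groups.
 */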
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
	return !priv->profile->stats_grps_num ? 0 :
		priv->profile->stats_grps_num(priv);
}

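/* Sum the counters exposed by every stats group of the profile, i.e. the
 * length of the ethtool strings/values arrays.
 */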
unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	unsigned int total = 0;
	int i;

	for (i = 0; i < num_stats_grps; i++)
		total += stats_grps[i]->get_num_stats(priv);

	return total;
}

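/* Update only the groups whose update_stats_mask includes
 * MLX5E_NDO_UPDATE_STATS; groups are walked in reverse order.
 */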
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats &&
		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats)
			stats_grps[i]->update_stats(priv);
}

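/* Copy every counter value into the ethtool data array; each group advances
 * the shared cursor by its own number of stats.
 */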
void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		stats_grps[i]->fill_stats(priv, &data);
}

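/* Emit the name string of every counter, in the same group order that
 * mlx5e_stats_fill() uses for the values.
 */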
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		stats_grps[i]->fill_strings(priv, &data);
}

/* Concrete NIC Stats */

static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
#ifdef CONFIG_MLX5_EN_ARFS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_add) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_in) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_out) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_expired) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		ethtool_puts(data, sw_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		mlx5e_ethtool_put_stat(data,
				       MLX5E_READ_CTR64_CPU(&priv->stats.sw,
							    sw_stats_desc, i));
}

static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
{
	s->tx_xdp_xmit  += xdpsq_red_stats->xmit;
	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
	s->tx_xdp_nops  += xdpsq_red_stats->nops;
	s->tx_xdp_full  += xdpsq_red_stats->full;
	s->tx_xdp_err   += xdpsq_red_stats->err;
	s->tx_xdp_cqes  += xdpsq_red_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xdpsq_stats)
{
	s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
	s->rx_xdp_tx_nops  += xdpsq_stats->nops;
	s->rx_xdp_tx_full  += xdpsq_stats->full;
	s->rx_xdp_tx_err   += xdpsq_stats->err;
	s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xsksq_stats)
{
	s->tx_xsk_xmit  += xsksq_stats->xmit;
	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
	s->tx_xsk_inlnw += xsksq_stats->inlnw;
	s->tx_xsk_full  += xsksq_stats->full;
	s->tx_xsk_err   += xsksq_stats->err;
	s->tx_xsk_cqes  += xsksq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
						  struct mlx5e_rq_stats *xskrq_stats)
{
	s->rx_xsk_packets                += xskrq_stats->packets;
	s->rx_xsk_bytes                  += xskrq_stats->bytes;
	s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
	s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
	s->rx_xsk_csum_none              += xskrq_stats->csum_none;
	s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
	s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
	s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
	s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
	s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
	s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
	s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
	s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
	s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
	s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
	s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
	s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
}

static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_rq_stats *rq_stats)
{
	s->rx_packets                 += rq_stats->packets;
	s->rx_bytes                   += rq_stats->bytes;
	s->rx_lro_packets             += rq_stats->lro_packets;
	s->rx_lro_bytes               += rq_stats->lro_bytes;
	s->rx_gro_packets             += rq_stats->gro_packets;
	s->rx_gro_bytes               += rq_stats->gro_bytes;
	s->rx_gro_skbs                += rq_stats->gro_skbs;
	s->rx_gro_large_hds           += rq_stats->gro_large_hds;
	s->rx_hds_nodata_packets      += rq_stats->hds_nodata_packets;
	s->rx_hds_nodata_bytes        += rq_stats->hds_nodata_bytes;
	s->rx_hds_nosplit_packets     += rq_stats->hds_nosplit_packets;
	s->rx_hds_nosplit_bytes       += rq_stats->hds_nosplit_bytes;
	s->rx_ecn_mark                += rq_stats->ecn_mark;
	s->rx_removed_vlan_packets    += rq_stats->removed_vlan_packets;
	s->rx_csum_none               += rq_stats->csum_none;
	s->rx_csum_complete           += rq_stats->csum_complete;
	s->rx_csum_complete_tail      += rq_stats->csum_complete_tail;
	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
	s->rx_csum_unnecessary        += rq_stats->csum_unnecessary;
	s->rx_csum_unnecessary_inner  += rq_stats->csum_unnecessary_inner;
	s->rx_xdp_drop                += rq_stats->xdp_drop;
	s->rx_xdp_redirect            += rq_stats->xdp_redirect;
	s->rx_wqe_err                 += rq_stats->wqe_err;
	s->rx_mpwqe_filler_cqes       += rq_stats->mpwqe_filler_cqes;
	s->rx_mpwqe_filler_strides    += rq_stats->mpwqe_filler_strides;
	s->rx_oversize_pkts_sw_drop   += rq_stats->oversize_pkts_sw_drop;
	s->rx_buff_alloc_err          += rq_stats->buff_alloc_err;
	s->rx_cqe_compress_blks       += rq_stats->cqe_compress_blks;
	s->rx_cqe_compress_pkts       += rq_stats->cqe_compress_pkts;
	s->rx_congst_umr              += rq_stats->congst_umr;
#ifdef CONFIG_MLX5_EN_ARFS
	s->rx_arfs_add                += rq_stats->arfs_add;
	s->rx_arfs_request_in         += rq_stats->arfs_request_in;
	s->rx_arfs_request_out        += rq_stats->arfs_request_out;
	s->rx_arfs_expired            += rq_stats->arfs_expired;
	s->rx_arfs_err                += rq_stats->arfs_err;
#endif
	s->rx_recover                 += rq_stats->recover;
	s->rx_pp_alloc_fast          += rq_stats->pp_alloc_fast;
	s->rx_pp_alloc_slow          += rq_stats->pp_alloc_slow;
	s->rx_pp_alloc_empty         += rq_stats->pp_alloc_empty;
	s->rx_pp_alloc_refill        += rq_stats->pp_alloc_refill;
	s->rx_pp_alloc_waive         += rq_stats->pp_alloc_waive;
	s->rx_pp_alloc_slow_high_order		+= rq_stats->pp_alloc_slow_high_order;
	s->rx_pp_recycle_cached			+= rq_stats->pp_recycle_cached;
	s->rx_pp_recycle_cache_full		+= rq_stats->pp_recycle_cache_full;
	s->rx_pp_recycle_ring			+= rq_stats->pp_recycle_ring;
	s->rx_pp_recycle_ring_full		+= rq_stats->pp_recycle_ring_full;
	s->rx_pp_recycle_released_ref		+= rq_stats->pp_recycle_released_ref;
#ifdef CONFIG_MLX5_EN_TLS
	s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
	s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
	s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
	s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
	s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
	s->rx_tls_resync_req_skip     += rq_stats->tls_resync_req_skip;
	s->rx_tls_resync_res_ok       += rq_stats->tls_resync_res_ok;
	s->rx_tls_resync_res_retry    += rq_stats->tls_resync_res_retry;
	s->rx_tls_resync_res_skip     += rq_stats->tls_resync_res_skip;
	s->rx_tls_err                 += rq_stats->tls_err;
#endif
}

static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_ch_stats *ch_stats)
{
	s->ch_events      += ch_stats->events;
	s->ch_poll        += ch_stats->poll;
	s->ch_arm         += ch_stats->arm;
	s->ch_aff_change  += ch_stats->aff_change;
	s->ch_force_irq   += ch_stats->force_irq;
	s->ch_eq_rearm    += ch_stats->eq_rearm;
}

static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
					       struct mlx5e_sq_stats *sq_stats)
{
	s->tx_packets               += sq_stats->packets;
	s->tx_bytes                 += sq_stats->bytes;
	s->tx_tso_packets           += sq_stats->tso_packets;
	s->tx_tso_bytes             += sq_stats->tso_bytes;
	s->tx_tso_inner_packets     += sq_stats->tso_inner_packets;
	s->tx_tso_inner_bytes       += sq_stats->tso_inner_bytes;
	s->tx_added_vlan_packets    += sq_stats->added_vlan_packets;
	s->tx_nop                   += sq_stats->nop;
	s->tx_mpwqe_blks            += sq_stats->mpwqe_blks;
	s->tx_mpwqe_pkts            += sq_stats->mpwqe_pkts;
	s->tx_queue_stopped         += sq_stats->stopped;
	s->tx_queue_wake            += sq_stats->wake;
	s->tx_queue_dropped         += sq_stats->dropped;
	s->tx_cqe_err               += sq_stats->cqe_err;
	s->tx_recover               += sq_stats->recover;
	s->tx_xmit_more             += sq_stats->xmit_more;
	s->tx_csum_partial_inner    += sq_stats->csum_partial_inner;
	s->tx_csum_none             += sq_stats->csum_none;
	s->tx_csum_partial          += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
	s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
	s->tx_tls_ooo               += sq_stats->tls_ooo;
	s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
	s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
	s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
	s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
#endif
	s->tx_cqes                  += sq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	int i;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return;

	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);

	if (priv->tx_ptp_opened) {
		for (i = 0; i < priv->max_opened_tc; i++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	if (priv->rx_ptp_opened) {
		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	stats = READ_ONCE(priv->htb_qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
	struct page_pool *pool = c->rq.page_pool;
	struct page_pool_stats stats = { 0 };

	if (!page_pool_get_stats(pool, &stats))
		return;

	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;

	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->channels.num; i++) /* for active channels only */
		mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);

	for (i = 0; i < priv->stats_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			priv->channel_stats[i];

		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
		/* xdp redirect */
		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
		/* AF_XDP zero-copy */
		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}

static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)

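/* On a socket-direct (SD) netdev, priv->q_counter[] holds one queue counter
 * id per device in the SD group; the q counters are exposed if any device
 * has one allocated.
 */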
static bool q_counter_any(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *pos;
	int i;

	mlx5_sd_for_each_dev(i, priv->mdev, pos)
		if (priv->q_counter[i++])
			return true;

	return false;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
	int num_stats = 0;

	if (q_counter_any(priv))
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
		ethtool_puts(data, q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		ethtool_puts(data, drop_rq_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
		mlx5e_ethtool_put_stat(data,
				       MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
							    q_stats_desc, i));
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i));
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	struct mlx5_core_dev *pos;
	u32 rx_out_of_buffer = 0;
	int ret, i;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	mlx5_sd_for_each_dev(i, priv->mdev, pos) {
		if (priv->q_counter[i]) {
			MLX5_SET(query_q_counter_in, in, counter_set_id,
				 priv->q_counter[i]);
			ret = mlx5_cmd_exec_inout(pos, query_q_counter, in, out);
			if (!ret)
				rx_out_of_buffer += MLX5_GET(query_q_counter_out,
							     out, out_of_buffer);
		}
	}
	qcnt->rx_out_of_buffer = rx_out_of_buffer;

	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}

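/* vNIC environment counters, read with the QUERY_VNIC_ENV command; each
 * sub-group below is exposed only when its capability bit is set.
 */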
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
		VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
		VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

static const struct counter_desc vnic_env_stats_drop_desc[] = {
	{ "rx_oversize_pkts_buffer",
		VNIC_ENV_OFF(vport_env.eth_wqe_too_small) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
#define NUM_VNIC_ENV_DROP_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, eth_wqe_too_small) ? \
	 ARRAY_SIZE(vnic_env_stats_drop_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		ethtool_puts(data, vnic_env_stats_steer_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		ethtool_puts(data, vnic_env_stats_dev_oob_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
		ethtool_puts(data, vnic_env_stats_drop_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
					    vnic_env_stats_steer_desc, i));

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
					    vnic_env_stats_dev_oob_desc, i));

	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
					    vnic_env_stats_drop_desc, i));
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

static const struct counter_desc vport_loopback_stats_desc[] = {
	{ "vport_loopback_packets",
		VPORT_COUNTER_OFF(local_loopback.packets) },
	{ "vport_loopback_bytes",
		VPORT_COUNTER_OFF(local_loopback.octets) },
};

#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
#define NUM_VPORT_LOOPBACK_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
	 ARRAY_SIZE(vport_loopback_stats_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS +
		NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		ethtool_puts(data, vport_stats_desc[i].format);

	for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
		ethtool_puts(data, vport_loopback_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
					    vport_stats_desc, i));

	for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
					    vport_loopback_stats_desc, i));
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}

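/* Physical port (PPORT) counter groups are read via the PPCNT access
 * register; its 64-bit counters are split into 32-bit _high/_low field
 * pairs, hence the c##_high offsets in the descriptors below.
 */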
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		ethtool_puts(data, pport_802_3_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR64_BE(
				      &priv->stats.pport.IEEE_802_3_counters,
				      pport_802_3_stats_desc, i));
}

#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

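/* Read one big-endian 64-bit counter directly from a raw PPCNT register
 * buffer, given its counter set and field name.
 */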
#define MLX5E_READ_CTR64_BE_F(ptr, set, c)		\
	be64_to_cpu(*(__be64 *)((char *)ptr +		\
		MLX5_BYTE_OFF(ppcnt_reg,		\
			      counter_set.set.c##_high)))

static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
				u32 *ppcnt_ieee_802_3)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
				    sz, MLX5_REG_PPCNT, 0, 0);
}

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_received);
}

void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_phy_stats *phy_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	phy_stats->SymbolErrorDuringCarrier =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_symbol_error_during_carrier);
}

void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

#define RD(name)							\
	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,				\
			      eth_802_3_cntrs_grp_data_layout,		\
			      name)

	mac_stats->FramesTransmittedOK	= RD(a_frames_transmitted_ok);
	mac_stats->FramesReceivedOK	= RD(a_frames_received_ok);
	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
	mac_stats->OctetsTransmittedOK	= RD(a_octets_transmitted_ok);
	mac_stats->OctetsReceivedOK	= RD(a_octets_received_ok);
	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
	mac_stats->InRangeLengthErrors	= RD(a_in_range_length_errors);
	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
	mac_stats->FrameTooLongErrors	= RD(a_frame_too_long_errors);
#undef RD
}

void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	ctrl_stats->MACControlFramesTransmitted =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_transmitted);
	ctrl_stats->MACControlFramesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_received);
	ctrl_stats->UnsupportedOpcodesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_unsupported_opcodes_received);
}

#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		ethtool_puts(data, pport_2863_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR64_BE(
				      &priv->stats.pport.RFC_2863_counters,
				      pport_2863_stats_desc, i));
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		ethtool_puts(data, pport_2819_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR64_BE(
				      &priv->stats.pport.RFC_2819_counters,
				      pport_2819_stats_desc, i));
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048,  4095 },
	{ 4096,  8191 },
	{ 8192, 10239 },
	{}
};

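/* ethtool RMON histogram: hist[i] below is filled from the RFC 2819 packet
 * size counter that matches mlx5e_rmon_ranges[i].
 */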
void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
			  struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges)
{
	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

#define RD(name)						\
	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters,		\
			      eth_2819_cntrs_grp_data_layout,	\
			      name)

	rmon->undersize_pkts	= RD(ether_stats_undersize_pkts);
	rmon->fragments		= RD(ether_stats_fragments);
	rmon->jabbers		= RD(ether_stats_jabbers);

	rmon->hist[0]		= RD(ether_stats_pkts64octets);
	rmon->hist[1]		= RD(ether_stats_pkts65to127octets);
	rmon->hist[2]		= RD(ether_stats_pkts128to255octets);
	rmon->hist[3]		= RD(ether_stats_pkts256to511octets);
	rmon->hist[4]		= RD(ether_stats_pkts512to1023octets);
	rmon->hist[5]		= RD(ether_stats_pkts1024to1518octets);
	rmon->hist[6]		= RD(ether_stats_pkts1519to2047octets);
	rmon->hist[7]		= RD(ether_stats_pkts2048to4095octets);
	rmon->hist[8]		= RD(ether_stats_pkts4096to8191octets);
	rmon->hist[9]		= RD(ether_stats_pkts8192to10239octets);
#undef RD

	*ranges = mlx5e_rmon_ranges;
}

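/* HW TX timestamping stats: with a PTP channel open, count timestamp CQEs
 * and error/lost CQEs across its TCs; otherwise only the per-SQ DMA
 * timestamp count is reported.
 */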
mlx5e_stats_ts_get(struct mlx5e_priv * priv,struct ethtool_ts_stats * ts_stats)1169 void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
1170 			struct ethtool_ts_stats *ts_stats)
1171 {
1172 	int i, j;
1173 
1174 	mutex_lock(&priv->state_lock);
1175 
1176 	if (priv->tx_ptp_opened) {
1177 		struct mlx5e_ptp *ptp = priv->channels.ptp;
1178 
1179 		ts_stats->pkts = 0;
1180 		ts_stats->err = 0;
1181 		ts_stats->lost = 0;
1182 
1183 		if (!ptp)
1184 			goto out;
1185 
1186 		/* Aggregate stats across all TCs */
1187 		for (i = 0; i < ptp->num_tc; i++) {
1188 			struct mlx5e_ptp_cq_stats *stats =
1189 				ptp->ptpsq[i].cq_stats;
1190 
1191 			ts_stats->pkts += stats->cqe;
1192 			ts_stats->err += stats->abort + stats->err_cqe +
1193 				stats->late_cqe;
1194 			ts_stats->lost += stats->lost_cqe;
1195 		}
1196 	} else {
1197 		/* DMA layer will always successfully timestamp packets. Other
1198 		 * counters do not make sense for this layer.
1199 		 */
1200 		ts_stats->pkts = 0;
1201 
1202 		/* Aggregate stats across all SQs */
1203 		for (j = 0; j < priv->channels.num; j++) {
1204 			struct mlx5e_channel *c = priv->channels.c[j];
1205 
1206 			for (i = 0; i < c->num_tc; i++) {
1207 				struct mlx5e_sq_stats *stats = c->sq[i].stats;
1208 
1209 				ts_stats->pkts += stats->timestamps;
1210 			}
1211 		}
1212 	}
1213 
1214 out:
1215 	mutex_unlock(&priv->state_lock);
1216 }
1217 
1218 #define PPORT_PHY_LAYER_OFF(c) \
1219 	MLX5_BYTE_OFF(ppcnt_reg, \
1220 		      counter_set.phys_layer_cntrs.c)
1221 static const struct counter_desc pport_phy_layer_cntrs_stats_desc[] = {
1222 	{ "link_down_events_phy", PPORT_PHY_LAYER_OFF(link_down_events) }
1223 };
1224 
1225 #define PPORT_PHY_STATISTICAL_OFF(c) \
1226 	MLX5_BYTE_OFF(ppcnt_reg, \
1227 		      counter_set.phys_layer_statistical_cntrs.c##_high)
1228 static const struct counter_desc pport_phy_statistical_stats_desc[] = {
1229 	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
1230 	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
1231 };
1232 
1233 static const struct counter_desc
1234 pport_phy_statistical_err_lanes_stats_desc[] = {
1235 	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
1236 	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
1237 	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
1238 	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
1239 };
1240 
1241 #define PPORT_PHY_RECOVERY_OFF(c) \
1242 	MLX5_BYTE_OFF(ppcnt_reg, counter_set.phys_layer_recovery_cntrs.c)
1243 static const struct counter_desc
1244 pport_phy_recovery_cntrs_stats_desc[] = {
1245 	{ "total_success_recovery_phy",
1246 	  PPORT_PHY_RECOVERY_OFF(total_successful_recovery_events) }
1247 };
1248 
1249 #define NUM_PPORT_PHY_LAYER_COUNTERS \
1250 	ARRAY_SIZE(pport_phy_layer_cntrs_stats_desc)
1251 #define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
1252 	ARRAY_SIZE(pport_phy_statistical_stats_desc)
1253 #define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
1254 	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
1255 #define NUM_PPORT_PHY_RECOVERY_COUNTERS \
1256 	ARRAY_SIZE(pport_phy_recovery_cntrs_stats_desc)
1257 
1258 #define NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(dev) \
1259 	(MLX5_CAP_PCAM_FEATURE(dev, ppcnt_statistical_group) ? \
1260 	NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0)
1261 #define NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(dev) \
1262 	(MLX5_CAP_PCAM_FEATURE(dev, per_lane_error_counters) ? \
1263 	NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0)
1264 #define NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(dev) \
1265 	(MLX5_CAP_PCAM_FEATURE(dev, ppcnt_recovery_counters) ? \
1266 	NUM_PPORT_PHY_RECOVERY_COUNTERS : 0)
1267 
MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)1268 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
1269 {
1270 	struct mlx5_core_dev *mdev = priv->mdev;
1271 	int num_stats;
1272 
1273 	num_stats = NUM_PPORT_PHY_LAYER_COUNTERS;
1274 
1275 	num_stats += NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev);
1276 
1277 	num_stats += NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
1278 
1279 	num_stats += NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev);
1280 	return num_stats;
1281 }
1282 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)1283 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
1284 {
1285 	struct mlx5_core_dev *mdev = priv->mdev;
1286 	int i;
1287 
1288 	for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
1289 		ethtool_puts(data, pport_phy_layer_cntrs_stats_desc[i].format);
1290 
1291 	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
1292 		ethtool_puts(data, pport_phy_statistical_stats_desc[i].format);
1293 
1294 	for (i = 0;
1295 	     i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
1296 	     i++)
1297 		ethtool_puts(data,
1298 			     pport_phy_statistical_err_lanes_stats_desc[i]
1299 			     .format);
1300 
1301 	for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
1302 		ethtool_puts(data,
1303 			     pport_phy_recovery_cntrs_stats_desc[i].format);
1304 }
1305 
1306 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
1307 {
1308 	struct mlx5_core_dev *mdev = priv->mdev;
1309 	int i;
1310 
1311 	for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
1312 		mlx5e_ethtool_put_stat(
1313 				data,
1314 				MLX5E_READ_CTR32_BE(&priv->stats.pport
1315 					.phy_counters,
1316 					pport_phy_layer_cntrs_stats_desc, i));
1317 
1318 	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
1319 		mlx5e_ethtool_put_stat(
1320 			data,
1321 			MLX5E_READ_CTR64_BE(
1322 				&priv->stats.pport.phy_statistical_counters,
1323 				pport_phy_statistical_stats_desc, i));
1324 
1325 	for (i = 0;
1326 	     i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
1327 	     i++)
1328 		mlx5e_ethtool_put_stat(
1329 			data,
1330 			MLX5E_READ_CTR64_BE(
1331 				&priv->stats.pport.phy_statistical_counters,
1332 				pport_phy_statistical_err_lanes_stats_desc, i));
1333 
1334 	for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
1335 		mlx5e_ethtool_put_stat(
1336 			data,
1337 			MLX5E_READ_CTR32_BE(
1338 				&priv->stats.pport.phy_recovery_counters,
1339 				pport_phy_recovery_cntrs_stats_desc, i));
1340 }
1341 
1342 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
1343 {
1344 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1345 	struct mlx5_core_dev *mdev = priv->mdev;
1346 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1347 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1348 	void *out;
1349 
1350 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1351 	out = pstats->phy_counters;
1352 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1353 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1354 
1355 	if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
1356 		out = pstats->phy_statistical_counters;
1357 		MLX5_SET(ppcnt_reg, in, grp,
1358 			 MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1359 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
1360 				     0);
1361 	}
1362 
1363 	if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_recovery_counters)) {
1364 		out = pstats->phy_recovery_counters;
1365 		MLX5_SET(ppcnt_reg, in, grp,
1366 			 MLX5_PHYSICAL_LAYER_RECOVERY_GROUP);
1367 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
1368 				     0);
1369 	}
1370 }
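/*
 * Note the query pattern above, repeated for every PPCNT group in this
 * file: one "in" mailbox is built once (local_port = 1), only the grp
 * field is rewritten between calls, and each mlx5_core_access_reg()
 * snapshots a whole counter group into its cached pstats buffer, which
 * the FILL_STATS callbacks later read without touching hardware.
 */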
1371 
1372 void mlx5e_get_link_ext_stats(struct net_device *dev,
1373 			      struct ethtool_link_ext_stats *stats)
1374 {
1375 	struct mlx5e_priv *priv = netdev_priv(dev);
1376 	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1377 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1378 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1379 
1380 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1381 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1382 	mlx5_core_access_reg(priv->mdev, in, sz, out, sz,
1383 			     MLX5_REG_PPCNT, 0, 0);
1384 
1385 	stats->link_down_events = MLX5_GET(ppcnt_reg, out,
1386 					   counter_set.phys_layer_cntrs.link_down_events);
1387 }
1388 
1389 static int fec_num_lanes(struct mlx5_core_dev *dev)
1390 {
1391 	u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1392 	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1393 	int err;
1394 
1395 	MLX5_SET(pmlp_reg, in, local_port, 1);
1396 	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
1397 				   MLX5_REG_PMLP, 0, 0);
1398 	if (err)
1399 		return 0;
1400 
1401 	return MLX5_GET(pmlp_reg, out, width);
1402 }
1403 
1404 static int fec_active_mode(struct mlx5_core_dev *mdev)
1405 {
1406 	unsigned long fec_active_long;
1407 	u32 fec_active;
1408 
1409 	if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
1410 		return MLX5E_FEC_NOFEC;
1411 
1412 	fec_active_long = fec_active;
1413 	return find_first_bit(&fec_active_long, BITS_PER_LONG);
1414 }
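/*
 * mlx5e_get_fec_mode() reports the active FEC mode as a (presumably
 * one-hot) bitmask, so find_first_bit() maps it back to the MLX5E_FEC_*
 * bit index. E.g., assuming fec_active == BIT(MLX5E_FEC_RS_528_514), the
 * function returns MLX5E_FEC_RS_528_514; if no bit is set it returns the
 * bitmap size, which matches no case in the callers' switch statements.
 */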
1415 
1416 #define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
1417 	fec_stats->corrected_blocks.lanes[(idx)] = \
1418 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
1419 				      fc_fec_corrected_blocks_lane##idx); \
1420 	fec_stats->uncorrectable_blocks.lanes[(idx)] = \
1421 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
1422 				      fc_fec_uncorrectable_blocks_lane##idx); \
1423 })
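/*
 * Illustrative expansion: MLX5E_STATS_SET_FEC_BLOCK(0) pastes the lane
 * index into the register field names and becomes
 *
 *	fec_stats->corrected_blocks.lanes[(0)] =
 *		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
 *				      fc_fec_corrected_blocks_lane0);
 *	fec_stats->uncorrectable_blocks.lanes[(0)] =
 *		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
 *				      fc_fec_uncorrectable_blocks_lane0);
 *
 * fec_set_fc_stats() below relies on its if-chain falling through: a
 * 4-lane link takes all three branches and fills lanes 3..0.
 */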
1424 
1425 static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
1426 			     u32 *ppcnt, u8 lanes)
1427 {
1428 	if (lanes > 3) { /* 4 lanes */
1429 		MLX5E_STATS_SET_FEC_BLOCK(3);
1430 		MLX5E_STATS_SET_FEC_BLOCK(2);
1431 	}
1432 	if (lanes > 1) /* 2 lanes */
1433 		MLX5E_STATS_SET_FEC_BLOCK(1);
1434 	if (lanes > 0) /* 1 lane */
1435 		MLX5E_STATS_SET_FEC_BLOCK(0);
1436 }
1437 
1438 static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
1439 {
1440 	fec_stats->corrected_blocks.total =
1441 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1442 				      rs_fec_corrected_blocks);
1443 	fec_stats->uncorrectable_blocks.total =
1444 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1445 				      rs_fec_uncorrectable_blocks);
1446 }
1447 
1448 static void fec_set_block_stats(struct mlx5e_priv *priv,
1449 				int mode,
1450 				struct ethtool_fec_stats *fec_stats)
1451 {
1452 	struct mlx5_core_dev *mdev = priv->mdev;
1453 	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1454 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1455 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1456 
1457 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1458 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1459 	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
1460 		return;
1461 
1462 	switch (mode) {
1463 	case MLX5E_FEC_RS_528_514:
1464 	case MLX5E_FEC_RS_544_514:
1465 	case MLX5E_FEC_LLRS_272_257_1:
1466 	case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
1467 		fec_set_rs_stats(fec_stats, out);
1468 		return;
1469 	case MLX5E_FEC_FIRECODE:
1470 		fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
1471 	}
1472 }
1473 
1474 static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
1475 					 struct ethtool_fec_stats *fec_stats)
1476 {
1477 	u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
1478 	struct mlx5_core_dev *mdev = priv->mdev;
1479 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1480 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1481 
1482 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1483 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1484 	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
1485 				 sz, MLX5_REG_PPCNT, 0, 0))
1486 		return;
1487 
1488 	fec_stats->corrected_bits.total =
1489 		MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
1490 				      phys_layer_statistical_cntrs,
1491 				      phy_corrected_bits);
1492 }
1493 
1494 #define MLX5_RS_HISTOGRAM_ENTRIES \
1495 	(MLX5_FLD_SZ_BYTES(rs_histogram_cntrs, hist) / \
1496 	 MLX5_FLD_SZ_BYTES(rs_histogram_cntrs, hist[0]))
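/*
 * Standard entry-count idiom: the total byte size of the hist[] field
 * divided by the byte size of one element yields the number of histogram
 * bins the register layout can hold, without hard-coding the count.
 */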
1497 
1498 enum {
1499 	MLX5E_HISTOGRAM_FEC_RS_544_514 = 1,
1500 	MLX5E_HISTOGRAM_FEC_LLRS = 2,
1501 	MLX5E_HISTOGRAM_FEC_RS_528_514 = 3,
1502 };
1503 
1504 static bool fec_rs_validate_hist_type(int mode, int hist_type)
1505 {
1506 	switch (mode) {
1507 	case MLX5E_FEC_RS_528_514:
1508 		return hist_type == MLX5E_HISTOGRAM_FEC_RS_528_514;
1509 	case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
1510 	case MLX5E_FEC_RS_544_514:
1511 		return hist_type == MLX5E_HISTOGRAM_FEC_RS_544_514;
1512 	case MLX5E_FEC_LLRS_272_257_1:
1513 		return hist_type == MLX5E_HISTOGRAM_FEC_LLRS;
1514 	default:
1515 		break;
1516 	}
1517 
1518 	return false;
1519 }
1520 
1521 static u8
1522 fec_rs_histogram_fill_ranges(struct mlx5e_priv *priv, int mode,
1523 			     const struct ethtool_fec_hist_range **ranges)
1524 {
1525 	struct mlx5_core_dev *mdev = priv->mdev;
1526 	u32 out[MLX5_ST_SZ_DW(pphcr_reg)] = {0};
1527 	u32 in[MLX5_ST_SZ_DW(pphcr_reg)] = {0};
1528 	int sz = MLX5_ST_SZ_BYTES(pphcr_reg);
1529 	u8 hist_type, num_of_bins;
1530 
1531 	memset(priv->fec_ranges, 0,
1532 	       ETHTOOL_FEC_HIST_MAX * sizeof(*priv->fec_ranges));
1533 	MLX5_SET(pphcr_reg, in, local_port, 1);
1534 	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPHCR, 0, 0))
1535 		return 0;
1536 
1537 	hist_type = MLX5_GET(pphcr_reg, out, active_hist_type);
1538 	if (!fec_rs_validate_hist_type(mode, hist_type))
1539 		return 0;
1540 
1541 	num_of_bins = MLX5_GET(pphcr_reg, out, num_of_bins);
1542 	if (WARN_ON_ONCE(num_of_bins > MLX5_RS_HISTOGRAM_ENTRIES))
1543 		return 0;
1544 
1545 	for (int i = 0; i < num_of_bins; i++) {
1546 		void *bin_range = MLX5_ADDR_OF(pphcr_reg, out, bin_range[i]);
1547 
1548 		priv->fec_ranges[i].high = MLX5_GET(bin_range_layout, bin_range,
1549 						    high_val);
1550 		priv->fec_ranges[i].low = MLX5_GET(bin_range_layout, bin_range,
1551 						   low_val);
1552 	}
1553 	*ranges = priv->fec_ranges;
1554 
1555 	return num_of_bins;
1556 }
1557 
1558 static void fec_rs_histogram_fill_stats(struct mlx5e_priv *priv,
1559 					u8 num_of_bins,
1560 					struct ethtool_fec_hist *hist)
1561 {
1562 	struct mlx5_core_dev *mdev = priv->mdev;
1563 	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1564 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1565 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1566 	void *rs_histogram_cntrs;
1567 
1568 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1569 	MLX5_SET(ppcnt_reg, in, grp, MLX5_RS_FEC_HISTOGRAM_GROUP);
1570 	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
1571 		return;
1572 
1573 	rs_histogram_cntrs = MLX5_ADDR_OF(ppcnt_reg, out,
1574 					  counter_set.rs_histogram_cntrs);
1575 	/* fec_rs_histogram_fill_ranges() guarantees that num_of_bins does not
1576 	 * exceed MLX5_RS_HISTOGRAM_ENTRIES, keeping hist->values[] in bounds.
1577 	 */
1578 	for (int i = 0; i < num_of_bins; i++)
1579 		hist->values[i].sum = MLX5_GET64(rs_histogram_cntrs,
1580 						 rs_histogram_cntrs,
1581 						 hist[i]);
1582 }
1583 
1584 static void fec_set_histograms_stats(struct mlx5e_priv *priv, int mode,
1585 				     struct ethtool_fec_hist *hist)
1586 {
1587 	u8 num_of_bins;
1588 
1589 	switch (mode) {
1590 	case MLX5E_FEC_RS_528_514:
1591 	case MLX5E_FEC_RS_544_514:
1592 	case MLX5E_FEC_LLRS_272_257_1:
1593 	case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
1594 		num_of_bins =
1595 			fec_rs_histogram_fill_ranges(priv, mode, &hist->ranges);
1596 		if (num_of_bins)
1597 			return fec_rs_histogram_fill_stats(priv, num_of_bins,
1598 							   hist);
1599 		break;
1600 	default:
1601 		return;
1602 	}
1603 }
1604 
1605 void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
1606 			 struct ethtool_fec_stats *fec_stats,
1607 			 struct ethtool_fec_hist *hist)
1608 {
1609 	int mode = fec_active_mode(priv->mdev);
1610 
1611 	if (mode == MLX5E_FEC_NOFEC)
1612 		return;
1613 
1614 	if (MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group)) {
1615 		fec_set_corrected_bits_total(priv, fec_stats);
1616 		fec_set_block_stats(priv, mode, fec_stats);
1617 	}
1618 
1619 	if (MLX5_CAP_PCAM_REG(priv->mdev, pphcr))
1620 		fec_set_histograms_stats(priv, mode, hist);
1621 }
1622 
1623 #define PPORT_ETH_EXT_OFF(c) \
1624 	MLX5_BYTE_OFF(ppcnt_reg, \
1625 		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
1626 static const struct counter_desc pport_eth_ext_stats_desc[] = {
1627 	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
1628 };
1629 
1630 #define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)
1631 
1632 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
1633 {
1634 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1635 		return NUM_PPORT_ETH_EXT_COUNTERS;
1636 
1637 	return 0;
1638 }
1639 
1640 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
1641 {
1642 	int i;
1643 
1644 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1645 		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1646 			ethtool_puts(data, pport_eth_ext_stats_desc[i].format);
1647 }
1648 
1649 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
1650 {
1651 	int i;
1652 
1653 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1654 		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1655 			mlx5e_ethtool_put_stat(
1656 				data,
1657 				MLX5E_READ_CTR64_BE(
1658 					&priv->stats.pport.eth_ext_counters,
1659 					pport_eth_ext_stats_desc, i));
1660 }
1661 
1662 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
1663 {
1664 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1665 	struct mlx5_core_dev *mdev = priv->mdev;
1666 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1667 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1668 	void *out;
1669 
1670 	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
1671 		return;
1672 
1673 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1674 	out = pstats->eth_ext_counters;
1675 	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
1676 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1677 }
1678 
1679 #define PCIE_PERF_OFF(c) \
1680 	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
1681 static const struct counter_desc pcie_perf_stats_desc[] = {
1682 	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
1683 	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
1684 };
1685 
1686 #define PCIE_PERF_OFF64(c) \
1687 	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
1688 static const struct counter_desc pcie_perf_stats_desc64[] = {
1689 	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
1690 };
1691 
1692 static const struct counter_desc pcie_perf_stall_stats_desc[] = {
1693 	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
1694 	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
1695 	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
1696 	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
1697 };
1698 
1699 #define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
1700 #define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
1701 #define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)
1702 
1703 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
1704 {
1705 	int num_stats = 0;
1706 
1707 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1708 		num_stats += NUM_PCIE_PERF_COUNTERS;
1709 
1710 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1711 		num_stats += NUM_PCIE_PERF_COUNTERS64;
1712 
1713 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1714 		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
1715 
1716 	return num_stats;
1717 }
1718 
1719 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
1720 {
1721 	int i;
1722 
1723 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1724 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1725 			ethtool_puts(data, pcie_perf_stats_desc[i].format);
1726 
1727 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1728 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1729 			ethtool_puts(data, pcie_perf_stats_desc64[i].format);
1730 
1731 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1732 		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1733 			ethtool_puts(data,
1734 				     pcie_perf_stall_stats_desc[i].format);
1735 }
1736 
1737 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
1738 {
1739 	int i;
1740 
1741 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1742 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1743 			mlx5e_ethtool_put_stat(
1744 				data,
1745 				MLX5E_READ_CTR32_BE(
1746 					&priv->stats.pcie.pcie_perf_counters,
1747 					pcie_perf_stats_desc, i));
1748 
1749 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1750 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1751 			mlx5e_ethtool_put_stat(
1752 				data,
1753 				MLX5E_READ_CTR64_BE(
1754 					&priv->stats.pcie.pcie_perf_counters,
1755 					pcie_perf_stats_desc64, i));
1756 
1757 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1758 		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1759 			mlx5e_ethtool_put_stat(
1760 				data,
1761 				MLX5E_READ_CTR32_BE(
1762 					&priv->stats.pcie.pcie_perf_counters,
1763 					pcie_perf_stall_stats_desc, i));
1764 }
1765 
1766 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
1767 {
1768 	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
1769 	struct mlx5_core_dev *mdev = priv->mdev;
1770 	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
1771 	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
1772 	void *out;
1773 
1774 	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
1775 		return;
1776 
1777 	out = pcie_stats->pcie_perf_counters;
1778 	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
1779 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
1780 }
1781 
1782 #define PPORT_PER_TC_PRIO_OFF(c) \
1783 	MLX5_BYTE_OFF(ppcnt_reg, \
1784 		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)
1785 
1786 static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
1787 	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
1788 };
1789 
1790 #define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)
1791 
1792 #define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
1793 	MLX5_BYTE_OFF(ppcnt_reg, \
1794 		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)
1795 
1796 static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
1797 	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
1798 	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
1799 };
1800 
1801 #define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
1802 	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
1803 
1804 static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
1805 {
1806 	struct mlx5_core_dev *mdev = priv->mdev;
1807 
1808 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1809 		return 0;
1810 
1811 	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
1812 }
1813 
1814 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
1815 {
1816 	struct mlx5_core_dev *mdev = priv->mdev;
1817 	int i, prio;
1818 
1819 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1820 		return;
1821 
1822 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1823 		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1824 			ethtool_sprintf(data,
1825 					pport_per_tc_prio_stats_desc[i].format,
1826 					prio);
1827 		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1828 			ethtool_sprintf(data,
1829 					pport_per_tc_congest_prio_stats_desc[i].format,
1830 					prio);
1831 	}
1832 }
1833 
1834 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
1835 {
1836 	struct mlx5e_pport_stats *pport = &priv->stats.pport;
1837 	struct mlx5_core_dev *mdev = priv->mdev;
1838 	int i, prio;
1839 
1840 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1841 		return;
1842 
1843 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1844 		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1845 			mlx5e_ethtool_put_stat(
1846 				data,
1847 				MLX5E_READ_CTR64_BE(
1848 					&pport->per_tc_prio_counters[prio],
1849 					pport_per_tc_prio_stats_desc, i));
1850 		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1851 			mlx5e_ethtool_put_stat(
1852 				data,
1853 				MLX5E_READ_CTR64_BE(
1854 					&pport->per_tc_congest_prio_counters
1855 						 [prio],
1856 					pport_per_tc_congest_prio_stats_desc,
1857 					i));
1858 	}
1859 }
1860 
1861 static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
1862 {
1863 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1864 	struct mlx5_core_dev *mdev = priv->mdev;
1865 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1866 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1867 	void *out;
1868 	int prio;
1869 
1870 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1871 		return;
1872 
1873 	MLX5_SET(ppcnt_reg, in, pnat, 2);
1874 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
1875 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1876 		out = pstats->per_tc_prio_counters[prio];
1877 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1878 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1879 	}
1880 }
1881 
1882 static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
1883 {
1884 	struct mlx5_core_dev *mdev = priv->mdev;
1885 
1886 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1887 		return 0;
1888 
1889 	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
1890 }
1891 
1892 static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
1893 {
1894 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1895 	struct mlx5_core_dev *mdev = priv->mdev;
1896 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1897 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1898 	void *out;
1899 	int prio;
1900 
1901 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1902 		return;
1903 
1904 	MLX5_SET(ppcnt_reg, in, pnat, 2);
1905 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
1906 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1907 		out = pstats->per_tc_congest_prio_counters[prio];
1908 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1909 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1910 	}
1911 }
1912 
1913 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
1914 {
1915 	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
1916 		mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
1917 }
1918 
1919 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
1920 {
1921 	mlx5e_grp_per_tc_prio_update_stats(priv);
1922 	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
1923 }
1924 
1925 #define PPORT_PER_PRIO_OFF(c) \
1926 	MLX5_BYTE_OFF(ppcnt_reg, \
1927 		      counter_set.eth_per_prio_grp_data_layout.c##_high)
1928 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
1929 	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
1930 	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
1931 	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
1932 	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
1933 	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
1934 };
1935 
1936 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
1937 
1938 static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
1939 {
1940 	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
1941 }
1942 
1943 static void mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
1944 						    u8 **data)
1945 {
1946 	int i, prio;
1947 
1948 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1949 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1950 			ethtool_sprintf(data,
1951 					pport_per_prio_traffic_stats_desc[i].format,
1952 					prio);
1953 	}
1954 }
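/*
 * The %d in the formats above is expanded per priority, so (assuming
 * NUM_PPORT_PRIO is 8) this emits "rx_prio0_bytes" ... "tx_prio7_packets",
 * five strings per priority, in the same order that
 * mlx5e_grp_per_prio_traffic_fill_stats() writes the values.
 */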
1955 
1956 static void mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
1957 						  u64 **data)
1958 {
1959 	int i, prio;
1960 
1961 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1962 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1963 			mlx5e_ethtool_put_stat(
1964 				data,
1965 				MLX5E_READ_CTR64_BE(
1966 					&priv->stats.pport
1967 						 .per_prio_counters[prio],
1968 					pport_per_prio_traffic_stats_desc, i));
1969 	}
1970 }
1971 
1972 static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
1973 	/* %s is "global" or "prio{i}" */
1974 	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
1975 	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
1976 	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
1977 	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
1978 	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
1979 };
1980 
1981 static const struct counter_desc pport_pfc_stall_stats_desc[] = {
1982 	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
1983 	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
1984 };
1985 
1986 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
1987 #define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
1988 						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
1989 						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
1990 
1991 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1992 {
1993 	struct mlx5_core_dev *mdev = priv->mdev;
1994 	u8 pfc_en_tx;
1995 	u8 pfc_en_rx;
1996 	int err;
1997 
1998 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1999 		return 0;
2000 
2001 	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
2002 
2003 	return err ? 0 : pfc_en_tx | pfc_en_rx;
2004 }
2005 
2006 static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
2007 {
2008 	struct mlx5_core_dev *mdev = priv->mdev;
2009 	u32 rx_pause;
2010 	u32 tx_pause;
2011 	int err;
2012 
2013 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2014 		return false;
2015 
2016 	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
2017 
2018 	return err ? false : rx_pause | tx_pause;
2019 }
2020 
2021 static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
2022 {
2023 	return (mlx5e_query_global_pause_combined(priv) +
2024 		hweight8(mlx5e_query_pfc_combined(priv))) *
2025 		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
2026 		NUM_PPORT_PFC_STALL_COUNTERS(priv);
2027 }
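/*
 * Worked example of the count above: with PFC enabled on priorities 3
 * and 4 (pfc_combined == 0x18, hweight8 == 2), global pause disabled
 * (contributes 0) and both stall-detection capabilities set, this is
 * (0 + 2) * NUM_PPORT_PER_PRIO_PFC_COUNTERS + 2 == 2 * 5 + 2 == 12
 * ethtool counters.
 */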
2028 
2029 static void mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
2030 						u8 **data)
2031 {
2032 	unsigned long pfc_combined;
2033 	int i, prio;
2034 
2035 	pfc_combined = mlx5e_query_pfc_combined(priv);
2036 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
2037 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
2038 			char pfc_string[ETH_GSTRING_LEN];
2039 
2040 			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
2041 			ethtool_sprintf(data,
2042 					pport_per_prio_pfc_stats_desc[i].format,
2043 					pfc_string);
2044 		}
2045 	}
2046 
2047 	if (mlx5e_query_global_pause_combined(priv)) {
2048 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
2049 			ethtool_sprintf(data,
2050 					pport_per_prio_pfc_stats_desc[i].format,
2051 					"global");
2052 		}
2053 	}
2054 
2055 	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
2056 		ethtool_puts(data, pport_pfc_stall_stats_desc[i].format);
2057 }
2058 
2059 static void mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
2060 					      u64 **data)
2061 {
2062 	unsigned long pfc_combined;
2063 	int i, prio;
2064 
2065 	pfc_combined = mlx5e_query_pfc_combined(priv);
2066 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
2067 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
2068 			mlx5e_ethtool_put_stat(
2069 				data,
2070 				MLX5E_READ_CTR64_BE(
2071 					&priv->stats.pport
2072 						 .per_prio_counters[prio],
2073 					pport_per_prio_pfc_stats_desc, i));
2074 		}
2075 	}
2076 
2077 	if (mlx5e_query_global_pause_combined(priv)) {
2078 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
2079 			mlx5e_ethtool_put_stat(
2080 				data,
2081 				MLX5E_READ_CTR64_BE(
2082 					&priv->stats.pport.per_prio_counters[0],
2083 					pport_per_prio_pfc_stats_desc, i));
2084 		}
2085 	}
2086 
2087 	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
2088 		mlx5e_ethtool_put_stat(
2089 			data, MLX5E_READ_CTR64_BE(
2090 				      &priv->stats.pport.per_prio_counters[0],
2091 				      pport_pfc_stall_stats_desc, i));
2092 }
2093 
2094 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
2095 {
2096 	return mlx5e_grp_per_prio_traffic_get_num_stats() +
2097 		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
2098 }
2099 
2100 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
2101 {
2102 	mlx5e_grp_per_prio_traffic_fill_strings(priv, data);
2103 	mlx5e_grp_per_prio_pfc_fill_strings(priv, data);
2104 }
2105 
2106 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
2107 {
2108 	mlx5e_grp_per_prio_traffic_fill_stats(priv, data);
2109 	mlx5e_grp_per_prio_pfc_fill_stats(priv, data);
2110 }
2111 
2112 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
2113 {
2114 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
2115 	struct mlx5_core_dev *mdev = priv->mdev;
2116 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
2117 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
2118 	int prio;
2119 	void *out;
2120 
2121 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
2122 		return;
2123 
2124 	MLX5_SET(ppcnt_reg, in, local_port, 1);
2125 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
2126 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
2127 		out = pstats->per_prio_counters[prio];
2128 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
2129 		mlx5_core_access_reg(mdev, in, sz, out, sz,
2130 				     MLX5_REG_PPCNT, 0, 0);
2131 	}
2132 }
2133 
2134 static const struct counter_desc mlx5e_pme_status_desc[] = {
2135 	{ "module_unplug",       sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
2136 };
2137 
2138 static const struct counter_desc mlx5e_pme_error_desc[] = {
2139 	{ "module_bus_stuck",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
2140 	{ "module_high_temp",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
2141 	{ "module_bad_shorted",  sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
2142 };
2143 
2144 #define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
2145 #define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)
2146 
2147 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
2148 {
2149 	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
2150 }
2151 
2152 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
2153 {
2154 	int i;
2155 
2156 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
2157 		ethtool_puts(data, mlx5e_pme_status_desc[i].format);
2158 
2159 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
2160 		ethtool_puts(data, mlx5e_pme_error_desc[i].format);
2161 }
2162 
2163 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
2164 {
2165 	struct mlx5_pme_stats pme_stats;
2166 	int i;
2167 
2168 	mlx5_get_pme_stats(priv->mdev, &pme_stats);
2169 
2170 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
2171 		mlx5e_ethtool_put_stat(
2172 			data, MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
2173 						   mlx5e_pme_status_desc, i));
2174 
2175 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
2176 		mlx5e_ethtool_put_stat(
2177 			data, MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
2178 						   mlx5e_pme_error_desc, i));
2179 }
2180 
2181 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
2182 
2183 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
2184 {
2185 	return mlx5e_ktls_get_count(priv);
2186 }
2187 
2188 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
2189 {
2190 	mlx5e_ktls_get_strings(priv, data);
2191 }
2192 
2193 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
2194 {
2195 	mlx5e_ktls_get_stats(priv, data);
2196 }
2197 
2198 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
2199 
2200 static const struct counter_desc rq_stats_desc[] = {
2201 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
2202 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
2203 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
2204 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
2205 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
2206 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2207 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2208 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
2209 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
2210 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2211 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
2212 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
2213 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
2214 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
2215 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
2216 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
2217 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) },
2218 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) },
2219 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_packets) },
2220 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_bytes) },
2221 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
2222 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2223 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
2224 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2225 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2226 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2227 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2228 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2229 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2230 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
2231 #ifdef CONFIG_MLX5_EN_ARFS
2232 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_add) },
2233 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_in) },
2234 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_out) },
2235 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_expired) },
2236 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
2237 #endif
2238 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
2239 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
2240 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
2241 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
2242 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
2243 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
2244 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
2245 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
2246 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
2247 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
2248 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
2249 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
2250 #ifdef CONFIG_MLX5_EN_TLS
2251 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
2252 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
2253 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
2254 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
2255 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
2256 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
2257 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
2258 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
2259 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
2260 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
2261 #endif
2262 };
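/*
 * These are software counters kept per ring in CPU endianness, unlike the
 * big-endian register snapshots above. A minimal sketch of the declarator
 * assumed here (the real one lives in en_stats.h):
 *
 *	#define MLX5E_DECLARE_RX_STAT(type, fld) \
 *		"rx%d_"#fld, offsetof(type, fld)
 *
 * Each entry thus pairs an "rx<ring>_<field>" format string with the
 * field's offset, expanded per channel by ethtool_sprintf() and read back
 * with MLX5E_READ_CTR64_CPU in the channels FILL_STATS op.
 */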
2263 
2264 static const struct counter_desc sq_stats_desc[] = {
2265 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
2266 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
2267 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2268 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2269 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2270 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2271 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2272 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2273 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2274 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
2275 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, timestamps) },
2276 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2277 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2278 #ifdef CONFIG_MLX5_EN_TLS
2279 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2280 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
2281 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2282 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2283 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2284 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2285 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2286 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2287 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
2288 #endif
2289 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2290 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
2291 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
2292 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2293 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
2294 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
2295 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
2296 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2297 };
2298 
2299 static const struct counter_desc rq_xdpsq_stats_desc[] = {
2300 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2301 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2302 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2303 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
2304 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2305 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2306 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2307 };
2308 
2309 static const struct counter_desc xdpsq_stats_desc[] = {
2310 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2311 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2312 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2313 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
2314 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2315 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2316 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2317 };
2318 
2319 static const struct counter_desc xskrq_stats_desc[] = {
2320 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
2321 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
2322 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2323 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2324 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2325 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
2326 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2327 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2328 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2329 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2330 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2331 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2332 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2333 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2334 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2335 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2336 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2337 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2338 };
2339 
2340 static const struct counter_desc xsksq_stats_desc[] = {
2341 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2342 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2343 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2344 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2345 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2346 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2347 };
2348 
2349 static const struct counter_desc ch_stats_desc[] = {
2350 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
2351 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
2352 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
2353 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
2354 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
2355 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2356 };
2357 
2358 static const struct counter_desc ptp_sq_stats_desc[] = {
2359 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
2360 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
2361 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2362 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2363 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2364 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
2365 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2366 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
2367 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
2368 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2369 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
2370 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
2371 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
2372 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2373 };
2374 
2375 static const struct counter_desc ptp_ch_stats_desc[] = {
2376 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
2377 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
2378 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
2379 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2380 };
2381 
2382 static const struct counter_desc ptp_cq_stats_desc[] = {
2383 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
2384 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
2385 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
2386 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
2387 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
2388 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, lost_cqe) },
2389 };
2390 
2391 static const struct counter_desc ptp_rq_stats_desc[] = {
2392 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
2393 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
2394 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2395 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
2396 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
2397 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2398 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2399 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
2400 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2401 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2402 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
2403 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
2404 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2405 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2406 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2407 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2408 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2409 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2410 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2411 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2412 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2413 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2414 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
2415 };
2416 
2417 static const struct counter_desc qos_sq_stats_desc[] = {
2418 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
2419 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
2420 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2421 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2422 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2423 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2424 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2425 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2426 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2427 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
2428 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, timestamps) },
2429 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2430 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2431 #ifdef CONFIG_MLX5_EN_TLS
2432 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2433 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
2434 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2435 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2436 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2437 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2438 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2439 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2440 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
2441 #endif
2442 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2443 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
2444 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
2445 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2446 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
2447 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
2448 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
2449 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2450 };
2451 
2452 #define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
2453 #define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
2454 #define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
2455 #define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
2456 #define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
2457 #define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
2458 #define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
2459 #define NUM_PTP_SQ_STATS		ARRAY_SIZE(ptp_sq_stats_desc)
2460 #define NUM_PTP_CH_STATS		ARRAY_SIZE(ptp_ch_stats_desc)
2461 #define NUM_PTP_CQ_STATS		ARRAY_SIZE(ptp_cq_stats_desc)
2462 #define NUM_PTP_RQ_STATS		ARRAY_SIZE(ptp_rq_stats_desc)
2463 #define NUM_QOS_SQ_STATS		ARRAY_SIZE(qos_sq_stats_desc)
2464 
2465 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
2466 {
2467 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2468 	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
2469 }
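/*
 * The pairing noted above, schematically:
 *
 *	reader (this file)                writer (mlx5e_open_qos_sq)
 *	------------------                --------------------------
 *	n = smp_load_acquire(&max)        stats[qid] = alloc + init
 *	s = READ_ONCE(stats)              smp_store_release(&max, qid + 1)
 *
 * The acquire/release pair guarantees that any qid below the observed
 * count has a fully initialised stats entry, so the FILL_STATS op can
 * dereference stats[qid] without further locking.
 */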
2470 
2471 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
2472 {
2473 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2474 	u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
2475 	int i, qid;
2476 
2477 	for (qid = 0; qid < max_qos_sqs; qid++)
2478 		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2479 			ethtool_sprintf(data, qos_sq_stats_desc[i].format, qid);
2480 }
2481 
2482 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
2483 {
2484 	struct mlx5e_sq_stats **stats;
2485 	u16 max_qos_sqs;
2486 	int i, qid;
2487 
2488 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2489 	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
2490 	stats = READ_ONCE(priv->htb_qos_sq_stats);
2491 
2492 	for (qid = 0; qid < max_qos_sqs; qid++) {
2493 		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
2494 
2495 		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2496 			mlx5e_ethtool_put_stat(
2497 				data,
2498 				MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i));
2499 	}
2500 }
2501 
2502 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
2503 
2504 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
2505 {
2506 	int num = NUM_PTP_CH_STATS;
2507 
2508 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2509 		return 0;
2510 
2511 	if (priv->tx_ptp_opened)
2512 		num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
2513 	if (priv->rx_ptp_opened)
2514 		num += NUM_PTP_RQ_STATS;
2515 
2516 	return num;
2517 }
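/*
 * Example: with both PTP directions opened and max_opened_tc == 2 this
 * reports NUM_PTP_CH_STATS + 2 * (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) +
 * NUM_PTP_RQ_STATS entries; if neither direction was ever opened the
 * whole group collapses to zero and exposes no strings at all.
 */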
2518 
2519 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
2520 {
2521 	int i, tc;
2522 
2523 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2524 		return;
2525 
2526 	for (i = 0; i < NUM_PTP_CH_STATS; i++)
2527 		ethtool_puts(data, ptp_ch_stats_desc[i].format);
2528 
2529 	if (priv->tx_ptp_opened) {
2530 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2531 			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2532 				ethtool_sprintf(data,
2533 						ptp_sq_stats_desc[i].format,
2534 						tc);
2535 
2536 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2537 			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2538 				ethtool_sprintf(data,
2539 						ptp_cq_stats_desc[i].format,
2540 						tc);
2541 	}
2542 	if (priv->rx_ptp_opened) {
2543 		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2544 			ethtool_puts(data, ptp_rq_stats_desc[i].format);
2545 	}
2546 }
2547 
2548 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
2549 {
2550 	int i, tc;
2551 
2552 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2553 		return;
2554 
2555 	for (i = 0; i < NUM_PTP_CH_STATS; i++)
2556 		mlx5e_ethtool_put_stat(
2557 			data, MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
2558 						   ptp_ch_stats_desc, i));
2559 
2560 	if (priv->tx_ptp_opened) {
2561 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2562 			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2563 				mlx5e_ethtool_put_stat(
2564 					data, MLX5E_READ_CTR64_CPU(
2565 						      &priv->ptp_stats.sq[tc],
2566 						      ptp_sq_stats_desc, i));
2567 
2568 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2569 			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2570 				mlx5e_ethtool_put_stat(
2571 					data, MLX5E_READ_CTR64_CPU(
2572 						      &priv->ptp_stats.cq[tc],
2573 						      ptp_cq_stats_desc, i));
2574 	}
2575 	if (priv->rx_ptp_opened) {
2576 		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2577 			mlx5e_ethtool_put_stat(
2578 				data,
2579 				MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
2580 						     ptp_rq_stats_desc, i));
2581 	}
2582 }
2583 
2584 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
2585 
2586 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
2587 {
2588 	int max_nch = priv->stats_nch;
2589 
2590 	return (NUM_RQ_STATS * max_nch) +
2591 	       (NUM_CH_STATS * max_nch) +
2592 	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
2593 	       (NUM_RQ_XDPSQ_STATS * max_nch) +
2594 	       (NUM_XDPSQ_STATS * max_nch) +
2595 	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
2596 	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
2597 }
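/*
 * Example: with stats_nch == 2, max_opened_tc == 1 and XSK never used,
 * this is 2 * (NUM_CH_STATS + NUM_RQ_STATS + NUM_RQ_XDPSQ_STATS +
 * NUM_XDPSQ_STATS) + 2 * NUM_SQ_STATS entries. The ever_used flag keeps
 * the XSK counters visible once any channel has used an XSK pool, so the
 * exposed string set stays stable across socket teardown.
 */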
2598 
2599 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
2600 {
2601 	bool is_xsk = priv->xsk.ever_used;
2602 	int max_nch = priv->stats_nch;
2603 	int i, j, tc;
2604 
2605 	for (i = 0; i < max_nch; i++)
2606 		for (j = 0; j < NUM_CH_STATS; j++)
2607 			ethtool_sprintf(data, ch_stats_desc[j].format, i);
2608 
2609 	for (i = 0; i < max_nch; i++) {
2610 		for (j = 0; j < NUM_RQ_STATS; j++)
2611 			ethtool_sprintf(data, rq_stats_desc[j].format, i);
2612 		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2613 			ethtool_sprintf(data, xskrq_stats_desc[j].format, i);
2614 		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2615 			ethtool_sprintf(data, rq_xdpsq_stats_desc[j].format, i);
2616 	}
2617 
2618 	for (tc = 0; tc < priv->max_opened_tc; tc++)
2619 		for (i = 0; i < max_nch; i++)
2620 			for (j = 0; j < NUM_SQ_STATS; j++)
2621 				ethtool_sprintf(data, sq_stats_desc[j].format,
2622 						i + tc * max_nch);
2623 
2624 	for (i = 0; i < max_nch; i++) {
2625 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2626 			ethtool_sprintf(data, xsksq_stats_desc[j].format, i);
2627 		for (j = 0; j < NUM_XDPSQ_STATS; j++)
2628 			ethtool_sprintf(data, xdpsq_stats_desc[j].format, i);
2629 	}
2630 }
2631 
2632 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
2633 {
2634 	bool is_xsk = priv->xsk.ever_used;
2635 	int max_nch = priv->stats_nch;
2636 	int i, j, tc;
2637 
2638 	for (i = 0; i < max_nch; i++)
2639 		for (j = 0; j < NUM_CH_STATS; j++)
2640 			mlx5e_ethtool_put_stat(
2641 				data, MLX5E_READ_CTR64_CPU(
2642 					      &priv->channel_stats[i]->ch,
2643 					      ch_stats_desc, j));
2644 
2645 	for (i = 0; i < max_nch; i++) {
2646 		for (j = 0; j < NUM_RQ_STATS; j++)
2647 			mlx5e_ethtool_put_stat(
2648 				data, MLX5E_READ_CTR64_CPU(
2649 					      &priv->channel_stats[i]->rq,
2650 					      rq_stats_desc, j));
2651 		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2652 			mlx5e_ethtool_put_stat(
2653 				data, MLX5E_READ_CTR64_CPU(
2654 					      &priv->channel_stats[i]->xskrq,
2655 					      xskrq_stats_desc, j));
2656 		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2657 			mlx5e_ethtool_put_stat(
2658 				data, MLX5E_READ_CTR64_CPU(
2659 					      &priv->channel_stats[i]->rq_xdpsq,
2660 					      rq_xdpsq_stats_desc, j));
2661 	}
2662 
2663 	for (tc = 0; tc < priv->max_opened_tc; tc++)
2664 		for (i = 0; i < max_nch; i++)
2665 			for (j = 0; j < NUM_SQ_STATS; j++)
2666 				mlx5e_ethtool_put_stat(
2667 					data,
2668 					MLX5E_READ_CTR64_CPU(
2669 						&priv->channel_stats[i]->sq[tc],
2670 						sq_stats_desc, j));
2671 
2672 	for (i = 0; i < max_nch; i++) {
2673 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2674 			mlx5e_ethtool_put_stat(
2675 				data, MLX5E_READ_CTR64_CPU(
2676 					      &priv->channel_stats[i]->xsksq,
2677 					      xsksq_stats_desc, j));
2678 		for (j = 0; j < NUM_XDPSQ_STATS; j++)
2679 			mlx5e_ethtool_put_stat(
2680 				data, MLX5E_READ_CTR64_CPU(
2681 					      &priv->channel_stats[i]->xdpsq,
2682 					      xdpsq_stats_desc, j));
2683 	}
2684 }
2685 
2686 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
2687 
2688 MLX5E_DEFINE_STATS_GRP(sw, 0);
2689 MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
2690 MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
2691 MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
2692 MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
2693 MLX5E_DEFINE_STATS_GRP(2863, 0);
2694 MLX5E_DEFINE_STATS_GRP(2819, 0);
2695 MLX5E_DEFINE_STATS_GRP(phy, 0);
2696 MLX5E_DEFINE_STATS_GRP(pcie, 0);
2697 MLX5E_DEFINE_STATS_GRP(per_prio, 0);
2698 MLX5E_DEFINE_STATS_GRP(pme, 0);
2699 MLX5E_DEFINE_STATS_GRP(channels, 0);
2700 MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
2701 MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
2702 static MLX5E_DEFINE_STATS_GRP(tls, 0);
2703 MLX5E_DEFINE_STATS_GRP(ptp, 0);
2704 static MLX5E_DEFINE_STATS_GRP(qos, 0);
2705 
2706 /* The stats groups are listed in reverse order relative to their update_stats() calls */
2707 mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
2708 	&MLX5E_STATS_GRP(sw),
2709 	&MLX5E_STATS_GRP(qcnt),
2710 	&MLX5E_STATS_GRP(vnic_env),
2711 	&MLX5E_STATS_GRP(vport),
2712 	&MLX5E_STATS_GRP(802_3),
2713 	&MLX5E_STATS_GRP(2863),
2714 	&MLX5E_STATS_GRP(2819),
2715 	&MLX5E_STATS_GRP(phy),
2716 	&MLX5E_STATS_GRP(eth_ext),
2717 	&MLX5E_STATS_GRP(pcie),
2718 	&MLX5E_STATS_GRP(per_prio),
2719 	&MLX5E_STATS_GRP(pme),
2720 #ifdef CONFIG_MLX5_EN_IPSEC
2721 	&MLX5E_STATS_GRP(ipsec_hw),
2722 	&MLX5E_STATS_GRP(ipsec_sw),
2723 #endif
2724 	&MLX5E_STATS_GRP(tls),
2725 	&MLX5E_STATS_GRP(channels),
2726 	&MLX5E_STATS_GRP(per_port_buff_congest),
2727 	&MLX5E_STATS_GRP(ptp),
2728 	&MLX5E_STATS_GRP(qos),
2729 #ifdef CONFIG_MLX5_MACSEC
2730 	&MLX5E_STATS_GRP(macsec_hw),
2731 #endif
2732 	&MLX5E_STATS_GRP(pcie_cong),
2733 };
2734 
2735 unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
2736 {
2737 	return ARRAY_SIZE(mlx5e_nic_stats_grps);
2738 }
2739