1 /*
2  * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include "lib/events.h"
34 #include "en.h"
35 #include "en_accel/ktls.h"
36 #include "en_accel/en_accel.h"
37 #include "en/ptp.h"
38 #include "en/port.h"
39 
40 #include <net/page_pool/helpers.h>
41 
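/* Write one counter value at the current ethtool data cursor and advance it. */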
void mlx5e_ethtool_put_stat(u64 **data, u64 val)
{
	*(*data)++ = val;
}

static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
	return !priv->profile->stats_grps_num ? 0 :
		priv->profile->stats_grps_num(priv);
}

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	unsigned int total = 0;
	int i;

	for (i = 0; i < num_stats_grps; i++)
		total += stats_grps[i]->get_num_stats(priv);

	return total;
}

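/* Update only the stats groups that opt into ndo-driven updates via
 * MLX5E_NDO_UPDATE_STATS; like mlx5e_stats_update() below, groups are
 * walked in reverse order.
 */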
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats &&
		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		stats_grps[i]->fill_stats(priv, &data);
}

void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		stats_grps[i]->fill_strings(priv, &data);
}

/* Concrete NIC Stats */

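/* Each counter_desc pairs an ethtool string with the offset of the matching
 * field; MLX5E_DECLARE_STAT() in en_stats.h derives both from the struct
 * member name.
 */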
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
#ifdef CONFIG_MLX5_EN_ARFS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_add) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_in) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_out) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_expired) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)

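/* The MLX5E_DECLARE_STATS_GRP_OP_* macros (en_stats.h) expand to the
 * canonical signatures of a group's num_stats/fill_strings/fill_stats/
 * update_stats callbacks.
 */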
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		ethtool_puts(data, sw_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		mlx5e_ethtool_put_stat(data,
				       MLX5E_READ_CTR64_CPU(&priv->stats.sw,
							    sw_stats_desc, i));
}

static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
{
	s->tx_xdp_xmit  += xdpsq_red_stats->xmit;
	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
	s->tx_xdp_nops  += xdpsq_red_stats->nops;
	s->tx_xdp_full  += xdpsq_red_stats->full;
	s->tx_xdp_err   += xdpsq_red_stats->err;
	s->tx_xdp_cqes  += xdpsq_red_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xdpsq_stats)
{
	s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
	s->rx_xdp_tx_nops  += xdpsq_stats->nops;
	s->rx_xdp_tx_full  += xdpsq_stats->full;
	s->rx_xdp_tx_err   += xdpsq_stats->err;
	s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xsksq_stats)
{
	s->tx_xsk_xmit  += xsksq_stats->xmit;
	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
	s->tx_xsk_inlnw += xsksq_stats->inlnw;
	s->tx_xsk_full  += xsksq_stats->full;
	s->tx_xsk_err   += xsksq_stats->err;
	s->tx_xsk_cqes  += xsksq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
						  struct mlx5e_rq_stats *xskrq_stats)
{
	s->rx_xsk_packets                += xskrq_stats->packets;
	s->rx_xsk_bytes                  += xskrq_stats->bytes;
	s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
	s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
	s->rx_xsk_csum_none              += xskrq_stats->csum_none;
	s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
	s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
	s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
	s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
	s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
	s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
	s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
	s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
	s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
	s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
	s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
	s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
}

static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_rq_stats *rq_stats)
{
	s->rx_packets                 += rq_stats->packets;
	s->rx_bytes                   += rq_stats->bytes;
	s->rx_lro_packets             += rq_stats->lro_packets;
	s->rx_lro_bytes               += rq_stats->lro_bytes;
	s->rx_gro_packets             += rq_stats->gro_packets;
	s->rx_gro_bytes               += rq_stats->gro_bytes;
	s->rx_gro_skbs                += rq_stats->gro_skbs;
	s->rx_gro_large_hds           += rq_stats->gro_large_hds;
	s->rx_hds_nodata_packets      += rq_stats->hds_nodata_packets;
	s->rx_hds_nodata_bytes        += rq_stats->hds_nodata_bytes;
	s->rx_hds_nosplit_packets     += rq_stats->hds_nosplit_packets;
	s->rx_hds_nosplit_bytes       += rq_stats->hds_nosplit_bytes;
	s->rx_ecn_mark                += rq_stats->ecn_mark;
	s->rx_removed_vlan_packets    += rq_stats->removed_vlan_packets;
	s->rx_csum_none               += rq_stats->csum_none;
	s->rx_csum_complete           += rq_stats->csum_complete;
	s->rx_csum_complete_tail      += rq_stats->csum_complete_tail;
	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
	s->rx_csum_unnecessary        += rq_stats->csum_unnecessary;
	s->rx_csum_unnecessary_inner  += rq_stats->csum_unnecessary_inner;
	s->rx_xdp_drop                += rq_stats->xdp_drop;
	s->rx_xdp_redirect            += rq_stats->xdp_redirect;
	s->rx_wqe_err                 += rq_stats->wqe_err;
	s->rx_mpwqe_filler_cqes       += rq_stats->mpwqe_filler_cqes;
	s->rx_mpwqe_filler_strides    += rq_stats->mpwqe_filler_strides;
	s->rx_oversize_pkts_sw_drop   += rq_stats->oversize_pkts_sw_drop;
	s->rx_buff_alloc_err          += rq_stats->buff_alloc_err;
	s->rx_cqe_compress_blks       += rq_stats->cqe_compress_blks;
	s->rx_cqe_compress_pkts       += rq_stats->cqe_compress_pkts;
	s->rx_congst_umr              += rq_stats->congst_umr;
#ifdef CONFIG_MLX5_EN_ARFS
	s->rx_arfs_add                += rq_stats->arfs_add;
	s->rx_arfs_request_in         += rq_stats->arfs_request_in;
	s->rx_arfs_request_out        += rq_stats->arfs_request_out;
	s->rx_arfs_expired            += rq_stats->arfs_expired;
	s->rx_arfs_err                += rq_stats->arfs_err;
#endif
	s->rx_recover                 += rq_stats->recover;
	s->rx_pp_alloc_fast           += rq_stats->pp_alloc_fast;
	s->rx_pp_alloc_slow           += rq_stats->pp_alloc_slow;
	s->rx_pp_alloc_empty          += rq_stats->pp_alloc_empty;
	s->rx_pp_alloc_refill         += rq_stats->pp_alloc_refill;
	s->rx_pp_alloc_waive          += rq_stats->pp_alloc_waive;
	s->rx_pp_alloc_slow_high_order += rq_stats->pp_alloc_slow_high_order;
	s->rx_pp_recycle_cached       += rq_stats->pp_recycle_cached;
	s->rx_pp_recycle_cache_full   += rq_stats->pp_recycle_cache_full;
	s->rx_pp_recycle_ring         += rq_stats->pp_recycle_ring;
	s->rx_pp_recycle_ring_full    += rq_stats->pp_recycle_ring_full;
	s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
#ifdef CONFIG_MLX5_EN_TLS
	s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
	s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
	s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
	s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
	s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
	s->rx_tls_resync_req_skip     += rq_stats->tls_resync_req_skip;
	s->rx_tls_resync_res_ok       += rq_stats->tls_resync_res_ok;
	s->rx_tls_resync_res_retry    += rq_stats->tls_resync_res_retry;
	s->rx_tls_resync_res_skip     += rq_stats->tls_resync_res_skip;
	s->rx_tls_err                 += rq_stats->tls_err;
#endif
}

static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_ch_stats *ch_stats)
{
	s->ch_events      += ch_stats->events;
	s->ch_poll        += ch_stats->poll;
	s->ch_arm         += ch_stats->arm;
	s->ch_aff_change  += ch_stats->aff_change;
	s->ch_force_irq   += ch_stats->force_irq;
	s->ch_eq_rearm    += ch_stats->eq_rearm;
}

static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
					       struct mlx5e_sq_stats *sq_stats)
{
	s->tx_packets               += sq_stats->packets;
	s->tx_bytes                 += sq_stats->bytes;
	s->tx_tso_packets           += sq_stats->tso_packets;
	s->tx_tso_bytes             += sq_stats->tso_bytes;
	s->tx_tso_inner_packets     += sq_stats->tso_inner_packets;
	s->tx_tso_inner_bytes       += sq_stats->tso_inner_bytes;
	s->tx_added_vlan_packets    += sq_stats->added_vlan_packets;
	s->tx_nop                   += sq_stats->nop;
	s->tx_mpwqe_blks            += sq_stats->mpwqe_blks;
	s->tx_mpwqe_pkts            += sq_stats->mpwqe_pkts;
	s->tx_queue_stopped         += sq_stats->stopped;
	s->tx_queue_wake            += sq_stats->wake;
	s->tx_queue_dropped         += sq_stats->dropped;
	s->tx_cqe_err               += sq_stats->cqe_err;
	s->tx_recover               += sq_stats->recover;
	s->tx_xmit_more             += sq_stats->xmit_more;
	s->tx_csum_partial_inner    += sq_stats->csum_partial_inner;
	s->tx_csum_none             += sq_stats->csum_none;
	s->tx_csum_partial          += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
	s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
	s->tx_tls_ooo               += sq_stats->tls_ooo;
	s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
	s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
	s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
	s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
#endif
	s->tx_cqes                  += sq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	int i;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return;

	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);

	if (priv->tx_ptp_opened) {
		for (i = 0; i < priv->max_opened_tc; i++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	if (priv->rx_ptp_opened) {
		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	stats = READ_ONCE(priv->htb_qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

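/* page_pool_get_stats() reports lifetime totals for the pool, so the per-RQ
 * pp_* counters are overwritten below rather than accumulated.
 */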
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
	struct page_pool *pool = c->rq.page_pool;
	struct page_pool_stats stats = { 0 };

	if (!page_pool_get_stats(pool, &stats))
		return;

	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;

	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->channels.num; i++) /* for active channels only */
		mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);

	for (i = 0; i < priv->stats_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			priv->channel_stats[i];
		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
		/* xdp redirect */
		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
		/* AF_XDP zero-copy */
		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}

static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)

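/* With a multi-PF (socket-direct) netdev, a queue counter may live on any
 * core device of the group.
 */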
static bool q_counter_any(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *pos;
	int i;

	mlx5_sd_for_each_dev(i, priv->mdev, pos)
		if (priv->q_counter[i++])
			return true;

	return false;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
	int num_stats = 0;

	if (q_counter_any(priv))
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
		ethtool_puts(data, q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		ethtool_puts(data, drop_rq_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
		mlx5e_ethtool_put_stat(data,
				       MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
							    q_stats_desc, i));
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i));
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	struct mlx5_core_dev *pos;
	u32 rx_out_of_buffer = 0;
	int ret, i;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	mlx5_sd_for_each_dev(i, priv->mdev, pos) {
		if (priv->q_counter[i]) {
			MLX5_SET(query_q_counter_in, in, counter_set_id,
				 priv->q_counter[i]);
			ret = mlx5_cmd_exec_inout(pos, query_q_counter, in, out);
			if (!ret)
				rx_out_of_buffer += MLX5_GET(query_q_counter_out,
							     out, out_of_buffer);
		}
	}
	qcnt->rx_out_of_buffer = rx_out_of_buffer;

	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}

#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
		VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
		VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

static const struct counter_desc vnic_env_stats_drop_desc[] = {
	{ "rx_oversize_pkts_buffer",
		VNIC_ENV_OFF(vport_env.eth_wqe_too_small) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
#define NUM_VNIC_ENV_DROP_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, eth_wqe_too_small) ? \
	 ARRAY_SIZE(vnic_env_stats_drop_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		ethtool_puts(data, vnic_env_stats_steer_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		ethtool_puts(data, vnic_env_stats_dev_oob_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
		ethtool_puts(data, vnic_env_stats_drop_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
					    vnic_env_stats_steer_desc, i));

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
					    vnic_env_stats_dev_oob_desc, i));

	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
					    vnic_env_stats_drop_desc, i));
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

static const struct counter_desc vport_loopback_stats_desc[] = {
	{ "vport_loopback_packets",
		VPORT_COUNTER_OFF(local_loopback.packets) },
	{ "vport_loopback_bytes",
		VPORT_COUNTER_OFF(local_loopback.octets) },
};

#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
#define NUM_VPORT_LOOPBACK_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
	 ARRAY_SIZE(vport_loopback_stats_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS +
		NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		ethtool_puts(data, vport_stats_desc[i].format);

	for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
		ethtool_puts(data, vport_loopback_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
					    vport_stats_desc, i));

	for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
		mlx5e_ethtool_put_stat(
			data,
			MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
					    vport_loopback_stats_desc, i));
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}

#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		ethtool_puts(data, pport_802_3_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR64_BE(
				      &priv->stats.pport.IEEE_802_3_counters,
				      pport_802_3_stats_desc, i));
}

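/* PPCNT is assumed supported when the device exposes no PCAM register;
 * otherwise the PCAM ppcnt bit decides.
 */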
880 	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
881 
882 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
883 {
884 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
885 	struct mlx5_core_dev *mdev = priv->mdev;
886 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
887 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
888 	void *out;
889 
890 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
891 		return;
892 
893 	MLX5_SET(ppcnt_reg, in, local_port, 1);
894 	out = pstats->IEEE_802_3_counters;
895 	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
896 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
897 }
898 
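/* Read a 64-bit big-endian PPCNT counter: <c>_high and <c>_low are laid out
 * as consecutive 32-bit fields, so one __be64 load at the _high offset
 * returns the whole value.
 */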
#define MLX5E_READ_CTR64_BE_F(ptr, set, c)		\
	be64_to_cpu(*(__be64 *)((char *)ptr +		\
		MLX5_BYTE_OFF(ppcnt_reg,		\
			      counter_set.set.c##_high)))

static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
				u32 *ppcnt_ieee_802_3)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
				    sz, MLX5_REG_PPCNT, 0, 0);
}

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_received);
}

void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_phy_stats *phy_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	phy_stats->SymbolErrorDuringCarrier =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_symbol_error_during_carrier);
}

void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

#define RD(name)							\
	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,				\
			      eth_802_3_cntrs_grp_data_layout,		\
			      name)

	mac_stats->FramesTransmittedOK	= RD(a_frames_transmitted_ok);
	mac_stats->FramesReceivedOK	= RD(a_frames_received_ok);
	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
	mac_stats->OctetsTransmittedOK	= RD(a_octets_transmitted_ok);
	mac_stats->OctetsReceivedOK	= RD(a_octets_received_ok);
	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
	mac_stats->InRangeLengthErrors	= RD(a_in_range_length_errors);
	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
	mac_stats->FrameTooLongErrors	= RD(a_frame_too_long_errors);
#undef RD
}

void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	ctrl_stats->MACControlFramesTransmitted =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_transmitted);
	ctrl_stats->MACControlFramesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_received);
	ctrl_stats->UnsupportedOpcodesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_unsupported_opcodes_received);
}

#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		ethtool_puts(data, pport_2863_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR64_BE(
				      &priv->stats.pport.RFC_2863_counters,
				      pport_2863_stats_desc, i));
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		ethtool_puts(data, pport_2819_stats_desc[i].format);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		mlx5e_ethtool_put_stat(
			data, MLX5E_READ_CTR64_BE(
				      &priv->stats.pport.RFC_2819_counters,
				      pport_2819_stats_desc, i));
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

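/* These buckets mirror the RFC 2819 ether_stats_pkts*octets counters that
 * fill rmon->hist[] below.
 */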
1116 static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
1117 	{    0,    64 },
1118 	{   65,   127 },
1119 	{  128,   255 },
1120 	{  256,   511 },
1121 	{  512,  1023 },
1122 	{ 1024,  1518 },
1123 	{ 1519,  2047 },
1124 	{ 2048,  4095 },
1125 	{ 4096,  8191 },
1126 	{ 8192, 10239 },
1127 	{}
1128 };
1129 
mlx5e_stats_rmon_get(struct mlx5e_priv * priv,struct ethtool_rmon_stats * rmon,const struct ethtool_rmon_hist_range ** ranges)1130 void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
1131 			  struct ethtool_rmon_stats *rmon,
1132 			  const struct ethtool_rmon_hist_range **ranges)
1133 {
1134 	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
1135 	struct mlx5_core_dev *mdev = priv->mdev;
1136 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1137 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1138 
1139 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1140 	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
1141 	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
1142 				 sz, MLX5_REG_PPCNT, 0, 0))
1143 		return;
1144 
1145 #define RD(name)						\
1146 	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters,		\
1147 			      eth_2819_cntrs_grp_data_layout,	\
1148 			      name)
1149 
1150 	rmon->undersize_pkts	= RD(ether_stats_undersize_pkts);
1151 	rmon->fragments		= RD(ether_stats_fragments);
1152 	rmon->jabbers		= RD(ether_stats_jabbers);
1153 
1154 	rmon->hist[0]		= RD(ether_stats_pkts64octets);
1155 	rmon->hist[1]		= RD(ether_stats_pkts65to127octets);
1156 	rmon->hist[2]		= RD(ether_stats_pkts128to255octets);
1157 	rmon->hist[3]		= RD(ether_stats_pkts256to511octets);
1158 	rmon->hist[4]		= RD(ether_stats_pkts512to1023octets);
1159 	rmon->hist[5]		= RD(ether_stats_pkts1024to1518octets);
1160 	rmon->hist[6]		= RD(ether_stats_pkts1519to2047octets);
1161 	rmon->hist[7]		= RD(ether_stats_pkts2048to4095octets);
1162 	rmon->hist[8]		= RD(ether_stats_pkts4096to8191octets);
1163 	rmon->hist[9]		= RD(ether_stats_pkts8192to10239octets);
1164 #undef RD
1165 
1166 	*ranges = mlx5e_rmon_ranges;
1167 }
1168 
mlx5e_stats_ts_get(struct mlx5e_priv * priv,struct ethtool_ts_stats * ts_stats)1169 void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
1170 			struct ethtool_ts_stats *ts_stats)
1171 {
1172 	int i, j;
1173 
1174 	mutex_lock(&priv->state_lock);
1175 
1176 	if (priv->tx_ptp_opened) {
1177 		struct mlx5e_ptp *ptp = priv->channels.ptp;
1178 
1179 		ts_stats->pkts = 0;
1180 		ts_stats->err = 0;
1181 		ts_stats->lost = 0;
1182 
1183 		if (!ptp)
1184 			goto out;
1185 
1186 		/* Aggregate stats across all TCs */
1187 		for (i = 0; i < ptp->num_tc; i++) {
1188 			struct mlx5e_ptp_cq_stats *stats =
1189 				ptp->ptpsq[i].cq_stats;
1190 
1191 			ts_stats->pkts += stats->cqe;
1192 			ts_stats->err += stats->abort + stats->err_cqe +
1193 				stats->late_cqe;
1194 			ts_stats->lost += stats->lost_cqe;
1195 		}
1196 	} else {
1197 		/* DMA layer will always successfully timestamp packets. Other
1198 		 * counters do not make sense for this layer.
1199 		 */
1200 		ts_stats->pkts = 0;
1201 
1202 		/* Aggregate stats across all SQs */
1203 		for (j = 0; j < priv->channels.num; j++) {
1204 			struct mlx5e_channel *c = priv->channels.c[j];
1205 
1206 			for (i = 0; i < c->num_tc; i++) {
1207 				struct mlx5e_sq_stats *stats = c->sq[i].stats;
1208 
1209 				ts_stats->pkts += stats->timestamps;
1210 			}
1211 		}
1212 	}
1213 
1214 out:
1215 	mutex_unlock(&priv->state_lock);
1216 }
1217 
1218 #define PPORT_PHY_LAYER_OFF(c) \
1219 	MLX5_BYTE_OFF(ppcnt_reg, \
1220 		      counter_set.phys_layer_cntrs.c)
1221 static const struct counter_desc pport_phy_layer_cntrs_stats_desc[] = {
1222 	{ "link_down_events_phy", PPORT_PHY_LAYER_OFF(link_down_events) }
1223 };
1224 
1225 #define PPORT_PHY_STATISTICAL_OFF(c) \
1226 	MLX5_BYTE_OFF(ppcnt_reg, \
1227 		      counter_set.phys_layer_statistical_cntrs.c##_high)
1228 static const struct counter_desc pport_phy_statistical_stats_desc[] = {
1229 	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
1230 	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
1231 };
1232 
1233 static const struct counter_desc
1234 pport_phy_statistical_err_lanes_stats_desc[] = {
1235 	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
1236 	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
1237 	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
1238 	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
1239 };
1240 
1241 #define PPORT_PHY_RECOVERY_OFF(c) \
1242 	MLX5_BYTE_OFF(ppcnt_reg, counter_set.phys_layer_recovery_cntrs.c)
1243 static const struct counter_desc
1244 pport_phy_recovery_cntrs_stats_desc[] = {
1245 	{ "total_success_recovery_phy",
1246 	  PPORT_PHY_RECOVERY_OFF(total_successful_recovery_events) }
1247 };
1248 
1249 #define NUM_PPORT_PHY_LAYER_COUNTERS \
1250 	ARRAY_SIZE(pport_phy_layer_cntrs_stats_desc)
1251 #define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
1252 	ARRAY_SIZE(pport_phy_statistical_stats_desc)
1253 #define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
1254 	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
1255 #define NUM_PPORT_PHY_RECOVERY_COUNTERS \
1256 	ARRAY_SIZE(pport_phy_recovery_cntrs_stats_desc)
1257 
1258 #define NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(dev) \
1259 	(MLX5_CAP_PCAM_FEATURE(dev, ppcnt_statistical_group) ? \
1260 	NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0)
1261 #define NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(dev) \
1262 	(MLX5_CAP_PCAM_FEATURE(dev, per_lane_error_counters) ? \
1263 	NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0)
1264 #define NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(dev) \
1265 	(MLX5_CAP_PCAM_FEATURE(dev, ppcnt_recovery_counters) ? \
1266 	NUM_PPORT_PHY_RECOVERY_COUNTERS : 0)
1267 
MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)1268 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
1269 {
1270 	struct mlx5_core_dev *mdev = priv->mdev;
1271 	int num_stats;
1272 
1273 	num_stats = NUM_PPORT_PHY_LAYER_COUNTERS;
1274 
1275 	num_stats += NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev);
1276 
1277 	num_stats += NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
1278 
1279 	num_stats += NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev);
1280 	return num_stats;
1281 }
1282 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)1283 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
1284 {
1285 	struct mlx5_core_dev *mdev = priv->mdev;
1286 	int i;
1287 
1288 	for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
1289 		ethtool_puts(data, pport_phy_layer_cntrs_stats_desc[i].format);
1290 
1291 	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
1292 		ethtool_puts(data, pport_phy_statistical_stats_desc[i].format);
1293 
1294 	for (i = 0;
1295 	     i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
1296 	     i++)
1297 		ethtool_puts(data,
1298 			     pport_phy_statistical_err_lanes_stats_desc[i]
1299 			     .format);
1300 
1301 	for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
1302 		ethtool_puts(data,
1303 			     pport_phy_recovery_cntrs_stats_desc[i].format);
1304 }
1305 
1306 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
1307 {
1308 	struct mlx5_core_dev *mdev = priv->mdev;
1309 	int i;
1310 
1311 	for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
1312 		mlx5e_ethtool_put_stat(
1313 				data,
1314 				MLX5E_READ_CTR32_BE(&priv->stats.pport
1315 					.phy_counters,
1316 					pport_phy_layer_cntrs_stats_desc, i));
1317 
1318 	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
1319 		mlx5e_ethtool_put_stat(
1320 			data,
1321 			MLX5E_READ_CTR64_BE(
1322 				&priv->stats.pport.phy_statistical_counters,
1323 				pport_phy_statistical_stats_desc, i));
1324 
1325 	for (i = 0;
1326 	     i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
1327 	     i++)
1328 		mlx5e_ethtool_put_stat(
1329 			data,
1330 			MLX5E_READ_CTR64_BE(
1331 				&priv->stats.pport.phy_statistical_counters,
1332 				pport_phy_statistical_err_lanes_stats_desc, i));
1333 
1334 	for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
1335 		mlx5e_ethtool_put_stat(
1336 			data,
1337 			MLX5E_READ_CTR32_BE(
1338 				&priv->stats.pport.phy_recovery_counters,
1339 				pport_phy_recovery_cntrs_stats_desc, i));
1340 }
1341 
1342 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
1343 {
1344 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1345 	struct mlx5_core_dev *mdev = priv->mdev;
1346 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1347 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1348 	void *out;
1349 
1350 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1351 	out = pstats->phy_counters;
1352 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1353 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1354 
1355 	if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
1356 		out = pstats->phy_statistical_counters;
1357 		MLX5_SET(ppcnt_reg, in, grp,
1358 			 MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1359 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
1360 				     0);
1361 	}
1362 
1363 	if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_recovery_counters)) {
1364 		out = pstats->phy_recovery_counters;
1365 		MLX5_SET(ppcnt_reg, in, grp,
1366 			 MLX5_PHYSICAL_LAYER_RECOVERY_GROUP);
1367 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
1368 				     0);
1369 	}
1370 }
1371 
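/* Report the PHY link_down_events counter through the ethtool link
 * extended statistics interface.
 */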
1372 void mlx5e_get_link_ext_stats(struct net_device *dev,
1373 			      struct ethtool_link_ext_stats *stats)
1374 {
1375 	struct mlx5e_priv *priv = netdev_priv(dev);
1376 	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1377 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1378 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1379 
1380 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1381 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1382 	mlx5_core_access_reg(priv->mdev, in, sz, out, sz,
1383 			     MLX5_REG_PPCNT, 0, 0);
1384 
1385 	stats->link_down_events = MLX5_GET(ppcnt_reg, out,
1386 					   counter_set.phys_layer_cntrs.link_down_events);
1387 }
1388 
1389 static int fec_num_lanes(struct mlx5_core_dev *dev)
1390 {
1391 	u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1392 	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1393 	int err;
1394 
1395 	MLX5_SET(pmlp_reg, in, local_port, 1);
1396 	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
1397 				   MLX5_REG_PMLP, 0, 0);
1398 	if (err)
1399 		return 0;
1400 
1401 	return MLX5_GET(pmlp_reg, out, width);
1402 }
1403 
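/* Return the bit index of the currently active FEC mode, or
 * MLX5E_FEC_NOFEC when the mode cannot be queried.
 */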
1404 static int fec_active_mode(struct mlx5_core_dev *mdev)
1405 {
1406 	unsigned long fec_active_long;
1407 	u32 fec_active;
1408 
1409 	if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
1410 		return MLX5E_FEC_NOFEC;
1411 
1412 	fec_active_long = fec_active;
1413 	return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
1414 }
1415 
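/* Firecode FEC exposes corrected/uncorrectable block counters per
 * physical lane; this helper copies both counters for one lane index.
 */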
1416 #define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
1417 	fec_stats->corrected_blocks.lanes[(idx)] = \
1418 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
1419 				      fc_fec_corrected_blocks_lane##idx); \
1420 	fec_stats->uncorrectable_blocks.lanes[(idx)] = \
1421 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
1422 				      fc_fec_uncorrectable_blocks_lane##idx); \
1423 })
1424 
1425 static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
1426 			     u32 *ppcnt, u8 lanes)
1427 {
1428 	if (lanes > 3) { /* 4 lanes */
1429 		MLX5E_STATS_SET_FEC_BLOCK(3);
1430 		MLX5E_STATS_SET_FEC_BLOCK(2);
1431 	}
1432 	if (lanes > 1) /* 2 lanes */
1433 		MLX5E_STATS_SET_FEC_BLOCK(1);
1434 	if (lanes > 0) /* 1 lane */
1435 		MLX5E_STATS_SET_FEC_BLOCK(0);
1436 }
1437 
1438 static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
1439 {
1440 	fec_stats->corrected_blocks.total =
1441 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1442 				      rs_fec_corrected_blocks);
1443 	fec_stats->uncorrectable_blocks.total =
1444 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1445 				      rs_fec_uncorrectable_blocks);
1446 }
1447 
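/* Fill FEC block counters according to the active mode: RS/LLRS modes
 * report aggregate totals, Firecode reports per-lane counts.
 */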
1448 static void fec_set_block_stats(struct mlx5e_priv *priv,
1449 				struct ethtool_fec_stats *fec_stats)
1450 {
1451 	struct mlx5_core_dev *mdev = priv->mdev;
1452 	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1453 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1454 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1455 	int mode = fec_active_mode(mdev);
1456 
1457 	if (mode == MLX5E_FEC_NOFEC)
1458 		return;
1459 
1460 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1461 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1462 	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
1463 		return;
1464 
1465 	switch (mode) {
1466 	case MLX5E_FEC_RS_528_514:
1467 	case MLX5E_FEC_RS_544_514:
1468 	case MLX5E_FEC_LLRS_272_257_1:
1469 		fec_set_rs_stats(fec_stats, out);
1470 		return;
1471 	case MLX5E_FEC_FIRECODE:
1472 		fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
1473 	}
1474 }
1475 
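/* corrected_bits.total is read from the physical layer statistical
 * group of PPCNT (phy_corrected_bits).
 */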
1476 static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
1477 					 struct ethtool_fec_stats *fec_stats)
1478 {
1479 	u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
1480 	struct mlx5_core_dev *mdev = priv->mdev;
1481 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1482 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1483 
1484 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1485 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1486 	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
1487 				 sz, MLX5_REG_PPCNT, 0, 0))
1488 		return;
1489 
1490 	fec_stats->corrected_bits.total =
1491 		MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
1492 				      phys_layer_statistical_cntrs,
1493 				      phy_corrected_bits);
1494 }
1495 
1496 void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
1497 			 struct ethtool_fec_stats *fec_stats)
1498 {
1499 	if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
1500 		return;
1501 
1502 	fec_set_corrected_bits_total(priv, fec_stats);
1503 	fec_set_block_stats(priv, fec_stats);
1504 }
1505 
1506 #define PPORT_ETH_EXT_OFF(c) \
1507 	MLX5_BYTE_OFF(ppcnt_reg, \
1508 		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
1509 static const struct counter_desc pport_eth_ext_stats_desc[] = {
1510 	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
1511 };
1512 
1513 #define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)
1514 
1515 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
1516 {
1517 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1518 		return NUM_PPORT_ETH_EXT_COUNTERS;
1519 
1520 	return 0;
1521 }
1522 
1523 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
1524 {
1525 	int i;
1526 
1527 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1528 		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1529 			ethtool_puts(data, pport_eth_ext_stats_desc[i].format);
1530 }
1531 
1532 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
1533 {
1534 	int i;
1535 
1536 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1537 		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1538 			mlx5e_ethtool_put_stat(
1539 				data,
1540 				MLX5E_READ_CTR64_BE(
1541 					&priv->stats.pport.eth_ext_counters,
1542 					pport_eth_ext_stats_desc, i));
1543 }
1544 
1545 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
1546 {
1547 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1548 	struct mlx5_core_dev *mdev = priv->mdev;
1549 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1550 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1551 	void *out;
1552 
1553 	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
1554 		return;
1555 
1556 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1557 	out = pstats->eth_ext_counters;
1558 	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
1559 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1560 }
1561 
1562 #define PCIE_PERF_OFF(c) \
1563 	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
1564 static const struct counter_desc pcie_perf_stats_desc[] = {
1565 	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
1566 	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
1567 };
1568 
1569 #define PCIE_PERF_OFF64(c) \
1570 	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
1571 static const struct counter_desc pcie_perf_stats_desc64[] = {
1572 	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
1573 };
1574 
1575 static const struct counter_desc pcie_perf_stall_stats_desc[] = {
1576 	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
1577 	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
1578 	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
1579 	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
1580 };
1581 
1582 #define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
1583 #define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
1584 #define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)
1585 
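/* PCIe counters are read through the MPCNT register; each sub-group
 * below is gated by its own MCAM feature bit.
 */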
1586 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
1587 {
1588 	int num_stats = 0;
1589 
1590 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1591 		num_stats += NUM_PCIE_PERF_COUNTERS;
1592 
1593 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1594 		num_stats += NUM_PCIE_PERF_COUNTERS64;
1595 
1596 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1597 		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
1598 
1599 	return num_stats;
1600 }
1601 
1602 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
1603 {
1604 	int i;
1605 
1606 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1607 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1608 			ethtool_puts(data, pcie_perf_stats_desc[i].format);
1609 
1610 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1611 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1612 			ethtool_puts(data, pcie_perf_stats_desc64[i].format);
1613 
1614 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1615 		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1616 			ethtool_puts(data,
1617 				     pcie_perf_stall_stats_desc[i].format);
1618 }
1619 
1620 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
1621 {
1622 	int i;
1623 
1624 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1625 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1626 			mlx5e_ethtool_put_stat(
1627 				data,
1628 				MLX5E_READ_CTR32_BE(
1629 					&priv->stats.pcie.pcie_perf_counters,
1630 					pcie_perf_stats_desc, i));
1631 
1632 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1633 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1634 			mlx5e_ethtool_put_stat(
1635 				data,
1636 				MLX5E_READ_CTR64_BE(
1637 					&priv->stats.pcie.pcie_perf_counters,
1638 					pcie_perf_stats_desc64, i));
1639 
1640 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1641 		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1642 			mlx5e_ethtool_put_stat(
1643 				data,
1644 				MLX5E_READ_CTR32_BE(
1645 					&priv->stats.pcie.pcie_perf_counters,
1646 					pcie_perf_stall_stats_desc, i));
1647 }
1648 
1649 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
1650 {
1651 	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
1652 	struct mlx5_core_dev *mdev = priv->mdev;
1653 	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
1654 	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
1655 	void *out;
1656 
1657 	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
1658 		return;
1659 
1660 	out = pcie_stats->pcie_perf_counters;
1661 	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
1662 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
1663 }
1664 
1665 #define PPORT_PER_TC_PRIO_OFF(c) \
1666 	MLX5_BYTE_OFF(ppcnt_reg, \
1667 		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)
1668 
1669 static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
1670 	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
1671 };
1672 
1673 #define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)
1674 
1675 #define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
1676 	MLX5_BYTE_OFF(ppcnt_reg, \
1677 		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)
1678 
1679 static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
1680 	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
1681 	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
1682 };
1683 
1684 #define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
1685 	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
1686 
1687 static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
1688 {
1689 	struct mlx5_core_dev *mdev = priv->mdev;
1690 
1691 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1692 		return 0;
1693 
1694 	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
1695 }
1696 
1697 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
1698 {
1699 	struct mlx5_core_dev *mdev = priv->mdev;
1700 	int i, prio;
1701 
1702 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1703 		return;
1704 
1705 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1706 		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1707 			ethtool_sprintf(data,
1708 					pport_per_tc_prio_stats_desc[i].format,
1709 					prio);
1710 		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1711 			ethtool_sprintf(data,
1712 					pport_per_tc_congest_prio_stats_desc[i].format,
1713 					prio);
1714 	}
1715 }
1716 
1717 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
1718 {
1719 	struct mlx5e_pport_stats *pport = &priv->stats.pport;
1720 	struct mlx5_core_dev *mdev = priv->mdev;
1721 	int i, prio;
1722 
1723 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1724 		return;
1725 
1726 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1727 		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1728 			mlx5e_ethtool_put_stat(
1729 				data,
1730 				MLX5E_READ_CTR64_BE(
1731 					&pport->per_tc_prio_counters[prio],
1732 					pport_per_tc_prio_stats_desc, i));
1733 		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1734 			mlx5e_ethtool_put_stat(
1735 				data,
1736 				MLX5E_READ_CTR64_BE(
1737 					&pport->per_tc_congest_prio_counters
1738 						 [prio],
1739 					pport_per_tc_congest_prio_stats_desc,
1740 					i));
1741 	}
1742 }
1743 
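/* The per-TC groups are queried once per priority, with prio_tc
 * selecting which traffic class each PPCNT access returns.
 */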
1744 static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
1745 {
1746 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1747 	struct mlx5_core_dev *mdev = priv->mdev;
1748 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1749 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1750 	void *out;
1751 	int prio;
1752 
1753 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1754 		return;
1755 
1756 	MLX5_SET(ppcnt_reg, in, pnat, 2);
1757 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
1758 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1759 		out = pstats->per_tc_prio_counters[prio];
1760 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1761 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1762 	}
1763 }
1764 
1765 static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
1766 {
1767 	struct mlx5_core_dev *mdev = priv->mdev;
1768 
1769 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1770 		return 0;
1771 
1772 	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
1773 }
1774 
1775 static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
1776 {
1777 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1778 	struct mlx5_core_dev *mdev = priv->mdev;
1779 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1780 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1781 	void *out;
1782 	int prio;
1783 
1784 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1785 		return;
1786 
1787 	MLX5_SET(ppcnt_reg, in, pnat, 2);
1788 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
1789 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1790 		out = pstats->per_tc_congest_prio_counters[prio];
1791 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1792 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1793 	}
1794 }
1795 
1796 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
1797 {
1798 	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
1799 		mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
1800 }
1801 
1802 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
1803 {
1804 	mlx5e_grp_per_tc_prio_update_stats(priv);
1805 	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
1806 }
1807 
1808 #define PPORT_PER_PRIO_OFF(c) \
1809 	MLX5_BYTE_OFF(ppcnt_reg, \
1810 		      counter_set.eth_per_prio_grp_data_layout.c##_high)
1811 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
1812 	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
1813 	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
1814 	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
1815 	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
1816 	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
1817 };
1818 
1819 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
1820 
1821 static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
1822 {
1823 	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
1824 }
1825 
1826 static void mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
1827 						    u8 **data)
1828 {
1829 	int i, prio;
1830 
1831 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1832 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1833 			ethtool_sprintf(data,
1834 					pport_per_prio_traffic_stats_desc[i].format,
1835 					prio);
1836 	}
1837 }
1838 
1839 static void mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
1840 						  u64 **data)
1841 {
1842 	int i, prio;
1843 
1844 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1845 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1846 			mlx5e_ethtool_put_stat(
1847 				data,
1848 				MLX5E_READ_CTR64_BE(
1849 					&priv->stats.pport
1850 						 .per_prio_counters[prio],
1851 					pport_per_prio_traffic_stats_desc, i));
1852 	}
1853 }
1854 
1855 static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
1856 	/* %s is "global" or "prio{i}" */
1857 	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
1858 	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
1859 	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
1860 	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
1861 	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
1862 };
1863 
1864 static const struct counter_desc pport_pfc_stall_stats_desc[] = {
1865 	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
1866 	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
1867 };
1868 
1869 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
1870 #define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
1871 						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
1872 						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
1873 
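/* Return a bitmap of priorities with PFC enabled in either direction;
 * zero for non-Ethernet ports or when the query fails.
 */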
1874 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1875 {
1876 	struct mlx5_core_dev *mdev = priv->mdev;
1877 	u8 pfc_en_tx;
1878 	u8 pfc_en_rx;
1879 	int err;
1880 
1881 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1882 		return 0;
1883 
1884 	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1885 
1886 	return err ? 0 : pfc_en_tx | pfc_en_rx;
1887 }
1888 
1889 static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
1890 {
1891 	struct mlx5_core_dev *mdev = priv->mdev;
1892 	u32 rx_pause;
1893 	u32 tx_pause;
1894 	int err;
1895 
1896 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1897 		return false;
1898 
1899 	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1900 
1901 	return err ? false : rx_pause | tx_pause;
1902 }
1903 
1904 static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
1905 {
1906 	return (mlx5e_query_global_pause_combined(priv) +
1907 		hweight8(mlx5e_query_pfc_combined(priv))) *
1908 		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
1909 		NUM_PPORT_PFC_STALL_COUNTERS(priv);
1910 }
1911 
1912 static void mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
1913 						u8 **data)
1914 {
1915 	unsigned long pfc_combined;
1916 	int i, prio;
1917 
1918 	pfc_combined = mlx5e_query_pfc_combined(priv);
1919 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1920 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1921 			char pfc_string[ETH_GSTRING_LEN];
1922 
1923 			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
1924 			ethtool_sprintf(data,
1925 					pport_per_prio_pfc_stats_desc[i].format,
1926 					pfc_string);
1927 		}
1928 	}
1929 
1930 	if (mlx5e_query_global_pause_combined(priv)) {
1931 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1932 			ethtool_sprintf(data,
1933 					pport_per_prio_pfc_stats_desc[i].format,
1934 					"global");
1935 		}
1936 	}
1937 
1938 	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1939 		ethtool_puts(data, pport_pfc_stall_stats_desc[i].format);
1940 }
1941 
1942 static void mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
1943 					      u64 **data)
1944 {
1945 	unsigned long pfc_combined;
1946 	int i, prio;
1947 
1948 	pfc_combined = mlx5e_query_pfc_combined(priv);
1949 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1950 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1951 			mlx5e_ethtool_put_stat(
1952 				data,
1953 				MLX5E_READ_CTR64_BE(
1954 					&priv->stats.pport
1955 						 .per_prio_counters[prio],
1956 					pport_per_prio_pfc_stats_desc, i));
1957 		}
1958 	}
1959 
1960 	if (mlx5e_query_global_pause_combined(priv)) {
1961 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1962 			mlx5e_ethtool_put_stat(
1963 				data,
1964 				MLX5E_READ_CTR64_BE(
1965 					&priv->stats.pport.per_prio_counters[0],
1966 					pport_per_prio_pfc_stats_desc, i));
1967 		}
1968 	}
1969 
1970 	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1971 		mlx5e_ethtool_put_stat(
1972 			data, MLX5E_READ_CTR64_BE(
1973 				      &priv->stats.pport.per_prio_counters[0],
1974 				      pport_pfc_stall_stats_desc, i));
1975 }
1976 
1977 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
1978 {
1979 	return mlx5e_grp_per_prio_traffic_get_num_stats() +
1980 		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
1981 }
1982 
1983 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
1984 {
1985 	mlx5e_grp_per_prio_traffic_fill_strings(priv, data);
1986 	mlx5e_grp_per_prio_pfc_fill_strings(priv, data);
1987 }
1988 
1989 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
1990 {
1991 	mlx5e_grp_per_prio_traffic_fill_stats(priv, data);
1992 	mlx5e_grp_per_prio_pfc_fill_stats(priv, data);
1993 }
1994 
1995 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
1996 {
1997 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1998 	struct mlx5_core_dev *mdev = priv->mdev;
1999 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
2000 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
2001 	int prio;
2002 	void *out;
2003 
2004 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
2005 		return;
2006 
2007 	MLX5_SET(ppcnt_reg, in, local_port, 1);
2008 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
2009 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
2010 		out = pstats->per_prio_counters[prio];
2011 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
2012 		mlx5_core_access_reg(mdev, in, sz, out, sz,
2013 				     MLX5_REG_PPCNT, 0, 0);
2014 	}
2015 }
2016 
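/* Port module event counters are kept in u64 arrays indexed by event
 * type, hence the sizeof(u64) scaling in the offsets below.
 */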
2017 static const struct counter_desc mlx5e_pme_status_desc[] = {
2018 	{ "module_unplug",       sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
2019 };
2020 
2021 static const struct counter_desc mlx5e_pme_error_desc[] = {
2022 	{ "module_bus_stuck",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
2023 	{ "module_high_temp",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
2024 	{ "module_bad_shorted",  sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
2025 };
2026 
2027 #define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
2028 #define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)
2029 
2030 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
2031 {
2032 	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
2033 }
2034 
2035 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
2036 {
2037 	int i;
2038 
2039 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
2040 		ethtool_puts(data, mlx5e_pme_status_desc[i].format);
2041 
2042 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
2043 		ethtool_puts(data, mlx5e_pme_error_desc[i].format);
2044 }
2045 
2046 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
2047 {
2048 	struct mlx5_pme_stats pme_stats;
2049 	int i;
2050 
2051 	mlx5_get_pme_stats(priv->mdev, &pme_stats);
2052 
2053 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
2054 		mlx5e_ethtool_put_stat(
2055 			data, MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
2056 						   mlx5e_pme_status_desc, i));
2057 
2058 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
2059 		mlx5e_ethtool_put_stat(
2060 			data, MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
2061 						   mlx5e_pme_error_desc, i));
2062 }
2063 
2064 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
2065 
2066 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
2067 {
2068 	return mlx5e_ktls_get_count(priv);
2069 }
2070 
2071 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
2072 {
2073 	mlx5e_ktls_get_strings(priv, data);
2074 }
2075 
2076 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
2077 {
2078 	mlx5e_ktls_get_stats(priv, data);
2079 }
2080 
2081 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
2082 
2083 static const struct counter_desc rq_stats_desc[] = {
2084 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
2085 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
2086 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
2087 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
2088 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
2089 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2090 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2091 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
2092 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
2093 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2094 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
2095 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
2096 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
2097 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
2098 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
2099 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
2100 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) },
2101 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) },
2102 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_packets) },
2103 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_bytes) },
2104 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
2105 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2106 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
2107 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2108 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2109 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2110 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2111 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2112 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2113 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
2114 #ifdef CONFIG_MLX5_EN_ARFS
2115 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_add) },
2116 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_in) },
2117 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_out) },
2118 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_expired) },
2119 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
2120 #endif
2121 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
2122 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
2123 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
2124 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
2125 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
2126 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
2127 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
2128 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
2129 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
2130 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
2131 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
2132 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
2133 #ifdef CONFIG_MLX5_EN_TLS
2134 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
2135 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
2136 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
2137 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
2138 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
2139 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
2140 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
2141 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
2142 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
2143 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
2144 #endif
2145 };
2146 
2147 static const struct counter_desc sq_stats_desc[] = {
2148 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
2149 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
2150 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2151 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2152 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2153 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2154 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2155 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2156 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2157 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
2158 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, timestamps) },
2159 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2160 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2161 #ifdef CONFIG_MLX5_EN_TLS
2162 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2163 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
2164 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2165 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2166 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2167 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2168 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2169 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2170 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
2171 #endif
2172 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2173 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
2174 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
2175 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2176 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
2177 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
2178 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
2179 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2180 };
2181 
2182 static const struct counter_desc rq_xdpsq_stats_desc[] = {
2183 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2184 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2185 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2186 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
2187 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2188 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2189 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2190 };
2191 
2192 static const struct counter_desc xdpsq_stats_desc[] = {
2193 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2194 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2195 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2196 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
2197 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2198 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2199 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2200 };
2201 
2202 static const struct counter_desc xskrq_stats_desc[] = {
2203 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
2204 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
2205 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2206 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2207 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2208 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
2209 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2210 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2211 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2212 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2213 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2214 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2215 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2216 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2217 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2218 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2219 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2220 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2221 };
2222 
2223 static const struct counter_desc xsksq_stats_desc[] = {
2224 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2225 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2226 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2227 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2228 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2229 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2230 };
2231 
2232 static const struct counter_desc ch_stats_desc[] = {
2233 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
2234 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
2235 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
2236 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
2237 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
2238 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2239 };
2240 
2241 static const struct counter_desc ptp_sq_stats_desc[] = {
2242 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
2243 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
2244 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2245 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2246 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2247 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
2248 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2249 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
2250 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
2251 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2252 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
2253 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
2254 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
2255 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2256 };
2257 
2258 static const struct counter_desc ptp_ch_stats_desc[] = {
2259 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
2260 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
2261 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
2262 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2263 };
2264 
2265 static const struct counter_desc ptp_cq_stats_desc[] = {
2266 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
2267 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
2268 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
2269 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
2270 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
2271 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, lost_cqe) },
2272 };
2273 
2274 static const struct counter_desc ptp_rq_stats_desc[] = {
2275 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
2276 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
2277 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2278 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
2279 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
2280 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2281 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2282 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
2283 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2284 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2285 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
2286 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
2287 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2288 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2289 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2290 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2291 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2292 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2293 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2294 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2295 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2296 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2297 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
2298 };
2299 
2300 static const struct counter_desc qos_sq_stats_desc[] = {
2301 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
2302 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
2303 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2304 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2305 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2306 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2307 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2308 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2309 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2310 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
2311 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, timestamps) },
2312 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2313 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2314 #ifdef CONFIG_MLX5_EN_TLS
2315 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2316 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
2317 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2318 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2319 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2320 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2321 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2322 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2323 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
2324 #endif
2325 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2326 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
2327 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
2328 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2329 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
2330 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
2331 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
2332 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2333 };
2334 
2335 #define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
2336 #define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
2337 #define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
2338 #define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
2339 #define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
2340 #define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
2341 #define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
2342 #define NUM_PTP_SQ_STATS		ARRAY_SIZE(ptp_sq_stats_desc)
2343 #define NUM_PTP_CH_STATS		ARRAY_SIZE(ptp_ch_stats_desc)
2344 #define NUM_PTP_CQ_STATS		ARRAY_SIZE(ptp_cq_stats_desc)
2345 #define NUM_PTP_RQ_STATS		ARRAY_SIZE(ptp_rq_stats_desc)
2346 #define NUM_QOS_SQ_STATS		ARRAY_SIZE(qos_sq_stats_desc)
2347 
2348 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
2349 {
2350 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2351 	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
2352 }
2353 
2354 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
2355 {
2356 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2357 	u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
2358 	int i, qid;
2359 
2360 	for (qid = 0; qid < max_qos_sqs; qid++)
2361 		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2362 			ethtool_sprintf(data, qos_sq_stats_desc[i].format, qid);
2363 }
2364 
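/* The acquire on htb_max_qos_sqs guarantees that stats entries below
 * it are fully initialized before they are dereferenced here.
 */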
2365 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
2366 {
2367 	struct mlx5e_sq_stats **stats;
2368 	u16 max_qos_sqs;
2369 	int i, qid;
2370 
2371 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2372 	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
2373 	stats = READ_ONCE(priv->htb_qos_sq_stats);
2374 
2375 	for (qid = 0; qid < max_qos_sqs; qid++) {
2376 		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
2377 
2378 		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2379 			mlx5e_ethtool_put_stat(
2380 				data,
2381 				MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i));
2382 	}
2383 }
2384 
2385 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
2386 
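/* PTP channel counters are reported once the PTP channel was opened;
 * SQ/CQ counters scale with the number of TCs, and the RQ counters
 * cover the single PTP RQ.
 */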
2387 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
2388 {
2389 	int num = NUM_PTP_CH_STATS;
2390 
2391 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2392 		return 0;
2393 
2394 	if (priv->tx_ptp_opened)
2395 		num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
2396 	if (priv->rx_ptp_opened)
2397 		num += NUM_PTP_RQ_STATS;
2398 
2399 	return num;
2400 }
2401 
2402 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
2403 {
2404 	int i, tc;
2405 
2406 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2407 		return;
2408 
2409 	for (i = 0; i < NUM_PTP_CH_STATS; i++)
2410 		ethtool_puts(data, ptp_ch_stats_desc[i].format);
2411 
2412 	if (priv->tx_ptp_opened) {
2413 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2414 			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2415 				ethtool_sprintf(data,
2416 						ptp_sq_stats_desc[i].format,
2417 						tc);
2418 
2419 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2420 			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2421 				ethtool_sprintf(data,
2422 						ptp_cq_stats_desc[i].format,
2423 						tc);
2424 	}
2425 	if (priv->rx_ptp_opened) {
2426 		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2427 			ethtool_sprintf(data, ptp_rq_stats_desc[i].format,
2428 					MLX5E_PTP_CHANNEL_IX);
2429 	}
2430 }
2431 
2432 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
2433 {
2434 	int i, tc;
2435 
2436 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2437 		return;
2438 
2439 	for (i = 0; i < NUM_PTP_CH_STATS; i++)
2440 		mlx5e_ethtool_put_stat(
2441 			data, MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
2442 						   ptp_ch_stats_desc, i));
2443 
2444 	if (priv->tx_ptp_opened) {
2445 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2446 			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2447 				mlx5e_ethtool_put_stat(
2448 					data, MLX5E_READ_CTR64_CPU(
2449 						      &priv->ptp_stats.sq[tc],
2450 						      ptp_sq_stats_desc, i));
2451 
2452 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2453 			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2454 				mlx5e_ethtool_put_stat(
2455 					data, MLX5E_READ_CTR64_CPU(
2456 						      &priv->ptp_stats.cq[tc],
2457 						      ptp_cq_stats_desc, i));
2458 	}
2459 	if (priv->rx_ptp_opened) {
2460 		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2461 			mlx5e_ethtool_put_stat(
2462 				data,
2463 				MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
2464 						     ptp_rq_stats_desc, i));
2465 	}
2466 }
2467 
2468 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
2469 
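/* Layout: ch counters for each channel, then rq (+ xskrq when XSK was
 * ever used) and rq_xdpsq per channel, then sq per TC and channel,
 * then xsksq and xdpsq per channel.
 */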
2470 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
2471 {
2472 	int max_nch = priv->stats_nch;
2473 
2474 	return (NUM_RQ_STATS * max_nch) +
2475 	       (NUM_CH_STATS * max_nch) +
2476 	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
2477 	       (NUM_RQ_XDPSQ_STATS * max_nch) +
2478 	       (NUM_XDPSQ_STATS * max_nch) +
2479 	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
2480 	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
2481 }
2482 
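/* The string order below must match the value order in the
 * corresponding fill_stats callback.
 */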
2483 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
2484 {
2485 	bool is_xsk = priv->xsk.ever_used;
2486 	int max_nch = priv->stats_nch;
2487 	int i, j, tc;
2488 
2489 	for (i = 0; i < max_nch; i++)
2490 		for (j = 0; j < NUM_CH_STATS; j++)
2491 			ethtool_sprintf(data, ch_stats_desc[j].format, i);
2492 
2493 	for (i = 0; i < max_nch; i++) {
2494 		for (j = 0; j < NUM_RQ_STATS; j++)
2495 			ethtool_sprintf(data, rq_stats_desc[j].format, i);
2496 		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2497 			ethtool_sprintf(data, xskrq_stats_desc[j].format, i);
2498 		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2499 			ethtool_sprintf(data, rq_xdpsq_stats_desc[j].format, i);
2500 	}
2501 
2502 	for (tc = 0; tc < priv->max_opened_tc; tc++)
2503 		for (i = 0; i < max_nch; i++)
2504 			for (j = 0; j < NUM_SQ_STATS; j++)
2505 				ethtool_sprintf(data, sq_stats_desc[j].format,
2506 						i + tc * max_nch);
2507 
2508 	for (i = 0; i < max_nch; i++) {
2509 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2510 			ethtool_sprintf(data, xsksq_stats_desc[j].format, i);
2511 		for (j = 0; j < NUM_XDPSQ_STATS; j++)
2512 			ethtool_sprintf(data, xdpsq_stats_desc[j].format, i);
2513 	}
2514 }
2515 
2516 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
2517 {
2518 	bool is_xsk = priv->xsk.ever_used;
2519 	int max_nch = priv->stats_nch;
2520 	int i, j, tc;
2521 
2522 	for (i = 0; i < max_nch; i++)
2523 		for (j = 0; j < NUM_CH_STATS; j++)
2524 			mlx5e_ethtool_put_stat(
2525 				data, MLX5E_READ_CTR64_CPU(
2526 					      &priv->channel_stats[i]->ch,
2527 					      ch_stats_desc, j));
2528 
2529 	for (i = 0; i < max_nch; i++) {
2530 		for (j = 0; j < NUM_RQ_STATS; j++)
2531 			mlx5e_ethtool_put_stat(
2532 				data, MLX5E_READ_CTR64_CPU(
2533 					      &priv->channel_stats[i]->rq,
2534 					      rq_stats_desc, j));
2535 		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2536 			mlx5e_ethtool_put_stat(
2537 				data, MLX5E_READ_CTR64_CPU(
2538 					      &priv->channel_stats[i]->xskrq,
2539 					      xskrq_stats_desc, j));
2540 		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2541 			mlx5e_ethtool_put_stat(
2542 				data, MLX5E_READ_CTR64_CPU(
2543 					      &priv->channel_stats[i]->rq_xdpsq,
2544 					      rq_xdpsq_stats_desc, j));
2545 	}
2546 
2547 	for (tc = 0; tc < priv->max_opened_tc; tc++)
2548 		for (i = 0; i < max_nch; i++)
2549 			for (j = 0; j < NUM_SQ_STATS; j++)
2550 				mlx5e_ethtool_put_stat(
2551 					data,
2552 					MLX5E_READ_CTR64_CPU(
2553 						&priv->channel_stats[i]->sq[tc],
2554 						sq_stats_desc, j));
2555 
2556 	for (i = 0; i < max_nch; i++) {
2557 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2558 			mlx5e_ethtool_put_stat(
2559 				data, MLX5E_READ_CTR64_CPU(
2560 					      &priv->channel_stats[i]->xsksq,
2561 					      xsksq_stats_desc, j));
2562 		for (j = 0; j < NUM_XDPSQ_STATS; j++)
2563 			mlx5e_ethtool_put_stat(
2564 				data, MLX5E_READ_CTR64_CPU(
2565 					      &priv->channel_stats[i]->xdpsq,
2566 					      xdpsq_stats_desc, j));
2567 	}
2568 }
2569 
2570 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
2571 
2572 MLX5E_DEFINE_STATS_GRP(sw, 0);
2573 MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
2574 MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
2575 MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
2576 MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
2577 MLX5E_DEFINE_STATS_GRP(2863, 0);
2578 MLX5E_DEFINE_STATS_GRP(2819, 0);
2579 MLX5E_DEFINE_STATS_GRP(phy, 0);
2580 MLX5E_DEFINE_STATS_GRP(pcie, 0);
2581 MLX5E_DEFINE_STATS_GRP(per_prio, 0);
2582 MLX5E_DEFINE_STATS_GRP(pme, 0);
2583 MLX5E_DEFINE_STATS_GRP(channels, 0);
2584 MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
2585 MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
2586 static MLX5E_DEFINE_STATS_GRP(tls, 0);
2587 MLX5E_DEFINE_STATS_GRP(ptp, 0);
2588 static MLX5E_DEFINE_STATS_GRP(qos, 0);
2589 
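/* A new group would follow the same pattern (a sketch, using a
 * hypothetical "foo" group whose callbacks are declared through the
 * MLX5E_DECLARE_STATS_GRP_OP_* macros above):
 *
 *	MLX5E_DEFINE_STATS_GRP(foo, 0);
 *
 * plus an &MLX5E_STATS_GRP(foo) entry in mlx5e_nic_stats_grps[] below.
 */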
2590 /* The stats groups are listed in reverse of the order in which update_stats() invokes them. */
2591 mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
2592 	&MLX5E_STATS_GRP(sw),
2593 	&MLX5E_STATS_GRP(qcnt),
2594 	&MLX5E_STATS_GRP(vnic_env),
2595 	&MLX5E_STATS_GRP(vport),
2596 	&MLX5E_STATS_GRP(802_3),
2597 	&MLX5E_STATS_GRP(2863),
2598 	&MLX5E_STATS_GRP(2819),
2599 	&MLX5E_STATS_GRP(phy),
2600 	&MLX5E_STATS_GRP(eth_ext),
2601 	&MLX5E_STATS_GRP(pcie),
2602 	&MLX5E_STATS_GRP(per_prio),
2603 	&MLX5E_STATS_GRP(pme),
2604 #ifdef CONFIG_MLX5_EN_IPSEC
2605 	&MLX5E_STATS_GRP(ipsec_hw),
2606 	&MLX5E_STATS_GRP(ipsec_sw),
2607 #endif
2608 	&MLX5E_STATS_GRP(tls),
2609 	&MLX5E_STATS_GRP(channels),
2610 	&MLX5E_STATS_GRP(per_port_buff_congest),
2611 	&MLX5E_STATS_GRP(ptp),
2612 	&MLX5E_STATS_GRP(qos),
2613 #ifdef CONFIG_MLX5_MACSEC
2614 	&MLX5E_STATS_GRP(macsec_hw),
2615 #endif
2616 };
2617 
2618 unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
2619 {
2620 	return ARRAY_SIZE(mlx5e_nic_stats_grps);
2621 }
2622