xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c (revision 9410645520e9b820069761f3450ef6661418e279)
1 /*
2  * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include "lib/events.h"
34 #include "en.h"
35 #include "en_accel/ktls.h"
36 #include "en_accel/en_accel.h"
37 #include "en/ptp.h"
38 #include "en/port.h"
39 
40 #ifdef CONFIG_PAGE_POOL_STATS
41 #include <net/page_pool/helpers.h>
42 #endif
43 
44 void mlx5e_ethtool_put_stat(u64 **data, u64 val)
45 {
46 	*(*data)++ = val;
47 }
48 
49 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
50 {
51 	return !priv->profile->stats_grps_num ? 0 :
52 		priv->profile->stats_grps_num(priv);
53 }
54 
55 unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
56 {
57 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
58 	const unsigned int num_stats_grps = stats_grps_num(priv);
59 	unsigned int total = 0;
60 	int i;
61 
62 	for (i = 0; i < num_stats_grps; i++)
63 		total += stats_grps[i]->get_num_stats(priv);
64 
65 	return total;
66 }
67 
68 void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
69 {
70 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
71 	const unsigned int num_stats_grps = stats_grps_num(priv);
72 	int i;
73 
74 	for (i = num_stats_grps - 1; i >= 0; i--)
75 		if (stats_grps[i]->update_stats &&
76 		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
77 			stats_grps[i]->update_stats(priv);
78 }
79 
80 void mlx5e_stats_update(struct mlx5e_priv *priv)
81 {
82 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
83 	const unsigned int num_stats_grps = stats_grps_num(priv);
84 	int i;
85 
86 	for (i = num_stats_grps - 1; i >= 0; i--)
87 		if (stats_grps[i]->update_stats)
88 			stats_grps[i]->update_stats(priv);
89 }
90 
91 void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
92 {
93 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
94 	const unsigned int num_stats_grps = stats_grps_num(priv);
95 	int i;
96 
97 	for (i = 0; i < num_stats_grps; i++)
98 		stats_grps[i]->fill_stats(priv, &data);
99 }
100 
101 void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
102 {
103 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
104 	const unsigned int num_stats_grps = stats_grps_num(priv);
105 	int i;
106 
107 	for (i = 0; i < num_stats_grps; i++)
108 		stats_grps[i]->fill_strings(priv, &data);
109 }
110 
111 /* Concrete NIC Stats */
112 
113 static const struct counter_desc sw_stats_desc[] = {
114 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
115 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
116 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
117 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
118 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
119 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
120 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
121 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
122 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
123 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
124 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
125 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },
126 
127 #ifdef CONFIG_MLX5_EN_TLS
128 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
129 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
130 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
131 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
132 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
133 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
134 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
135 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
136 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
137 #endif
138 
139 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
140 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
141 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
142 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
143 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
144 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
145 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) },
146 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) },
147 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_packets) },
148 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_bytes) },
149 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
150 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
151 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
152 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
153 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
154 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
155 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
156 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
157 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
158 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
159 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
160 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
161 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
162 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
163 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
164 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
165 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
166 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
167 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
168 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
169 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
170 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
171 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
172 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
173 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
174 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
175 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
176 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
177 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
178 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
179 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
180 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
181 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
182 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
183 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
184 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
185 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
186 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
187 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
188 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
189 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
190 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
191 #ifdef CONFIG_MLX5_EN_ARFS
192 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_add) },
193 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_in) },
194 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_out) },
195 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_expired) },
196 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
197 #endif
198 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
199 #ifdef CONFIG_PAGE_POOL_STATS
200 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
201 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
202 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
203 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
204 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
205 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
206 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
207 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
208 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
209 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
210 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
211 #endif
212 #ifdef CONFIG_MLX5_EN_TLS
213 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
214 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
215 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
216 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
217 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
218 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
219 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
220 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
221 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
222 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
223 #endif
224 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
225 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
226 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
227 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
228 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
229 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
230 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
231 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
232 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
233 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
234 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
235 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
236 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
237 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
238 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
239 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
240 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
241 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
242 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
243 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
244 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
245 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
246 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
247 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
248 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
249 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
250 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
251 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
252 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
253 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
254 };
255 
256 #define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)
257 
258 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
259 {
260 	return NUM_SW_COUNTERS;
261 }
262 
263 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
264 {
265 	int i;
266 
267 	for (i = 0; i < NUM_SW_COUNTERS; i++)
268 		ethtool_puts(data, sw_stats_desc[i].format);
269 }
270 
271 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
272 {
273 	int i;
274 
275 	for (i = 0; i < NUM_SW_COUNTERS; i++)
276 		mlx5e_ethtool_put_stat(data,
277 				       MLX5E_READ_CTR64_CPU(&priv->stats.sw,
278 							    sw_stats_desc, i));
279 }
280 
281 static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
282 						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
283 {
284 	s->tx_xdp_xmit  += xdpsq_red_stats->xmit;
285 	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
286 	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
287 	s->tx_xdp_nops  += xdpsq_red_stats->nops;
288 	s->tx_xdp_full  += xdpsq_red_stats->full;
289 	s->tx_xdp_err   += xdpsq_red_stats->err;
290 	s->tx_xdp_cqes  += xdpsq_red_stats->cqes;
291 }
292 
293 static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
294 						  struct mlx5e_xdpsq_stats *xdpsq_stats)
295 {
296 	s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
297 	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
298 	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
299 	s->rx_xdp_tx_nops  += xdpsq_stats->nops;
300 	s->rx_xdp_tx_full  += xdpsq_stats->full;
301 	s->rx_xdp_tx_err   += xdpsq_stats->err;
302 	s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
303 }
304 
305 static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
306 						  struct mlx5e_xdpsq_stats *xsksq_stats)
307 {
308 	s->tx_xsk_xmit  += xsksq_stats->xmit;
309 	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
310 	s->tx_xsk_inlnw += xsksq_stats->inlnw;
311 	s->tx_xsk_full  += xsksq_stats->full;
312 	s->tx_xsk_err   += xsksq_stats->err;
313 	s->tx_xsk_cqes  += xsksq_stats->cqes;
314 }
315 
316 static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
317 						  struct mlx5e_rq_stats *xskrq_stats)
318 {
319 	s->rx_xsk_packets                += xskrq_stats->packets;
320 	s->rx_xsk_bytes                  += xskrq_stats->bytes;
321 	s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
322 	s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
323 	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
324 	s->rx_xsk_csum_none              += xskrq_stats->csum_none;
325 	s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
326 	s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
327 	s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
328 	s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
329 	s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
330 	s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
331 	s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
332 	s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
333 	s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
334 	s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
335 	s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
336 	s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
337 }
338 
339 static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
340 						     struct mlx5e_rq_stats *rq_stats)
341 {
342 	s->rx_packets                 += rq_stats->packets;
343 	s->rx_bytes                   += rq_stats->bytes;
344 	s->rx_lro_packets             += rq_stats->lro_packets;
345 	s->rx_lro_bytes               += rq_stats->lro_bytes;
346 	s->rx_gro_packets             += rq_stats->gro_packets;
347 	s->rx_gro_bytes               += rq_stats->gro_bytes;
348 	s->rx_gro_skbs                += rq_stats->gro_skbs;
349 	s->rx_gro_large_hds           += rq_stats->gro_large_hds;
350 	s->rx_hds_nodata_packets      += rq_stats->hds_nodata_packets;
351 	s->rx_hds_nodata_bytes        += rq_stats->hds_nodata_bytes;
352 	s->rx_hds_nosplit_packets     += rq_stats->hds_nosplit_packets;
353 	s->rx_hds_nosplit_bytes       += rq_stats->hds_nosplit_bytes;
354 	s->rx_ecn_mark                += rq_stats->ecn_mark;
355 	s->rx_removed_vlan_packets    += rq_stats->removed_vlan_packets;
356 	s->rx_csum_none               += rq_stats->csum_none;
357 	s->rx_csum_complete           += rq_stats->csum_complete;
358 	s->rx_csum_complete_tail      += rq_stats->csum_complete_tail;
359 	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
360 	s->rx_csum_unnecessary        += rq_stats->csum_unnecessary;
361 	s->rx_csum_unnecessary_inner  += rq_stats->csum_unnecessary_inner;
362 	s->rx_xdp_drop                += rq_stats->xdp_drop;
363 	s->rx_xdp_redirect            += rq_stats->xdp_redirect;
364 	s->rx_wqe_err                 += rq_stats->wqe_err;
365 	s->rx_mpwqe_filler_cqes       += rq_stats->mpwqe_filler_cqes;
366 	s->rx_mpwqe_filler_strides    += rq_stats->mpwqe_filler_strides;
367 	s->rx_oversize_pkts_sw_drop   += rq_stats->oversize_pkts_sw_drop;
368 	s->rx_buff_alloc_err          += rq_stats->buff_alloc_err;
369 	s->rx_cqe_compress_blks       += rq_stats->cqe_compress_blks;
370 	s->rx_cqe_compress_pkts       += rq_stats->cqe_compress_pkts;
371 	s->rx_congst_umr              += rq_stats->congst_umr;
372 #ifdef CONFIG_MLX5_EN_ARFS
373 	s->rx_arfs_add                += rq_stats->arfs_add;
374 	s->rx_arfs_request_in         += rq_stats->arfs_request_in;
375 	s->rx_arfs_request_out        += rq_stats->arfs_request_out;
376 	s->rx_arfs_expired            += rq_stats->arfs_expired;
377 	s->rx_arfs_err                += rq_stats->arfs_err;
378 #endif
379 	s->rx_recover                 += rq_stats->recover;
380 #ifdef CONFIG_PAGE_POOL_STATS
381 	s->rx_pp_alloc_fast          += rq_stats->pp_alloc_fast;
382 	s->rx_pp_alloc_slow          += rq_stats->pp_alloc_slow;
383 	s->rx_pp_alloc_empty         += rq_stats->pp_alloc_empty;
384 	s->rx_pp_alloc_refill        += rq_stats->pp_alloc_refill;
385 	s->rx_pp_alloc_waive         += rq_stats->pp_alloc_waive;
386 	s->rx_pp_alloc_slow_high_order		+= rq_stats->pp_alloc_slow_high_order;
387 	s->rx_pp_recycle_cached			+= rq_stats->pp_recycle_cached;
388 	s->rx_pp_recycle_cache_full		+= rq_stats->pp_recycle_cache_full;
389 	s->rx_pp_recycle_ring			+= rq_stats->pp_recycle_ring;
390 	s->rx_pp_recycle_ring_full		+= rq_stats->pp_recycle_ring_full;
391 	s->rx_pp_recycle_released_ref		+= rq_stats->pp_recycle_released_ref;
392 #endif
393 #ifdef CONFIG_MLX5_EN_TLS
394 	s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
395 	s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
396 	s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
397 	s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
398 	s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
399 	s->rx_tls_resync_req_skip     += rq_stats->tls_resync_req_skip;
400 	s->rx_tls_resync_res_ok       += rq_stats->tls_resync_res_ok;
401 	s->rx_tls_resync_res_retry    += rq_stats->tls_resync_res_retry;
402 	s->rx_tls_resync_res_skip     += rq_stats->tls_resync_res_skip;
403 	s->rx_tls_err                 += rq_stats->tls_err;
404 #endif
405 }
406 
407 static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
408 						     struct mlx5e_ch_stats *ch_stats)
409 {
410 	s->ch_events      += ch_stats->events;
411 	s->ch_poll        += ch_stats->poll;
412 	s->ch_arm         += ch_stats->arm;
413 	s->ch_aff_change  += ch_stats->aff_change;
414 	s->ch_force_irq   += ch_stats->force_irq;
415 	s->ch_eq_rearm    += ch_stats->eq_rearm;
416 }
417 
418 static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
419 					       struct mlx5e_sq_stats *sq_stats)
420 {
421 	s->tx_packets               += sq_stats->packets;
422 	s->tx_bytes                 += sq_stats->bytes;
423 	s->tx_tso_packets           += sq_stats->tso_packets;
424 	s->tx_tso_bytes             += sq_stats->tso_bytes;
425 	s->tx_tso_inner_packets     += sq_stats->tso_inner_packets;
426 	s->tx_tso_inner_bytes       += sq_stats->tso_inner_bytes;
427 	s->tx_added_vlan_packets    += sq_stats->added_vlan_packets;
428 	s->tx_nop                   += sq_stats->nop;
429 	s->tx_mpwqe_blks            += sq_stats->mpwqe_blks;
430 	s->tx_mpwqe_pkts            += sq_stats->mpwqe_pkts;
431 	s->tx_queue_stopped         += sq_stats->stopped;
432 	s->tx_queue_wake            += sq_stats->wake;
433 	s->tx_queue_dropped         += sq_stats->dropped;
434 	s->tx_cqe_err               += sq_stats->cqe_err;
435 	s->tx_recover               += sq_stats->recover;
436 	s->tx_xmit_more             += sq_stats->xmit_more;
437 	s->tx_csum_partial_inner    += sq_stats->csum_partial_inner;
438 	s->tx_csum_none             += sq_stats->csum_none;
439 	s->tx_csum_partial          += sq_stats->csum_partial;
440 #ifdef CONFIG_MLX5_EN_TLS
441 	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
442 	s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
443 	s->tx_tls_ooo               += sq_stats->tls_ooo;
444 	s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
445 	s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
446 	s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
447 	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
448 	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
449 	s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
450 #endif
451 	s->tx_cqes                  += sq_stats->cqes;
452 }
453 
454 static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
455 						struct mlx5e_sw_stats *s)
456 {
457 	int i;
458 
459 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
460 		return;
461 
462 	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);
463 
464 	if (priv->tx_ptp_opened) {
465 		for (i = 0; i < priv->max_opened_tc; i++) {
466 			mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);
467 
468 			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
469 			barrier();
470 		}
471 	}
472 	if (priv->rx_ptp_opened) {
473 		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);
474 
475 		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
476 		barrier();
477 	}
478 }
479 
480 static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
481 						struct mlx5e_sw_stats *s)
482 {
483 	struct mlx5e_sq_stats **stats;
484 	u16 max_qos_sqs;
485 	int i;
486 
487 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
488 	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
489 	stats = READ_ONCE(priv->htb_qos_sq_stats);
490 
491 	for (i = 0; i < max_qos_sqs; i++) {
492 		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
493 
494 		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
495 		barrier();
496 	}
497 }
498 
499 #ifdef CONFIG_PAGE_POOL_STATS
500 static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
501 {
502 	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
503 	struct page_pool *pool = c->rq.page_pool;
504 	struct page_pool_stats stats = { 0 };
505 
506 	if (!page_pool_get_stats(pool, &stats))
507 		return;
508 
509 	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
510 	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
511 	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
512 	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
513 	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
514 	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;
515 
516 	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
517 	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
518 	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
519 	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
520 	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
521 }
522 #else
523 static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
524 {
525 }
526 #endif
527 
528 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
529 {
530 	struct mlx5e_sw_stats *s = &priv->stats.sw;
531 	int i;
532 
533 	memset(s, 0, sizeof(*s));
534 
535 	for (i = 0; i < priv->channels.num; i++) /* for active channels only */
536 		mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);
537 
538 	for (i = 0; i < priv->stats_nch; i++) {
539 		struct mlx5e_channel_stats *channel_stats =
540 			priv->channel_stats[i];
541 
542 		int j;
543 
544 		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
545 		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
546 		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
547 		/* xdp redirect */
548 		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
549 		/* AF_XDP zero-copy */
550 		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
551 		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);
552 
553 		for (j = 0; j < priv->max_opened_tc; j++) {
554 			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);
555 
556 			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
557 			barrier();
558 		}
559 	}
560 	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
561 	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
562 }
563 
564 static const struct counter_desc q_stats_desc[] = {
565 	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
566 };
567 
568 static const struct counter_desc drop_rq_stats_desc[] = {
569 	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
570 };
571 
572 #define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
573 #define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)
574 
575 static bool q_counter_any(struct mlx5e_priv *priv)
576 {
577 	struct mlx5_core_dev *pos;
578 	int i;
579 
580 	mlx5_sd_for_each_dev(i, priv->mdev, pos)
581 		if (priv->q_counter[i++])
582 			return true;
583 
584 	return false;
585 }
586 
587 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
588 {
589 	int num_stats = 0;
590 
591 	if (q_counter_any(priv))
592 		num_stats += NUM_Q_COUNTERS;
593 
594 	if (priv->drop_rq_q_counter)
595 		num_stats += NUM_DROP_RQ_COUNTERS;
596 
597 	return num_stats;
598 }
599 
600 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
601 {
602 	int i;
603 
604 	for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
605 		ethtool_puts(data, q_stats_desc[i].format);
606 
607 	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
608 		ethtool_puts(data, drop_rq_stats_desc[i].format);
609 }
610 
611 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
612 {
613 	int i;
614 
615 	for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
616 		mlx5e_ethtool_put_stat(data,
617 				       MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
618 							    q_stats_desc, i));
619 	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
620 		mlx5e_ethtool_put_stat(
621 			data, MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
622 						   drop_rq_stats_desc, i));
623 }
624 
625 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
626 {
627 	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
628 	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
629 	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
630 	struct mlx5_core_dev *pos;
631 	u32 rx_out_of_buffer = 0;
632 	int ret, i;
633 
634 	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
635 
636 	mlx5_sd_for_each_dev(i, priv->mdev, pos) {
637 		if (priv->q_counter[i]) {
638 			MLX5_SET(query_q_counter_in, in, counter_set_id,
639 				 priv->q_counter[i]);
640 			ret = mlx5_cmd_exec_inout(pos, query_q_counter, in, out);
641 			if (!ret)
642 				rx_out_of_buffer += MLX5_GET(query_q_counter_out,
643 							     out, out_of_buffer);
644 		}
645 	}
646 	qcnt->rx_out_of_buffer = rx_out_of_buffer;
647 
648 	if (priv->drop_rq_q_counter) {
649 		MLX5_SET(query_q_counter_in, in, counter_set_id,
650 			 priv->drop_rq_q_counter);
651 		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
652 		if (!ret)
653 			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
654 							    out, out_of_buffer);
655 	}
656 }
657 
658 #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
659 static const struct counter_desc vnic_env_stats_steer_desc[] = {
660 	{ "rx_steer_missed_packets",
661 		VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
662 };
663 
664 static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
665 	{ "dev_internal_queue_oob",
666 		VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
667 };
668 
669 static const struct counter_desc vnic_env_stats_drop_desc[] = {
670 	{ "rx_oversize_pkts_buffer",
671 		VNIC_ENV_OFF(vport_env.eth_wqe_too_small) },
672 };
673 
674 #define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
675 	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
676 	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
677 #define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
678 	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
679 	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
680 #define NUM_VNIC_ENV_DROP_COUNTERS(dev) \
681 	(MLX5_CAP_GEN(dev, eth_wqe_too_small) ? \
682 	 ARRAY_SIZE(vnic_env_stats_drop_desc) : 0)
683 
684 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
685 {
686 	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
687 	       NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) +
688 	       NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev);
689 }
690 
691 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
692 {
693 	int i;
694 
695 	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
696 		ethtool_puts(data, vnic_env_stats_steer_desc[i].format);
697 
698 	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
699 		ethtool_puts(data, vnic_env_stats_dev_oob_desc[i].format);
700 
701 	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
702 		ethtool_puts(data, vnic_env_stats_drop_desc[i].format);
703 }
704 
705 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
706 {
707 	int i;
708 
709 	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
710 		mlx5e_ethtool_put_stat(
711 			data,
712 			MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
713 					    vnic_env_stats_steer_desc, i));
714 
715 	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
716 		mlx5e_ethtool_put_stat(
717 			data,
718 			MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
719 					    vnic_env_stats_dev_oob_desc, i));
720 
721 	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
722 		mlx5e_ethtool_put_stat(
723 			data,
724 			MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
725 					    vnic_env_stats_drop_desc, i));
726 }
727 
728 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
729 {
730 	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
731 	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
732 	struct mlx5_core_dev *mdev = priv->mdev;
733 
734 	if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
735 		return;
736 
737 	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
738 	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
739 }
740 
741 #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
742 static const struct counter_desc vport_stats_desc[] = {
743 	{ "rx_vport_unicast_packets",
744 		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
745 	{ "rx_vport_unicast_bytes",
746 		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
747 	{ "tx_vport_unicast_packets",
748 		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
749 	{ "tx_vport_unicast_bytes",
750 		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
751 	{ "rx_vport_multicast_packets",
752 		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
753 	{ "rx_vport_multicast_bytes",
754 		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
755 	{ "tx_vport_multicast_packets",
756 		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
757 	{ "tx_vport_multicast_bytes",
758 		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
759 	{ "rx_vport_broadcast_packets",
760 		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
761 	{ "rx_vport_broadcast_bytes",
762 		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
763 	{ "tx_vport_broadcast_packets",
764 		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
765 	{ "tx_vport_broadcast_bytes",
766 		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
767 	{ "rx_vport_rdma_unicast_packets",
768 		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
769 	{ "rx_vport_rdma_unicast_bytes",
770 		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
771 	{ "tx_vport_rdma_unicast_packets",
772 		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
773 	{ "tx_vport_rdma_unicast_bytes",
774 		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
775 	{ "rx_vport_rdma_multicast_packets",
776 		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
777 	{ "rx_vport_rdma_multicast_bytes",
778 		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
779 	{ "tx_vport_rdma_multicast_packets",
780 		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
781 	{ "tx_vport_rdma_multicast_bytes",
782 		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
783 };
784 
785 static const struct counter_desc vport_loopback_stats_desc[] = {
786 	{ "vport_loopback_packets",
787 		VPORT_COUNTER_OFF(local_loopback.packets) },
788 	{ "vport_loopback_bytes",
789 		VPORT_COUNTER_OFF(local_loopback.octets) },
790 };
791 
792 #define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
793 #define NUM_VPORT_LOOPBACK_COUNTERS(dev) \
794 	(MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
795 	 ARRAY_SIZE(vport_loopback_stats_desc) : 0)
796 
797 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
798 {
799 	return NUM_VPORT_COUNTERS +
800 		NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev);
801 }
802 
803 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
804 {
805 	int i;
806 
807 	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
808 		ethtool_puts(data, vport_stats_desc[i].format);
809 
810 	for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
811 		ethtool_puts(data, vport_loopback_stats_desc[i].format);
812 }
813 
814 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
815 {
816 	int i;
817 
818 	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
819 		mlx5e_ethtool_put_stat(
820 			data,
821 			MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
822 					    vport_stats_desc, i));
823 
824 	for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
825 		mlx5e_ethtool_put_stat(
826 			data,
827 			MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
828 					    vport_loopback_stats_desc, i));
829 }
830 
831 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
832 {
833 	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
834 	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
835 	struct mlx5_core_dev *mdev = priv->mdev;
836 
837 	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
838 	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
839 }
840 
841 #define PPORT_802_3_OFF(c) \
842 	MLX5_BYTE_OFF(ppcnt_reg, \
843 		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
844 static const struct counter_desc pport_802_3_stats_desc[] = {
845 	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
846 	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
847 	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
848 	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
849 	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
850 	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
851 	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
852 	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
853 	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
854 	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
855 	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
856 	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
857 	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
858 	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
859 	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
860 	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
861 	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
862 	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
863 };
864 
865 #define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
866 
867 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
868 {
869 	return NUM_PPORT_802_3_COUNTERS;
870 }
871 
872 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
873 {
874 	int i;
875 
876 	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
877 		ethtool_puts(data, pport_802_3_stats_desc[i].format);
878 }
879 
880 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
881 {
882 	int i;
883 
884 	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
885 		mlx5e_ethtool_put_stat(
886 			data, MLX5E_READ_CTR64_BE(
887 				      &priv->stats.pport.IEEE_802_3_counters,
888 				      pport_802_3_stats_desc, i));
889 }
890 
891 #define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
892 	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
893 
894 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
895 {
896 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
897 	struct mlx5_core_dev *mdev = priv->mdev;
898 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
899 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
900 	void *out;
901 
902 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
903 		return;
904 
905 	MLX5_SET(ppcnt_reg, in, local_port, 1);
906 	out = pstats->IEEE_802_3_counters;
907 	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
908 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
909 }
910 
911 #define MLX5E_READ_CTR64_BE_F(ptr, set, c)		\
912 	be64_to_cpu(*(__be64 *)((char *)ptr +		\
913 		MLX5_BYTE_OFF(ppcnt_reg,		\
914 			      counter_set.set.c##_high)))
915 
916 static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
917 				u32 *ppcnt_ieee_802_3)
918 {
919 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
920 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
921 
922 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
923 		return -EOPNOTSUPP;
924 
925 	MLX5_SET(ppcnt_reg, in, local_port, 1);
926 	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
927 	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
928 				    sz, MLX5_REG_PPCNT, 0, 0);
929 }
930 
931 void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
932 			   struct ethtool_pause_stats *pause_stats)
933 {
934 	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
935 	struct mlx5_core_dev *mdev = priv->mdev;
936 
937 	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
938 		return;
939 
940 	pause_stats->tx_pause_frames =
941 		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
942 				      eth_802_3_cntrs_grp_data_layout,
943 				      a_pause_mac_ctrl_frames_transmitted);
944 	pause_stats->rx_pause_frames =
945 		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
946 				      eth_802_3_cntrs_grp_data_layout,
947 				      a_pause_mac_ctrl_frames_received);
948 }
949 
950 void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
951 			     struct ethtool_eth_phy_stats *phy_stats)
952 {
953 	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
954 	struct mlx5_core_dev *mdev = priv->mdev;
955 
956 	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
957 		return;
958 
959 	phy_stats->SymbolErrorDuringCarrier =
960 		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
961 				      eth_802_3_cntrs_grp_data_layout,
962 				      a_symbol_error_during_carrier);
963 }
964 
965 void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
966 			     struct ethtool_eth_mac_stats *mac_stats)
967 {
968 	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
969 	struct mlx5_core_dev *mdev = priv->mdev;
970 
971 	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
972 		return;
973 
974 #define RD(name)							\
975 	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,				\
976 			      eth_802_3_cntrs_grp_data_layout,		\
977 			      name)
978 
979 	mac_stats->FramesTransmittedOK	= RD(a_frames_transmitted_ok);
980 	mac_stats->FramesReceivedOK	= RD(a_frames_received_ok);
981 	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
982 	mac_stats->OctetsTransmittedOK	= RD(a_octets_transmitted_ok);
983 	mac_stats->OctetsReceivedOK	= RD(a_octets_received_ok);
984 	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
985 	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
986 	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
987 	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
988 	mac_stats->InRangeLengthErrors	= RD(a_in_range_length_errors);
989 	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
990 	mac_stats->FrameTooLongErrors	= RD(a_frame_too_long_errors);
991 #undef RD
992 }
993 
994 void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
995 			      struct ethtool_eth_ctrl_stats *ctrl_stats)
996 {
997 	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
998 	struct mlx5_core_dev *mdev = priv->mdev;
999 
1000 	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
1001 		return;
1002 
1003 	ctrl_stats->MACControlFramesTransmitted =
1004 		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
1005 				      eth_802_3_cntrs_grp_data_layout,
1006 				      a_mac_control_frames_transmitted);
1007 	ctrl_stats->MACControlFramesReceived =
1008 		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
1009 				      eth_802_3_cntrs_grp_data_layout,
1010 				      a_mac_control_frames_received);
1011 	ctrl_stats->UnsupportedOpcodesReceived =
1012 		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
1013 				      eth_802_3_cntrs_grp_data_layout,
1014 				      a_unsupported_opcodes_received);
1015 }
1016 
1017 #define PPORT_2863_OFF(c) \
1018 	MLX5_BYTE_OFF(ppcnt_reg, \
1019 		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
1020 static const struct counter_desc pport_2863_stats_desc[] = {
1021 	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
1022 	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
1023 	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
1024 };
1025 
1026 #define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
1027 
1028 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
1029 {
1030 	return NUM_PPORT_2863_COUNTERS;
1031 }
1032 
1033 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
1034 {
1035 	int i;
1036 
1037 	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
1038 		ethtool_puts(data, pport_2863_stats_desc[i].format);
1039 }
1040 
1041 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
1042 {
1043 	int i;
1044 
1045 	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
1046 		mlx5e_ethtool_put_stat(
1047 			data, MLX5E_READ_CTR64_BE(
1048 				      &priv->stats.pport.RFC_2863_counters,
1049 				      pport_2863_stats_desc, i));
1050 }
1051 
1052 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
1053 {
1054 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1055 	struct mlx5_core_dev *mdev = priv->mdev;
1056 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1057 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1058 	void *out;
1059 
1060 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1061 	out = pstats->RFC_2863_counters;
1062 	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
1063 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1064 }
1065 
1066 #define PPORT_2819_OFF(c) \
1067 	MLX5_BYTE_OFF(ppcnt_reg, \
1068 		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
1069 static const struct counter_desc pport_2819_stats_desc[] = {
1070 	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
1071 	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
1072 	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
1073 	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
1074 	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
1075 	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
1076 	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
1077 	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
1078 	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
1079 	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
1080 	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
1081 	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
1082 	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
1083 };
1084 
1085 #define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)
1086 
1087 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
1088 {
1089 	return NUM_PPORT_2819_COUNTERS;
1090 }
1091 
1092 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
1093 {
1094 	int i;
1095 
1096 	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
1097 		ethtool_puts(data, pport_2819_stats_desc[i].format);
1098 }
1099 
1100 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
1101 {
1102 	int i;
1103 
1104 	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
1105 		mlx5e_ethtool_put_stat(
1106 			data, MLX5E_READ_CTR64_BE(
1107 				      &priv->stats.pport.RFC_2819_counters,
1108 				      pport_2819_stats_desc, i));
1109 }
1110 
1111 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
1112 {
1113 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1114 	struct mlx5_core_dev *mdev = priv->mdev;
1115 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1116 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1117 	void *out;
1118 
1119 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
1120 		return;
1121 
1122 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1123 	out = pstats->RFC_2819_counters;
1124 	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
1125 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1126 }
1127 
1128 static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
1129 	{    0,    64 },
1130 	{   65,   127 },
1131 	{  128,   255 },
1132 	{  256,   511 },
1133 	{  512,  1023 },
1134 	{ 1024,  1518 },
1135 	{ 1519,  2047 },
1136 	{ 2048,  4095 },
1137 	{ 4096,  8191 },
1138 	{ 8192, 10239 },
1139 	{}
1140 };
1141 
1142 void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
1143 			  struct ethtool_rmon_stats *rmon,
1144 			  const struct ethtool_rmon_hist_range **ranges)
1145 {
1146 	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
1147 	struct mlx5_core_dev *mdev = priv->mdev;
1148 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1149 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1150 
1151 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1152 	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
1153 	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
1154 				 sz, MLX5_REG_PPCNT, 0, 0))
1155 		return;
1156 
1157 #define RD(name)						\
1158 	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters,		\
1159 			      eth_2819_cntrs_grp_data_layout,	\
1160 			      name)
1161 
1162 	rmon->undersize_pkts	= RD(ether_stats_undersize_pkts);
1163 	rmon->fragments		= RD(ether_stats_fragments);
1164 	rmon->jabbers		= RD(ether_stats_jabbers);
1165 
1166 	rmon->hist[0]		= RD(ether_stats_pkts64octets);
1167 	rmon->hist[1]		= RD(ether_stats_pkts65to127octets);
1168 	rmon->hist[2]		= RD(ether_stats_pkts128to255octets);
1169 	rmon->hist[3]		= RD(ether_stats_pkts256to511octets);
1170 	rmon->hist[4]		= RD(ether_stats_pkts512to1023octets);
1171 	rmon->hist[5]		= RD(ether_stats_pkts1024to1518octets);
1172 	rmon->hist[6]		= RD(ether_stats_pkts1519to2047octets);
1173 	rmon->hist[7]		= RD(ether_stats_pkts2048to4095octets);
1174 	rmon->hist[8]		= RD(ether_stats_pkts4096to8191octets);
1175 	rmon->hist[9]		= RD(ether_stats_pkts8192to10239octets);
1176 #undef RD
1177 
1178 	*ranges = mlx5e_rmon_ranges;
1179 }
1180 
1181 void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
1182 			struct ethtool_ts_stats *ts_stats)
1183 {
1184 	int i, j;
1185 
1186 	mutex_lock(&priv->state_lock);
1187 
1188 	if (priv->tx_ptp_opened) {
1189 		struct mlx5e_ptp *ptp = priv->channels.ptp;
1190 
1191 		ts_stats->pkts = 0;
1192 		ts_stats->err = 0;
1193 		ts_stats->lost = 0;
1194 
1195 		if (!ptp)
1196 			goto out;
1197 
1198 		/* Aggregate stats across all TCs */
1199 		for (i = 0; i < ptp->num_tc; i++) {
1200 			struct mlx5e_ptp_cq_stats *stats =
1201 				ptp->ptpsq[i].cq_stats;
1202 
1203 			ts_stats->pkts += stats->cqe;
1204 			ts_stats->err += stats->abort + stats->err_cqe +
1205 				stats->late_cqe;
1206 			ts_stats->lost += stats->lost_cqe;
1207 		}
1208 	} else {
1209 		/* DMA layer will always successfully timestamp packets. Other
1210 		 * counters do not make sense for this layer.
1211 		 */
1212 		ts_stats->pkts = 0;
1213 
1214 		/* Aggregate stats across all SQs */
1215 		for (j = 0; j < priv->channels.num; j++) {
1216 			struct mlx5e_channel *c = priv->channels.c[j];
1217 
1218 			for (i = 0; i < c->num_tc; i++) {
1219 				struct mlx5e_sq_stats *stats = c->sq[i].stats;
1220 
1221 				ts_stats->pkts += stats->timestamps;
1222 			}
1223 		}
1224 	}
1225 
1226 out:
1227 	mutex_unlock(&priv->state_lock);
1228 }
1229 
1230 #define PPORT_PHY_STATISTICAL_OFF(c) \
1231 	MLX5_BYTE_OFF(ppcnt_reg, \
1232 		      counter_set.phys_layer_statistical_cntrs.c##_high)
1233 static const struct counter_desc pport_phy_statistical_stats_desc[] = {
1234 	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
1235 	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
1236 };
1237 
1238 static const struct counter_desc
1239 pport_phy_statistical_err_lanes_stats_desc[] = {
1240 	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
1241 	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
1242 	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
1243 	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
1244 };
1245 
1246 #define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
1247 	ARRAY_SIZE(pport_phy_statistical_stats_desc)
1248 #define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
1249 	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
1250 
1251 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
1252 {
1253 	struct mlx5_core_dev *mdev = priv->mdev;
1254 	int num_stats;
1255 
1256 	/* "1" for link_down_events special counter */
1257 	num_stats = 1;
1258 
1259 	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
1260 		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
1261 
1262 	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
1263 		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
1264 
1265 	return num_stats;
1266 }
1267 
1268 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
1269 {
1270 	struct mlx5_core_dev *mdev = priv->mdev;
1271 	int i;
1272 
1273 	ethtool_puts(data, "link_down_events_phy");
1274 
1275 	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1276 		return;
1277 
1278 	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
1279 		ethtool_puts(data, pport_phy_statistical_stats_desc[i].format);
1280 
1281 	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
1282 		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
1283 			ethtool_puts(data,
1284 				     pport_phy_statistical_err_lanes_stats_desc[i].format);
1285 }
1286 
1287 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
1288 {
1289 	struct mlx5_core_dev *mdev = priv->mdev;
1290 	int i;
1291 
1292 	/* link_down_events_phy has special handling since it is not stored in __be64 format */
1293 	mlx5e_ethtool_put_stat(
1294 		data, MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
1295 			       counter_set.phys_layer_cntrs.link_down_events));
1296 
1297 	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1298 		return;
1299 
1300 	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
1301 		mlx5e_ethtool_put_stat(
1302 			data,
1303 			MLX5E_READ_CTR64_BE(
1304 				&priv->stats.pport.phy_statistical_counters,
1305 				pport_phy_statistical_stats_desc, i));
1306 
1307 	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
1308 		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
1309 			mlx5e_ethtool_put_stat(
1310 				data,
1311 				MLX5E_READ_CTR64_BE(
1312 					&priv->stats.pport
1313 						 .phy_statistical_counters,
1314 					pport_phy_statistical_err_lanes_stats_desc,
1315 					i));
1316 }
1317 
1318 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
1319 {
1320 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1321 	struct mlx5_core_dev *mdev = priv->mdev;
1322 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1323 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1324 	void *out;
1325 
1326 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1327 	out = pstats->phy_counters;
1328 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1329 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1330 
1331 	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1332 		return;
1333 
1334 	out = pstats->phy_statistical_counters;
1335 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1336 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1337 }
1338 
1339 void mlx5e_get_link_ext_stats(struct net_device *dev,
1340 			      struct ethtool_link_ext_stats *stats)
1341 {
1342 	struct mlx5e_priv *priv = netdev_priv(dev);
1343 	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1344 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1345 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1346 
1347 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1348 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1349 	mlx5_core_access_reg(priv->mdev, in, sz, out,
1350 			     MLX5_ST_SZ_BYTES(ppcnt_reg), MLX5_REG_PPCNT, 0, 0);
1351 
1352 	stats->link_down_events = MLX5_GET(ppcnt_reg, out,
1353 					   counter_set.phys_layer_cntrs.link_down_events);
1354 }
1355 
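/* Number of physical lanes, taken from the PMLP port mapping register;
 * returns 0 if the query fails, so no per-lane FEC blocks are reported.
 */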
1356 static int fec_num_lanes(struct mlx5_core_dev *dev)
1357 {
1358 	u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1359 	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1360 	int err;
1361 
1362 	MLX5_SET(pmlp_reg, in, local_port, 1);
1363 	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
1364 				   MLX5_REG_PMLP, 0, 0);
1365 	if (err)
1366 		return 0;
1367 
1368 	return MLX5_GET(pmlp_reg, out, width);
1369 }
1370 
1371 static int fec_active_mode(struct mlx5_core_dev *mdev)
1372 {
1373 	unsigned long fec_active_long;
1374 	u32 fec_active;
1375 
1376 	if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
1377 		return MLX5E_FEC_NOFEC;
1378 
1379 	fec_active_long = fec_active;
1380 	return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
1381 }
1382 
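/* Copy one lane's Firecode FEC corrected/uncorrectable block counters out of
 * the PPCNT physical layer counter set into the ethtool FEC stats.
 */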
1383 #define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
1384 	fec_stats->corrected_blocks.lanes[(idx)] = \
1385 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
1386 				      fc_fec_corrected_blocks_lane##idx); \
1387 	fec_stats->uncorrectable_blocks.lanes[(idx)] = \
1388 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
1389 				      fc_fec_uncorrectable_blocks_lane##idx); \
1390 })
1391 
1392 static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
1393 			     u32 *ppcnt, u8 lanes)
1394 {
1395 	if (lanes > 3) { /* 4 lanes */
1396 		MLX5E_STATS_SET_FEC_BLOCK(3);
1397 		MLX5E_STATS_SET_FEC_BLOCK(2);
1398 	}
1399 	if (lanes > 1) /* 2 lanes */
1400 		MLX5E_STATS_SET_FEC_BLOCK(1);
1401 	if (lanes > 0) /* 1 lane */
1402 		MLX5E_STATS_SET_FEC_BLOCK(0);
1403 }
1404 
1405 static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
1406 {
1407 	fec_stats->corrected_blocks.total =
1408 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1409 				      rs_fec_corrected_blocks);
1410 	fec_stats->uncorrectable_blocks.total =
1411 		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1412 				      rs_fec_uncorrectable_blocks);
1413 }
1414 
1415 static void fec_set_block_stats(struct mlx5e_priv *priv,
1416 				struct ethtool_fec_stats *fec_stats)
1417 {
1418 	struct mlx5_core_dev *mdev = priv->mdev;
1419 	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1420 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1421 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1422 	int mode = fec_active_mode(mdev);
1423 
1424 	if (mode == MLX5E_FEC_NOFEC)
1425 		return;
1426 
1427 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1428 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1429 	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
1430 		return;
1431 
1432 	switch (mode) {
1433 	case MLX5E_FEC_RS_528_514:
1434 	case MLX5E_FEC_RS_544_514:
1435 	case MLX5E_FEC_LLRS_272_257_1:
1436 		fec_set_rs_stats(fec_stats, out);
1437 		return;
1438 	case MLX5E_FEC_FIRECODE:
1439 		fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
1440 	}
1441 }
1442 
1443 static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
1444 					 struct ethtool_fec_stats *fec_stats)
1445 {
1446 	u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
1447 	struct mlx5_core_dev *mdev = priv->mdev;
1448 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1449 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1450 
1451 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1452 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1453 	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
1454 				 sz, MLX5_REG_PPCNT, 0, 0))
1455 		return;
1456 
1457 	fec_stats->corrected_bits.total =
1458 		MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
1459 				      phys_layer_statistical_cntrs,
1460 				      phy_corrected_bits);
1461 }
1462 
1463 void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
1464 			 struct ethtool_fec_stats *fec_stats)
1465 {
1466 	if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
1467 		return;
1468 
1469 	fec_set_corrected_bits_total(priv, fec_stats);
1470 	fec_set_block_stats(priv, fec_stats);
1471 }
1472 
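/* Ethernet extended counters group (PPCNT): currently only the "Rx buffer
 * almost full" event, exposed when rx_buffer_fullness_counters is supported.
 */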
1473 #define PPORT_ETH_EXT_OFF(c) \
1474 	MLX5_BYTE_OFF(ppcnt_reg, \
1475 		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
1476 static const struct counter_desc pport_eth_ext_stats_desc[] = {
1477 	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
1478 };
1479 
1480 #define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)
1481 
1482 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
1483 {
1484 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1485 		return NUM_PPORT_ETH_EXT_COUNTERS;
1486 
1487 	return 0;
1488 }
1489 
1490 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
1491 {
1492 	int i;
1493 
1494 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1495 		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1496 			ethtool_puts(data, pport_eth_ext_stats_desc[i].format);
1497 }
1498 
1499 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
1500 {
1501 	int i;
1502 
1503 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1504 		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1505 			mlx5e_ethtool_put_stat(
1506 				data,
1507 				MLX5E_READ_CTR64_BE(
1508 					&priv->stats.pport.eth_ext_counters,
1509 					pport_eth_ext_stats_desc, i));
1510 }
1511 
1512 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
1513 {
1514 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1515 	struct mlx5_core_dev *mdev = priv->mdev;
1516 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1517 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1518 	void *out;
1519 
1520 	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
1521 		return;
1522 
1523 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1524 	out = pstats->eth_ext_counters;
1525 	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
1526 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1527 }
1528 
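/* PCIe performance counters come from the MPCNT register. 32-bit counters
 * and 64-bit counters (whose fields are split into _high/_low halves in the
 * register layout) use separate descriptor tables so each can be read with
 * the matching helper.
 */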
1529 #define PCIE_PERF_OFF(c) \
1530 	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
1531 static const struct counter_desc pcie_perf_stats_desc[] = {
1532 	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
1533 	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
1534 };
1535 
1536 #define PCIE_PERF_OFF64(c) \
1537 	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
1538 static const struct counter_desc pcie_perf_stats_desc64[] = {
1539 	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
1540 };
1541 
1542 static const struct counter_desc pcie_perf_stall_stats_desc[] = {
1543 	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
1544 	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
1545 	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
1546 	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
1547 };
1548 
1549 #define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
1550 #define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
1551 #define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)
1552 
1553 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
1554 {
1555 	int num_stats = 0;
1556 
1557 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1558 		num_stats += NUM_PCIE_PERF_COUNTERS;
1559 
1560 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1561 		num_stats += NUM_PCIE_PERF_COUNTERS64;
1562 
1563 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1564 		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
1565 
1566 	return num_stats;
1567 }
1568 
1569 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
1570 {
1571 	int i;
1572 
1573 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1574 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1575 			ethtool_puts(data, pcie_perf_stats_desc[i].format);
1576 
1577 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1578 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1579 			ethtool_puts(data, pcie_perf_stats_desc64[i].format);
1580 
1581 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1582 		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1583 			ethtool_puts(data,
1584 				     pcie_perf_stall_stats_desc[i].format);
1585 }
1586 
1587 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
1588 {
1589 	int i;
1590 
1591 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1592 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1593 			mlx5e_ethtool_put_stat(
1594 				data,
1595 				MLX5E_READ_CTR32_BE(
1596 					&priv->stats.pcie.pcie_perf_counters,
1597 					pcie_perf_stats_desc, i));
1598 
1599 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1600 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1601 			mlx5e_ethtool_put_stat(
1602 				data,
1603 				MLX5E_READ_CTR64_BE(
1604 					&priv->stats.pcie.pcie_perf_counters,
1605 					pcie_perf_stats_desc64, i));
1606 
1607 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1608 		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1609 			mlx5e_ethtool_put_stat(
1610 				data,
1611 				MLX5E_READ_CTR32_BE(
1612 					&priv->stats.pcie.pcie_perf_counters,
1613 					pcie_perf_stall_stats_desc, i));
1614 }
1615 
1616 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
1617 {
1618 	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
1619 	struct mlx5_core_dev *mdev = priv->mdev;
1620 	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
1621 	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
1622 	void *out;
1623 
1624 	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
1625 		return;
1626 
1627 	out = pcie_stats->pcie_perf_counters;
1628 	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
1629 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
1630 }
1631 
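/* Per traffic class / per congestion priority counters: buffer discards,
 * WRED discards and ECN marks, queried from PPCNT once per priority and
 * gated on the SBCAM register capability.
 */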
1632 #define PPORT_PER_TC_PRIO_OFF(c) \
1633 	MLX5_BYTE_OFF(ppcnt_reg, \
1634 		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)
1635 
1636 static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
1637 	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
1638 };
1639 
1640 #define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)
1641 
1642 #define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
1643 	MLX5_BYTE_OFF(ppcnt_reg, \
1644 		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)
1645 
1646 static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
1647 	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
1648 	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
1649 };
1650 
1651 #define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
1652 	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
1653 
1654 static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
1655 {
1656 	struct mlx5_core_dev *mdev = priv->mdev;
1657 
1658 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1659 		return 0;
1660 
1661 	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
1662 }
1663 
1664 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
1665 {
1666 	struct mlx5_core_dev *mdev = priv->mdev;
1667 	int i, prio;
1668 
1669 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1670 		return;
1671 
1672 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1673 		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1674 			ethtool_sprintf(data,
1675 					pport_per_tc_prio_stats_desc[i].format,
1676 					prio);
1677 		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1678 			ethtool_sprintf(data,
1679 					pport_per_tc_congest_prio_stats_desc[i].format,
1680 					prio);
1681 	}
1682 }
1683 
1684 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
1685 {
1686 	struct mlx5e_pport_stats *pport = &priv->stats.pport;
1687 	struct mlx5_core_dev *mdev = priv->mdev;
1688 	int i, prio;
1689 
1690 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1691 		return;
1692 
1693 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1694 		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1695 			mlx5e_ethtool_put_stat(
1696 				data,
1697 				MLX5E_READ_CTR64_BE(
1698 					&pport->per_tc_prio_counters[prio],
1699 					pport_per_tc_prio_stats_desc, i));
1700 		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1701 			mlx5e_ethtool_put_stat(
1702 				data,
1703 				MLX5E_READ_CTR64_BE(
1704 					&pport->per_tc_congest_prio_counters
1705 						 [prio],
1706 					pport_per_tc_congest_prio_stats_desc,
1707 					i));
1708 	}
1709 }
1710 
1711 static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
1712 {
1713 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1714 	struct mlx5_core_dev *mdev = priv->mdev;
1715 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1716 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1717 	void *out;
1718 	int prio;
1719 
1720 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1721 		return;
1722 
1723 	MLX5_SET(ppcnt_reg, in, pnat, 2);
1724 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
1725 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1726 		out = pstats->per_tc_prio_counters[prio];
1727 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1728 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1729 	}
1730 }
1731 
1732 static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
1733 {
1734 	struct mlx5_core_dev *mdev = priv->mdev;
1735 
1736 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1737 		return 0;
1738 
1739 	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
1740 }
1741 
1742 static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
1743 {
1744 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1745 	struct mlx5_core_dev *mdev = priv->mdev;
1746 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1747 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1748 	void *out;
1749 	int prio;
1750 
1751 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1752 		return;
1753 
1754 	MLX5_SET(ppcnt_reg, in, pnat, 2);
1755 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
1756 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1757 		out = pstats->per_tc_congest_prio_counters[prio];
1758 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1759 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1760 	}
1761 }
1762 
1763 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
1764 {
1765 	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
1766 		mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
1767 }
1768 
1769 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
1770 {
1771 	mlx5e_grp_per_tc_prio_update_stats(priv);
1772 	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
1773 }
1774 
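/* Per-priority group: traffic counters are exposed for every priority, while
 * the PFC/pause counters below are exposed only for priorities with PFC
 * enabled (or once as "global" when global pause is enabled), plus the
 * pause-storm stall counters when supported.
 */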
1775 #define PPORT_PER_PRIO_OFF(c) \
1776 	MLX5_BYTE_OFF(ppcnt_reg, \
1777 		      counter_set.eth_per_prio_grp_data_layout.c##_high)
1778 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
1779 	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
1780 	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
1781 	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
1782 	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
1783 	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
1784 };
1785 
1786 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
1787 
1788 static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
1789 {
1790 	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
1791 }
1792 
1793 static void mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
1794 						    u8 **data)
1795 {
1796 	int i, prio;
1797 
1798 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1799 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1800 			ethtool_sprintf(data,
1801 					pport_per_prio_traffic_stats_desc[i].format,
1802 					prio);
1803 	}
1804 }
1805 
1806 static void mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
1807 						  u64 **data)
1808 {
1809 	int i, prio;
1810 
1811 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1812 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1813 			mlx5e_ethtool_put_stat(
1814 				data,
1815 				MLX5E_READ_CTR64_BE(
1816 					&priv->stats.pport
1817 						 .per_prio_counters[prio],
1818 					pport_per_prio_traffic_stats_desc, i));
1819 	}
1820 }
1821 
1822 static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
1823 	/* %s is "global" or "prio{i}" */
1824 	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
1825 	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
1826 	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
1827 	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
1828 	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
1829 };
1830 
1831 static const struct counter_desc pport_pfc_stall_stats_desc[] = {
1832 	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
1833 	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
1834 };
1835 
1836 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
1837 #define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
1838 						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
1839 						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
1840 
1841 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1842 {
1843 	struct mlx5_core_dev *mdev = priv->mdev;
1844 	u8 pfc_en_tx;
1845 	u8 pfc_en_rx;
1846 	int err;
1847 
1848 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1849 		return 0;
1850 
1851 	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1852 
1853 	return err ? 0 : pfc_en_tx | pfc_en_rx;
1854 }
1855 
1856 static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
1857 {
1858 	struct mlx5_core_dev *mdev = priv->mdev;
1859 	u32 rx_pause;
1860 	u32 tx_pause;
1861 	int err;
1862 
1863 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1864 		return false;
1865 
1866 	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1867 
1868 	return err ? false : rx_pause | tx_pause;
1869 }
1870 
1871 static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
1872 {
1873 	return (mlx5e_query_global_pause_combined(priv) +
1874 		hweight8(mlx5e_query_pfc_combined(priv))) *
1875 		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
1876 		NUM_PPORT_PFC_STALL_COUNTERS(priv);
1877 }
1878 
1879 static void mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
1880 						u8 **data)
1881 {
1882 	unsigned long pfc_combined;
1883 	int i, prio;
1884 
1885 	pfc_combined = mlx5e_query_pfc_combined(priv);
1886 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1887 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1888 			char pfc_string[ETH_GSTRING_LEN];
1889 
1890 			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
1891 			ethtool_sprintf(data,
1892 					pport_per_prio_pfc_stats_desc[i].format,
1893 					pfc_string);
1894 		}
1895 	}
1896 
1897 	if (mlx5e_query_global_pause_combined(priv)) {
1898 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1899 			ethtool_sprintf(data,
1900 					pport_per_prio_pfc_stats_desc[i].format,
1901 					"global");
1902 		}
1903 	}
1904 
1905 	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1906 		ethtool_puts(data, pport_pfc_stall_stats_desc[i].format);
1907 }
1908 
1909 static void mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
1910 					      u64 **data)
1911 {
1912 	unsigned long pfc_combined;
1913 	int i, prio;
1914 
1915 	pfc_combined = mlx5e_query_pfc_combined(priv);
1916 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1917 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1918 			mlx5e_ethtool_put_stat(
1919 				data,
1920 				MLX5E_READ_CTR64_BE(
1921 					&priv->stats.pport
1922 						 .per_prio_counters[prio],
1923 					pport_per_prio_pfc_stats_desc, i));
1924 		}
1925 	}
1926 
1927 	if (mlx5e_query_global_pause_combined(priv)) {
1928 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1929 			mlx5e_ethtool_put_stat(
1930 				data,
1931 				MLX5E_READ_CTR64_BE(
1932 					&priv->stats.pport.per_prio_counters[0],
1933 					pport_per_prio_pfc_stats_desc, i));
1934 		}
1935 	}
1936 
1937 	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1938 		mlx5e_ethtool_put_stat(
1939 			data, MLX5E_READ_CTR64_BE(
1940 				      &priv->stats.pport.per_prio_counters[0],
1941 				      pport_pfc_stall_stats_desc, i));
1942 }
1943 
1944 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
1945 {
1946 	return mlx5e_grp_per_prio_traffic_get_num_stats() +
1947 		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
1948 }
1949 
1950 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
1951 {
1952 	mlx5e_grp_per_prio_traffic_fill_strings(priv, data);
1953 	mlx5e_grp_per_prio_pfc_fill_strings(priv, data);
1954 }
1955 
1956 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
1957 {
1958 	mlx5e_grp_per_prio_traffic_fill_stats(priv, data);
1959 	mlx5e_grp_per_prio_pfc_fill_stats(priv, data);
1960 }
1961 
1962 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
1963 {
1964 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1965 	struct mlx5_core_dev *mdev = priv->mdev;
1966 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1967 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1968 	int prio;
1969 	void *out;
1970 
1971 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
1972 		return;
1973 
1974 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1975 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
1976 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1977 		out = pstats->per_prio_counters[prio];
1978 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1979 		mlx5_core_access_reg(mdev, in, sz, out, sz,
1980 				     MLX5_REG_PPCNT, 0, 0);
1981 	}
1982 }
1983 
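/* Port module event (PME) counters: the descriptor "offset" is the event
 * index scaled by sizeof(u64), matching the layout of the counter arrays in
 * struct mlx5_pme_stats.
 */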
1984 static const struct counter_desc mlx5e_pme_status_desc[] = {
1985 	{ "module_unplug",       sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
1986 };
1987 
1988 static const struct counter_desc mlx5e_pme_error_desc[] = {
1989 	{ "module_bus_stuck",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
1990 	{ "module_high_temp",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
1991 	{ "module_bad_shorted",  sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
1992 };
1993 
1994 #define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
1995 #define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)
1996 
1997 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
1998 {
1999 	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
2000 }
2001 
2002 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
2003 {
2004 	int i;
2005 
2006 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
2007 		ethtool_puts(data, mlx5e_pme_status_desc[i].format);
2008 
2009 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
2010 		ethtool_puts(data, mlx5e_pme_error_desc[i].format);
2011 }
2012 
2013 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
2014 {
2015 	struct mlx5_pme_stats pme_stats;
2016 	int i;
2017 
2018 	mlx5_get_pme_stats(priv->mdev, &pme_stats);
2019 
2020 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
2021 		mlx5e_ethtool_put_stat(
2022 			data, MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
2023 						   mlx5e_pme_status_desc, i));
2024 
2025 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
2026 		mlx5e_ethtool_put_stat(
2027 			data, MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
2028 						   mlx5e_pme_error_desc, i));
2029 }
2030 
2031 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
2032 
2033 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
2034 {
2035 	return mlx5e_ktls_get_count(priv);
2036 }
2037 
2038 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
2039 {
2040 	mlx5e_ktls_get_strings(priv, data);
2041 }
2042 
2043 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
2044 {
2045 	mlx5e_ktls_get_stats(priv, data);
2046 }
2047 
2048 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
2049 
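/* Per-ring software counters. Each MLX5E_DECLARE_*_STAT() descriptor stores
 * the ethtool string format and the field offset inside the per-queue stats
 * struct, so the channels group below can walk the tables generically.
 */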
2050 static const struct counter_desc rq_stats_desc[] = {
2051 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
2052 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
2053 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
2054 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
2055 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
2056 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2057 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2058 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
2059 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
2060 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2061 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
2062 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
2063 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
2064 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
2065 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
2066 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
2067 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) },
2068 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) },
2069 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_packets) },
2070 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_bytes) },
2071 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
2072 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2073 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
2074 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2075 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2076 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2077 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2078 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2079 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2080 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
2081 #ifdef CONFIG_MLX5_EN_ARFS
2082 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_add) },
2083 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_in) },
2084 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_out) },
2085 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_expired) },
2086 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
2087 #endif
2088 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
2089 #ifdef CONFIG_PAGE_POOL_STATS
2090 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
2091 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
2092 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
2093 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
2094 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
2095 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
2096 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
2097 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
2098 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
2099 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
2100 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
2101 #endif
2102 #ifdef CONFIG_MLX5_EN_TLS
2103 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
2104 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
2105 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
2106 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
2107 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
2108 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
2109 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
2110 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
2111 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
2112 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
2113 #endif
2114 };
2115 
2116 static const struct counter_desc sq_stats_desc[] = {
2117 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
2118 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
2119 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2120 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2121 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2122 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2123 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2124 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2125 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2126 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
2127 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, timestamps) },
2128 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2129 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2130 #ifdef CONFIG_MLX5_EN_TLS
2131 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2132 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
2133 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2134 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2135 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2136 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2137 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2138 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2139 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
2140 #endif
2141 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2142 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
2143 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
2144 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2145 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
2146 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
2147 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
2148 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2149 };
2150 
2151 static const struct counter_desc rq_xdpsq_stats_desc[] = {
2152 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2153 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2154 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2155 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
2156 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2157 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2158 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2159 };
2160 
2161 static const struct counter_desc xdpsq_stats_desc[] = {
2162 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2163 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2164 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2165 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
2166 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2167 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2168 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2169 };
2170 
2171 static const struct counter_desc xskrq_stats_desc[] = {
2172 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
2173 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
2174 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2175 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2176 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2177 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
2178 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2179 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2180 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2181 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2182 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2183 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2184 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2185 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2186 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2187 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2188 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2189 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2190 };
2191 
2192 static const struct counter_desc xsksq_stats_desc[] = {
2193 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2194 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2195 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2196 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2197 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2198 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2199 };
2200 
2201 static const struct counter_desc ch_stats_desc[] = {
2202 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
2203 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
2204 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
2205 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
2206 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
2207 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2208 };
2209 
2210 static const struct counter_desc ptp_sq_stats_desc[] = {
2211 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
2212 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
2213 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2214 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2215 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2216 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
2217 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2218 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
2219 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
2220 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2221 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
2222 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
2223 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
2224 	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2225 };
2226 
2227 static const struct counter_desc ptp_ch_stats_desc[] = {
2228 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
2229 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
2230 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
2231 	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2232 };
2233 
2234 static const struct counter_desc ptp_cq_stats_desc[] = {
2235 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
2236 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
2237 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
2238 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
2239 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
2240 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, lost_cqe) },
2241 };
2242 
2243 static const struct counter_desc ptp_rq_stats_desc[] = {
2244 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
2245 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
2246 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2247 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
2248 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
2249 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2250 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2251 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
2252 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2253 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2254 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
2255 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
2256 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2257 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2258 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2259 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2260 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2261 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2262 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2263 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2264 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2265 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2266 	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
2267 };
2268 
2269 static const struct counter_desc qos_sq_stats_desc[] = {
2270 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
2271 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
2272 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2273 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2274 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2275 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2276 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2277 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2278 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2279 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
2280 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, timestamps) },
2281 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2282 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2283 #ifdef CONFIG_MLX5_EN_TLS
2284 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2285 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
2286 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2287 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2288 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2289 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2290 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2291 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2292 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
2293 #endif
2294 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2295 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
2296 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
2297 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2298 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
2299 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
2300 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
2301 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2302 };
2303 
2304 #define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
2305 #define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
2306 #define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
2307 #define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
2308 #define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
2309 #define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
2310 #define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
2311 #define NUM_PTP_SQ_STATS		ARRAY_SIZE(ptp_sq_stats_desc)
2312 #define NUM_PTP_CH_STATS		ARRAY_SIZE(ptp_ch_stats_desc)
2313 #define NUM_PTP_CQ_STATS		ARRAY_SIZE(ptp_cq_stats_desc)
2314 #define NUM_PTP_RQ_STATS                ARRAY_SIZE(ptp_rq_stats_desc)
2315 #define NUM_QOS_SQ_STATS		ARRAY_SIZE(qos_sq_stats_desc)
2316 
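/* QoS (HTB) SQ stats. htb_max_qos_sqs is read with smp_load_acquire(),
 * pairing with the release in mlx5e_open_qos_sq, so every stats struct
 * counted here is already published before it is dereferenced below.
 */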
2317 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
2318 {
2319 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2320 	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
2321 }
2322 
2323 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
2324 {
2325 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2326 	u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
2327 	int i, qid;
2328 
2329 	for (qid = 0; qid < max_qos_sqs; qid++)
2330 		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2331 			ethtool_sprintf(data, qos_sq_stats_desc[i].format, qid);
2332 }
2333 
2334 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
2335 {
2336 	struct mlx5e_sq_stats **stats;
2337 	u16 max_qos_sqs;
2338 	int i, qid;
2339 
2340 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2341 	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
2342 	stats = READ_ONCE(priv->htb_qos_sq_stats);
2343 
2344 	for (qid = 0; qid < max_qos_sqs; qid++) {
2345 		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
2346 
2347 		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2348 			mlx5e_ethtool_put_stat(
2349 				data,
2350 				MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i));
2351 	}
2352 }
2353 
2354 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
2355 
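/* PTP stats: channel-level counters are reported once a PTP channel has ever
 * been opened; per-TC SQ/CQ counters only if TX PTP was opened, and RQ
 * counters only if RX PTP was opened.
 */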
2356 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
2357 {
2358 	int num = NUM_PTP_CH_STATS;
2359 
2360 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2361 		return 0;
2362 
2363 	if (priv->tx_ptp_opened)
2364 		num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
2365 	if (priv->rx_ptp_opened)
2366 		num += NUM_PTP_RQ_STATS;
2367 
2368 	return num;
2369 }
2370 
2371 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
2372 {
2373 	int i, tc;
2374 
2375 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2376 		return;
2377 
2378 	for (i = 0; i < NUM_PTP_CH_STATS; i++)
2379 		ethtool_puts(data, ptp_ch_stats_desc[i].format);
2380 
2381 	if (priv->tx_ptp_opened) {
2382 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2383 			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2384 				ethtool_sprintf(data,
2385 						ptp_sq_stats_desc[i].format,
2386 						tc);
2387 
2388 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2389 			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2390 				ethtool_sprintf(data,
2391 						ptp_cq_stats_desc[i].format,
2392 						tc);
2393 	}
2394 	if (priv->rx_ptp_opened) {
2395 		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2396 			ethtool_sprintf(data, ptp_rq_stats_desc[i].format,
2397 					MLX5E_PTP_CHANNEL_IX);
2398 	}
2399 }
2400 
2401 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
2402 {
2403 	int i, tc;
2404 
2405 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2406 		return;
2407 
2408 	for (i = 0; i < NUM_PTP_CH_STATS; i++)
2409 		mlx5e_ethtool_put_stat(
2410 			data, MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
2411 						   ptp_ch_stats_desc, i));
2412 
2413 	if (priv->tx_ptp_opened) {
2414 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2415 			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2416 				mlx5e_ethtool_put_stat(
2417 					data, MLX5E_READ_CTR64_CPU(
2418 						      &priv->ptp_stats.sq[tc],
2419 						      ptp_sq_stats_desc, i));
2420 
2421 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2422 			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2423 				mlx5e_ethtool_put_stat(
2424 					data, MLX5E_READ_CTR64_CPU(
2425 						      &priv->ptp_stats.cq[tc],
2426 						      ptp_cq_stats_desc, i));
2427 	}
2428 	if (priv->rx_ptp_opened) {
2429 		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2430 			mlx5e_ethtool_put_stat(
2431 				data,
2432 				MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
2433 						     ptp_rq_stats_desc, i));
2434 	}
2435 }
2436 
2437 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
2438 
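/* Regular channel stats: the descriptor tables are walked for
 * priv->stats_nch channels, with XSK RQ/SQ counters included only if an XSK
 * socket has ever been used (priv->xsk.ever_used).
 */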
2439 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
2440 {
2441 	int max_nch = priv->stats_nch;
2442 
2443 	return (NUM_RQ_STATS * max_nch) +
2444 	       (NUM_CH_STATS * max_nch) +
2445 	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
2446 	       (NUM_RQ_XDPSQ_STATS * max_nch) +
2447 	       (NUM_XDPSQ_STATS * max_nch) +
2448 	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
2449 	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
2450 }
2451 
2452 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
2453 {
2454 	bool is_xsk = priv->xsk.ever_used;
2455 	int max_nch = priv->stats_nch;
2456 	int i, j, tc;
2457 
2458 	for (i = 0; i < max_nch; i++)
2459 		for (j = 0; j < NUM_CH_STATS; j++)
2460 			ethtool_sprintf(data, ch_stats_desc[j].format, i);
2461 
2462 	for (i = 0; i < max_nch; i++) {
2463 		for (j = 0; j < NUM_RQ_STATS; j++)
2464 			ethtool_sprintf(data, rq_stats_desc[j].format, i);
2465 		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2466 			ethtool_sprintf(data, xskrq_stats_desc[j].format, i);
2467 		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2468 			ethtool_sprintf(data, rq_xdpsq_stats_desc[j].format, i);
2469 	}
2470 
2471 	for (tc = 0; tc < priv->max_opened_tc; tc++)
2472 		for (i = 0; i < max_nch; i++)
2473 			for (j = 0; j < NUM_SQ_STATS; j++)
2474 				ethtool_sprintf(data, sq_stats_desc[j].format,
2475 						i + tc * max_nch);
2476 
2477 	for (i = 0; i < max_nch; i++) {
2478 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2479 			ethtool_sprintf(data, xsksq_stats_desc[j].format, i);
2480 		for (j = 0; j < NUM_XDPSQ_STATS; j++)
2481 			ethtool_sprintf(data, xdpsq_stats_desc[j].format, i);
2482 	}
2483 }
2484 
2485 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
2486 {
2487 	bool is_xsk = priv->xsk.ever_used;
2488 	int max_nch = priv->stats_nch;
2489 	int i, j, tc;
2490 
2491 	for (i = 0; i < max_nch; i++)
2492 		for (j = 0; j < NUM_CH_STATS; j++)
2493 			mlx5e_ethtool_put_stat(
2494 				data, MLX5E_READ_CTR64_CPU(
2495 					      &priv->channel_stats[i]->ch,
2496 					      ch_stats_desc, j));
2497 
2498 	for (i = 0; i < max_nch; i++) {
2499 		for (j = 0; j < NUM_RQ_STATS; j++)
2500 			mlx5e_ethtool_put_stat(
2501 				data, MLX5E_READ_CTR64_CPU(
2502 					      &priv->channel_stats[i]->rq,
2503 					      rq_stats_desc, j));
2504 		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2505 			mlx5e_ethtool_put_stat(
2506 				data, MLX5E_READ_CTR64_CPU(
2507 					      &priv->channel_stats[i]->xskrq,
2508 					      xskrq_stats_desc, j));
2509 		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2510 			mlx5e_ethtool_put_stat(
2511 				data, MLX5E_READ_CTR64_CPU(
2512 					      &priv->channel_stats[i]->rq_xdpsq,
2513 					      rq_xdpsq_stats_desc, j));
2514 	}
2515 
2516 	for (tc = 0; tc < priv->max_opened_tc; tc++)
2517 		for (i = 0; i < max_nch; i++)
2518 			for (j = 0; j < NUM_SQ_STATS; j++)
2519 				mlx5e_ethtool_put_stat(
2520 					data,
2521 					MLX5E_READ_CTR64_CPU(
2522 						&priv->channel_stats[i]->sq[tc],
2523 						sq_stats_desc, j));
2524 
2525 	for (i = 0; i < max_nch; i++) {
2526 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2527 			mlx5e_ethtool_put_stat(
2528 				data, MLX5E_READ_CTR64_CPU(
2529 					      &priv->channel_stats[i]->xsksq,
2530 					      xsksq_stats_desc, j));
2531 		for (j = 0; j < NUM_XDPSQ_STATS; j++)
2532 			mlx5e_ethtool_put_stat(
2533 				data, MLX5E_READ_CTR64_CPU(
2534 					      &priv->channel_stats[i]->xdpsq,
2535 					      xdpsq_stats_desc, j));
2536 	}
2537 }
2538 
2539 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
2540 
2541 MLX5E_DEFINE_STATS_GRP(sw, 0);
2542 MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
2543 MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
2544 MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
2545 MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
2546 MLX5E_DEFINE_STATS_GRP(2863, 0);
2547 MLX5E_DEFINE_STATS_GRP(2819, 0);
2548 MLX5E_DEFINE_STATS_GRP(phy, 0);
2549 MLX5E_DEFINE_STATS_GRP(pcie, 0);
2550 MLX5E_DEFINE_STATS_GRP(per_prio, 0);
2551 MLX5E_DEFINE_STATS_GRP(pme, 0);
2552 MLX5E_DEFINE_STATS_GRP(channels, 0);
2553 MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
2554 MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
2555 static MLX5E_DEFINE_STATS_GRP(tls, 0);
2556 MLX5E_DEFINE_STATS_GRP(ptp, 0);
2557 static MLX5E_DEFINE_STATS_GRP(qos, 0);
2558 
2559 /* The stats groups are listed in the opposite order of the update_stats() calls */
2560 mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
2561 	&MLX5E_STATS_GRP(sw),
2562 	&MLX5E_STATS_GRP(qcnt),
2563 	&MLX5E_STATS_GRP(vnic_env),
2564 	&MLX5E_STATS_GRP(vport),
2565 	&MLX5E_STATS_GRP(802_3),
2566 	&MLX5E_STATS_GRP(2863),
2567 	&MLX5E_STATS_GRP(2819),
2568 	&MLX5E_STATS_GRP(phy),
2569 	&MLX5E_STATS_GRP(eth_ext),
2570 	&MLX5E_STATS_GRP(pcie),
2571 	&MLX5E_STATS_GRP(per_prio),
2572 	&MLX5E_STATS_GRP(pme),
2573 #ifdef CONFIG_MLX5_EN_IPSEC
2574 	&MLX5E_STATS_GRP(ipsec_hw),
2575 	&MLX5E_STATS_GRP(ipsec_sw),
2576 #endif
2577 	&MLX5E_STATS_GRP(tls),
2578 	&MLX5E_STATS_GRP(channels),
2579 	&MLX5E_STATS_GRP(per_port_buff_congest),
2580 	&MLX5E_STATS_GRP(ptp),
2581 	&MLX5E_STATS_GRP(qos),
2582 #ifdef CONFIG_MLX5_MACSEC
2583 	&MLX5E_STATS_GRP(macsec_hw),
2584 #endif
2585 };
2586 
2587 unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
2588 {
2589 	return ARRAY_SIZE(mlx5e_nic_stats_grps);
2590 }
2591