xref: /linux/drivers/net/ethernet/microsoft/mana/mana_ethtool.c (revision ae22a94997b8a03dcb3c922857c203246711f9d4)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
3 
4 #include <linux/inetdevice.h>
5 #include <linux/etherdevice.h>
6 #include <linux/ethtool.h>
7 
8 #include <net/mana/mana.h>
9 
10 static const struct {
11 	char name[ETH_GSTRING_LEN];
12 	u16 offset;
13 } mana_eth_stats[] = {
14 	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
15 	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
16 	{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
17 					   hc_rx_discards_no_wqe)},
18 	{"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
19 					      hc_rx_err_vport_disabled)},
20 	{"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
21 	{"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats,
22 				      hc_rx_ucast_pkts)},
23 	{"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats,
24 				       hc_rx_ucast_bytes)},
25 	{"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats,
26 				      hc_rx_bcast_pkts)},
27 	{"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats,
28 				       hc_rx_bcast_bytes)},
29 	{"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats,
30 			hc_rx_mcast_pkts)},
31 	{"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats,
32 				       hc_rx_mcast_bytes)},
33 	{"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats,
34 					   hc_tx_err_gf_disabled)},
35 	{"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
36 					      hc_tx_err_vport_disabled)},
37 	{"hc_tx_err_inval_vportoffset_pkt",
38 	 offsetof(struct mana_ethtool_stats,
39 		  hc_tx_err_inval_vportoffset_pkt)},
40 	{"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats,
41 						hc_tx_err_vlan_enforcement)},
42 	{"hc_tx_err_eth_type_enforcement",
43 	 offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
44 	{"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats,
45 					      hc_tx_err_sa_enforcement)},
46 	{"hc_tx_err_sqpdid_enforcement",
47 	 offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
48 	{"hc_tx_err_cqpdid_enforcement",
49 	 offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
50 	{"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats,
51 					     hc_tx_err_mtu_violation)},
52 	{"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats,
53 					 hc_tx_err_inval_oob)},
54 	{"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats,
55 				    hc_tx_err_gdma)},
56 	{"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
57 	{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
58 					hc_tx_ucast_pkts)},
59 	{"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
60 					hc_tx_ucast_bytes)},
61 	{"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
62 					hc_tx_bcast_pkts)},
63 	{"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
64 					hc_tx_bcast_bytes)},
65 	{"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
66 					hc_tx_mcast_pkts)},
67 	{"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
68 					hc_tx_mcast_bytes)},
69 	{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
70 	{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
71 					tx_cqe_unknown_type)},
72 	{"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
73 					rx_coalesced_err)},
74 	{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
75 					rx_cqe_unknown_type)},
76 };
77 
78 static int mana_get_sset_count(struct net_device *ndev, int stringset)
79 {
80 	struct mana_port_context *apc = netdev_priv(ndev);
81 	unsigned int num_queues = apc->num_queues;
82 
83 	if (stringset != ETH_SS_STATS)
84 		return -EINVAL;
85 
86 	return ARRAY_SIZE(mana_eth_stats) + num_queues *
87 				(MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
88 }
89 
90 static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
91 {
92 	struct mana_port_context *apc = netdev_priv(ndev);
93 	unsigned int num_queues = apc->num_queues;
94 	u8 *p = data;
95 	int i;
96 
97 	if (stringset != ETH_SS_STATS)
98 		return;
99 
100 	for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) {
101 		memcpy(p, mana_eth_stats[i].name, ETH_GSTRING_LEN);
102 		p += ETH_GSTRING_LEN;
103 	}
104 
105 	for (i = 0; i < num_queues; i++) {
106 		sprintf(p, "rx_%d_packets", i);
107 		p += ETH_GSTRING_LEN;
108 		sprintf(p, "rx_%d_bytes", i);
109 		p += ETH_GSTRING_LEN;
110 		sprintf(p, "rx_%d_xdp_drop", i);
111 		p += ETH_GSTRING_LEN;
112 		sprintf(p, "rx_%d_xdp_tx", i);
113 		p += ETH_GSTRING_LEN;
114 		sprintf(p, "rx_%d_xdp_redirect", i);
115 		p += ETH_GSTRING_LEN;
116 	}
117 
118 	for (i = 0; i < num_queues; i++) {
119 		sprintf(p, "tx_%d_packets", i);
120 		p += ETH_GSTRING_LEN;
121 		sprintf(p, "tx_%d_bytes", i);
122 		p += ETH_GSTRING_LEN;
123 		sprintf(p, "tx_%d_xdp_xmit", i);
124 		p += ETH_GSTRING_LEN;
125 		sprintf(p, "tx_%d_tso_packets", i);
126 		p += ETH_GSTRING_LEN;
127 		sprintf(p, "tx_%d_tso_bytes", i);
128 		p += ETH_GSTRING_LEN;
129 		sprintf(p, "tx_%d_tso_inner_packets", i);
130 		p += ETH_GSTRING_LEN;
131 		sprintf(p, "tx_%d_tso_inner_bytes", i);
132 		p += ETH_GSTRING_LEN;
133 		sprintf(p, "tx_%d_long_pkt_fmt", i);
134 		p += ETH_GSTRING_LEN;
135 		sprintf(p, "tx_%d_short_pkt_fmt", i);
136 		p += ETH_GSTRING_LEN;
137 		sprintf(p, "tx_%d_csum_partial", i);
138 		p += ETH_GSTRING_LEN;
139 		sprintf(p, "tx_%d_mana_map_err", i);
140 		p += ETH_GSTRING_LEN;
141 	}
142 }
143 
/* ethtool ->get_ethtool_stats: fill @data with the per-port hardware
 * (host-channel) counters followed by per-RX-queue and per-TX-queue
 * software counters.  The output order must stay in lockstep with
 * mana_get_strings() and mana_get_sset_count().
 */
static void mana_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *e_stats, u64 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	void *eth_stats = &apc->eth_stats;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_redirect;
	u64 xdp_xmit;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 long_pkt_fmt;
	u64 short_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	int q, i = 0;

	/* Port not attached: report nothing (buffer presumably
	 * pre-zeroed by the ethtool core — confirm if relied upon).
	 */
	if (!apc->port_is_up)
		return;
	/* we call mana function to update stats from GDMA */
	mana_query_gf_stats(apc);

	/* Per-port counters, indexed via the offset table above. */
	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		/* Snapshot under the u64_stats seqcount so the 64-bit
		 * counters are read consistently (matters on 32-bit hosts).
		 */
		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
			xdp_drop = rx_stats->xdp_drop;
			xdp_tx = rx_stats->xdp_tx;
			xdp_redirect = rx_stats->xdp_redirect;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
		data[i++] = xdp_tx;
		data[i++] = xdp_redirect;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		/* Same consistent-snapshot dance for the TX counters. */
		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
			xdp_xmit = tx_stats->xdp_xmit;
			tso_packets = tx_stats->tso_packets;
			tso_bytes = tx_stats->tso_bytes;
			tso_inner_packets = tx_stats->tso_inner_packets;
			tso_inner_bytes = tx_stats->tso_inner_bytes;
			long_pkt_fmt = tx_stats->long_pkt_fmt;
			short_pkt_fmt = tx_stats->short_pkt_fmt;
			csum_partial = tx_stats->csum_partial;
			mana_map_err = tx_stats->mana_map_err;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_xmit;
		data[i++] = tso_packets;
		data[i++] = tso_bytes;
		data[i++] = tso_inner_packets;
		data[i++] = tso_inner_bytes;
		data[i++] = long_pkt_fmt;
		data[i++] = short_pkt_fmt;
		data[i++] = csum_partial;
		data[i++] = mana_map_err;
	}
}
226 
227 static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
228 			  u32 *rules)
229 {
230 	struct mana_port_context *apc = netdev_priv(ndev);
231 
232 	switch (cmd->cmd) {
233 	case ETHTOOL_GRXRINGS:
234 		cmd->data = apc->num_queues;
235 		return 0;
236 	}
237 
238 	return -EOPNOTSUPP;
239 }
240 
241 static u32 mana_get_rxfh_key_size(struct net_device *ndev)
242 {
243 	return MANA_HASH_KEY_SIZE;
244 }
245 
246 static u32 mana_rss_indir_size(struct net_device *ndev)
247 {
248 	return MANA_INDIRECT_TABLE_SIZE;
249 }
250 
251 static int mana_get_rxfh(struct net_device *ndev,
252 			 struct ethtool_rxfh_param *rxfh)
253 {
254 	struct mana_port_context *apc = netdev_priv(ndev);
255 	int i;
256 
257 	rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
258 
259 	if (rxfh->indir) {
260 		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
261 			rxfh->indir[i] = apc->indir_table[i];
262 	}
263 
264 	if (rxfh->key)
265 		memcpy(rxfh->key, apc->hashkey, MANA_HASH_KEY_SIZE);
266 
267 	return 0;
268 }
269 
/* ethtool ->set_rxfh: apply a new RSS hash key and/or indirection
 * table.  On hardware rejection, the previous settings are restored
 * and re-programmed so software state never diverges from the NIC.
 */
static int mana_set_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	bool update_hash = false, update_table = false;
	u32 save_table[MANA_INDIRECT_TABLE_SIZE];
	u8 save_key[MANA_HASH_KEY_SIZE];
	int i, err;

	if (!apc->port_is_up)
		return -EOPNOTSUPP;

	/* Only Toeplitz (or "no change") is accepted. */
	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (rxfh->indir) {
		/* Every indirection entry must map to an existing queue. */
		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
			if (rxfh->indir[i] >= apc->num_queues)
				return -EINVAL;

		/* Save the old table before overwriting, for rollback. */
		update_table = true;
		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
			save_table[i] = apc->indir_table[i];
			apc->indir_table[i] = rxfh->indir[i];
		}
	}

	if (rxfh->key) {
		update_hash = true;
		memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
		memcpy(apc->hashkey, rxfh->key, MANA_HASH_KEY_SIZE);
	}

	err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);

	if (err) { /* recover to original values */
		if (update_table) {
			for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
				apc->indir_table[i] = save_table[i];
		}

		if (update_hash)
			memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);

		/* Re-program the restored settings; best effort — the
		 * original error is still returned below.
		 */
		mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
	}

	return err;
}
321 
322 static void mana_get_channels(struct net_device *ndev,
323 			      struct ethtool_channels *channel)
324 {
325 	struct mana_port_context *apc = netdev_priv(ndev);
326 
327 	channel->max_combined = apc->max_queues;
328 	channel->combined_count = apc->num_queues;
329 }
330 
331 static int mana_set_channels(struct net_device *ndev,
332 			     struct ethtool_channels *channels)
333 {
334 	struct mana_port_context *apc = netdev_priv(ndev);
335 	unsigned int new_count = channels->combined_count;
336 	unsigned int old_count = apc->num_queues;
337 	int err, err2;
338 
339 	err = mana_detach(ndev, false);
340 	if (err) {
341 		netdev_err(ndev, "mana_detach failed: %d\n", err);
342 		return err;
343 	}
344 
345 	apc->num_queues = new_count;
346 	err = mana_attach(ndev);
347 	if (!err)
348 		return 0;
349 
350 	netdev_err(ndev, "mana_attach failed: %d\n", err);
351 
352 	/* Try to roll it back to the old configuration. */
353 	apc->num_queues = old_count;
354 	err2 = mana_attach(ndev);
355 	if (err2)
356 		netdev_err(ndev, "mana re-attach failed: %d\n", err2);
357 
358 	return err;
359 }
360 
/* ethtool operations exported to the core; referenced from the netdev
 * setup elsewhere in the driver (non-static on purpose).
 */
const struct ethtool_ops mana_ethtool_ops = {
	.get_ethtool_stats	= mana_get_ethtool_stats,
	.get_sset_count		= mana_get_sset_count,
	.get_strings		= mana_get_strings,
	.get_rxnfc		= mana_get_rxnfc,
	.get_rxfh_key_size	= mana_get_rxfh_key_size,
	.get_rxfh_indir_size	= mana_rss_indir_size,
	.get_rxfh		= mana_get_rxfh,
	.set_rxfh		= mana_set_rxfh,
	.get_channels		= mana_get_channels,
	.set_channels		= mana_set_channels,
};
373