xref: /linux/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
3 
4 #include <linux/pci.h>
5 #include <linux/phy.h>
6 #include <linux/ethtool.h>
7 
8 #include "wx_type.h"
9 #include "wx_ethtool.h"
10 #include "wx_hw.h"
11 #include "wx_lib.h"
12 
13 struct wx_stats {
14 	char stat_string[ETH_GSTRING_LEN];
15 	size_t sizeof_stat;
16 	off_t stat_offset;
17 };
18 
19 #define WX_STAT(str, m) { \
20 		.stat_string = str, \
21 		.sizeof_stat = sizeof(((struct wx *)0)->m), \
22 		.stat_offset = offsetof(struct wx, m) }
23 
24 static const struct wx_stats wx_gstrings_stats[] = {
25 	WX_STAT("rx_dma_pkts", stats.gprc),
26 	WX_STAT("tx_dma_pkts", stats.gptc),
27 	WX_STAT("rx_dma_bytes", stats.gorc),
28 	WX_STAT("tx_dma_bytes", stats.gotc),
29 	WX_STAT("rx_total_pkts", stats.tpr),
30 	WX_STAT("tx_total_pkts", stats.tpt),
31 	WX_STAT("rx_long_length_count", stats.roc),
32 	WX_STAT("rx_short_length_count", stats.ruc),
33 	WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
34 	WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
35 	WX_STAT("os2bmc_tx_by_host", stats.o2bspc),
36 	WX_STAT("os2bmc_rx_by_host", stats.b2ogprc),
37 	WX_STAT("rx_no_dma_resources", stats.rdmdrop),
38 	WX_STAT("tx_busy", tx_busy),
39 	WX_STAT("non_eop_descs", non_eop_descs),
40 	WX_STAT("tx_restart_queue", restart_queue),
41 	WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
42 	WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
43 	WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
44 };
45 
46 static const struct wx_stats wx_gstrings_fdir_stats[] = {
47 	WX_STAT("fdir_match", stats.fdirmatch),
48 	WX_STAT("fdir_miss", stats.fdirmiss),
49 };
50 
/* The driver allocates num_tx_queues and num_rx_queues symmetrically,
 * so the Rx queue count is expressed through num_tx_queues.  This is
 * done because there is no good way to obtain the maximum number of
 * Rx queues with CONFIG_RPS disabled.
 */
#define WX_NUM_RX_QUEUES netdev->num_tx_queues
#define WX_NUM_TX_QUEUES netdev->num_tx_queues

/* per-queue stats contribute one u64 per wx_queue_stats member for
 * every Tx and every Rx queue
 */
#define WX_QUEUE_STATS_LEN ( \
		(WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \
		(sizeof(struct wx_queue_stats) / sizeof(u64)))
#define WX_GLOBAL_STATS_LEN  ARRAY_SIZE(wx_gstrings_stats)
#define WX_FDIR_STATS_LEN  ARRAY_SIZE(wx_gstrings_fdir_stats)
#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN)
65 
wx_get_sset_count(struct net_device * netdev,int sset)66 int wx_get_sset_count(struct net_device *netdev, int sset)
67 {
68 	struct wx *wx = netdev_priv(netdev);
69 
70 	switch (sset) {
71 	case ETH_SS_STATS:
72 		return (wx->mac.type == wx_mac_sp) ?
73 			WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN;
74 	default:
75 		return -EOPNOTSUPP;
76 	}
77 }
78 EXPORT_SYMBOL(wx_get_sset_count);
79 
/**
 * wx_get_strings - emit statistic names for a stringset
 * @netdev: network interface
 * @stringset: stringset id; only ETH_SS_STATS is handled
 * @data: buffer receiving ETH_GSTRING_LEN-sized names
 *
 * Names must be written in exactly the same order in which
 * wx_get_ethtool_stats() writes the corresponding values.
 */
void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct wx *wx = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
			ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
		/* flow-director stats exist only on SP-class MACs */
		if (wx->mac.type == wx_mac_sp) {
			for (i = 0; i < WX_FDIR_STATS_LEN; i++)
				ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
		}
		/* use the queue-count macros consistently so the string
		 * count always matches WX_QUEUE_STATS_LEN
		 */
		for (i = 0; i < WX_NUM_TX_QUEUES; i++) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
		}
		for (i = 0; i < WX_NUM_RX_QUEUES; i++) {
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
		}
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(wx_get_strings);
106 
/* Copy one ring's packet/byte counters into data[0]/data[1] under the
 * u64_stats seqcount; an unallocated ring reports zeros.
 */
static void wx_get_ring_stats(struct wx_ring *ring, u64 *data)
{
	unsigned int start;

	if (!ring) {
		data[0] = 0;
		data[1] = 0;
		return;
	}

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		data[0] = ring->stats.packets;
		data[1] = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));
}

/**
 * wx_get_ethtool_stats - fill the statistics values
 * @netdev: network interface
 * @stats: ethtool stats header (unused)
 * @data: output array; order must match wx_get_strings()
 */
void wx_get_ethtool_stats(struct net_device *netdev,
			  struct ethtool_stats *stats, u64 *data)
{
	struct wx *wx = netdev_priv(netdev);
	int i, j, k;
	char *p;

	/* refresh the shadow copies of the hardware counters first */
	wx_update_stats(wx);

	for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) {
		p = (char *)wx + wx_gstrings_stats[i].stat_offset;
		data[i] = (wx_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* flow-director stats follow, on SP-class MACs only */
	if (wx->mac.type == wx_mac_sp) {
		for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
			p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
			data[i++] = *(u64 *)p;
		}
	}

	/* use the queue-count macros consistently with wx_get_strings() */
	for (j = 0; j < WX_NUM_TX_QUEUES; j++, i += 2)
		wx_get_ring_stats(wx->tx_ring[j], &data[i]);
	for (j = 0; j < WX_NUM_RX_QUEUES; j++, i += 2)
		wx_get_ring_stats(wx->rx_ring[j], &data[i]);
}
EXPORT_SYMBOL(wx_get_ethtool_stats);
163 
wx_get_mac_stats(struct net_device * netdev,struct ethtool_eth_mac_stats * mac_stats)164 void wx_get_mac_stats(struct net_device *netdev,
165 		      struct ethtool_eth_mac_stats *mac_stats)
166 {
167 	struct wx *wx = netdev_priv(netdev);
168 	struct wx_hw_stats *hwstats;
169 
170 	wx_update_stats(wx);
171 
172 	hwstats = &wx->stats;
173 	mac_stats->MulticastFramesXmittedOK = hwstats->mptc;
174 	mac_stats->BroadcastFramesXmittedOK = hwstats->bptc;
175 	mac_stats->MulticastFramesReceivedOK = hwstats->mprc;
176 	mac_stats->BroadcastFramesReceivedOK = hwstats->bprc;
177 }
178 EXPORT_SYMBOL(wx_get_mac_stats);
179 
wx_get_pause_stats(struct net_device * netdev,struct ethtool_pause_stats * stats)180 void wx_get_pause_stats(struct net_device *netdev,
181 			struct ethtool_pause_stats *stats)
182 {
183 	struct wx *wx = netdev_priv(netdev);
184 	struct wx_hw_stats *hwstats;
185 
186 	wx_update_stats(wx);
187 
188 	hwstats = &wx->stats;
189 	stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
190 	stats->rx_pause_frames = hwstats->lxonoffrxc;
191 }
192 EXPORT_SYMBOL(wx_get_pause_stats);
193 
wx_get_drvinfo(struct net_device * netdev,struct ethtool_drvinfo * info)194 void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
195 {
196 	unsigned int stats_len = WX_STATS_LEN;
197 	struct wx *wx = netdev_priv(netdev);
198 
199 	if (wx->mac.type == wx_mac_sp)
200 		stats_len += WX_FDIR_STATS_LEN;
201 
202 	strscpy(info->driver, wx->driver_name, sizeof(info->driver));
203 	strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
204 	strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
205 	if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) {
206 		info->n_stats = stats_len -
207 				   (WX_NUM_TX_QUEUES - wx->num_tx_queues) *
208 				   (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2;
209 	} else {
210 		info->n_stats = stats_len;
211 	}
212 }
213 EXPORT_SYMBOL(wx_get_drvinfo);
214 
wx_nway_reset(struct net_device * netdev)215 int wx_nway_reset(struct net_device *netdev)
216 {
217 	struct wx *wx = netdev_priv(netdev);
218 
219 	return phylink_ethtool_nway_reset(wx->phylink);
220 }
221 EXPORT_SYMBOL(wx_nway_reset);
222 
wx_get_link_ksettings(struct net_device * netdev,struct ethtool_link_ksettings * cmd)223 int wx_get_link_ksettings(struct net_device *netdev,
224 			  struct ethtool_link_ksettings *cmd)
225 {
226 	struct wx *wx = netdev_priv(netdev);
227 
228 	return phylink_ethtool_ksettings_get(wx->phylink, cmd);
229 }
230 EXPORT_SYMBOL(wx_get_link_ksettings);
231 
wx_set_link_ksettings(struct net_device * netdev,const struct ethtool_link_ksettings * cmd)232 int wx_set_link_ksettings(struct net_device *netdev,
233 			  const struct ethtool_link_ksettings *cmd)
234 {
235 	struct wx *wx = netdev_priv(netdev);
236 
237 	return phylink_ethtool_ksettings_set(wx->phylink, cmd);
238 }
239 EXPORT_SYMBOL(wx_set_link_ksettings);
240 
wx_get_pauseparam(struct net_device * netdev,struct ethtool_pauseparam * pause)241 void wx_get_pauseparam(struct net_device *netdev,
242 		       struct ethtool_pauseparam *pause)
243 {
244 	struct wx *wx = netdev_priv(netdev);
245 
246 	phylink_ethtool_get_pauseparam(wx->phylink, pause);
247 }
248 EXPORT_SYMBOL(wx_get_pauseparam);
249 
wx_set_pauseparam(struct net_device * netdev,struct ethtool_pauseparam * pause)250 int wx_set_pauseparam(struct net_device *netdev,
251 		      struct ethtool_pauseparam *pause)
252 {
253 	struct wx *wx = netdev_priv(netdev);
254 
255 	return phylink_ethtool_set_pauseparam(wx->phylink, pause);
256 }
257 EXPORT_SYMBOL(wx_set_pauseparam);
258 
wx_get_ringparam(struct net_device * netdev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)259 void wx_get_ringparam(struct net_device *netdev,
260 		      struct ethtool_ringparam *ring,
261 		      struct kernel_ethtool_ringparam *kernel_ring,
262 		      struct netlink_ext_ack *extack)
263 {
264 	struct wx *wx = netdev_priv(netdev);
265 
266 	ring->rx_max_pending = WX_MAX_RXD;
267 	ring->tx_max_pending = WX_MAX_TXD;
268 	ring->rx_mini_max_pending = 0;
269 	ring->rx_jumbo_max_pending = 0;
270 	ring->rx_pending = wx->rx_ring_count;
271 	ring->tx_pending = wx->tx_ring_count;
272 	ring->rx_mini_pending = 0;
273 	ring->rx_jumbo_pending = 0;
274 }
275 EXPORT_SYMBOL(wx_get_ringparam);
276 
wx_get_coalesce(struct net_device * netdev,struct ethtool_coalesce * ec,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)277 int wx_get_coalesce(struct net_device *netdev,
278 		    struct ethtool_coalesce *ec,
279 		    struct kernel_ethtool_coalesce *kernel_coal,
280 		    struct netlink_ext_ack *extack)
281 {
282 	struct wx *wx = netdev_priv(netdev);
283 
284 	ec->tx_max_coalesced_frames_irq = wx->tx_work_limit;
285 	/* only valid if in constant ITR mode */
286 	if (wx->rx_itr_setting <= 1)
287 		ec->rx_coalesce_usecs = wx->rx_itr_setting;
288 	else
289 		ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;
290 
291 	/* if in mixed tx/rx queues per vector mode, report only rx settings */
292 	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
293 		return 0;
294 
295 	/* only valid if in constant ITR mode */
296 	if (wx->tx_itr_setting <= 1)
297 		ec->tx_coalesce_usecs = wx->tx_itr_setting;
298 	else
299 		ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2;
300 
301 	return 0;
302 }
303 EXPORT_SYMBOL(wx_get_coalesce);
304 
wx_set_coalesce(struct net_device * netdev,struct ethtool_coalesce * ec,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)305 int wx_set_coalesce(struct net_device *netdev,
306 		    struct ethtool_coalesce *ec,
307 		    struct kernel_ethtool_coalesce *kernel_coal,
308 		    struct netlink_ext_ack *extack)
309 {
310 	struct wx *wx = netdev_priv(netdev);
311 	u16 tx_itr_param, rx_itr_param;
312 	struct wx_q_vector *q_vector;
313 	u16 max_eitr;
314 	int i;
315 
316 	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) {
317 		/* reject Tx specific changes in case of mixed RxTx vectors */
318 		if (ec->tx_coalesce_usecs)
319 			return -EOPNOTSUPP;
320 	}
321 
322 	if (ec->tx_max_coalesced_frames_irq)
323 		wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;
324 
325 	if (wx->mac.type == wx_mac_sp)
326 		max_eitr = WX_SP_MAX_EITR;
327 	else
328 		max_eitr = WX_EM_MAX_EITR;
329 
330 	if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
331 	    (ec->tx_coalesce_usecs > (max_eitr >> 2)))
332 		return -EINVAL;
333 
334 	if (ec->rx_coalesce_usecs > 1)
335 		wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
336 	else
337 		wx->rx_itr_setting = ec->rx_coalesce_usecs;
338 
339 	if (wx->rx_itr_setting == 1)
340 		rx_itr_param = WX_20K_ITR;
341 	else
342 		rx_itr_param = wx->rx_itr_setting;
343 
344 	if (ec->tx_coalesce_usecs > 1)
345 		wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
346 	else
347 		wx->tx_itr_setting = ec->tx_coalesce_usecs;
348 
349 	if (wx->tx_itr_setting == 1) {
350 		if (wx->mac.type == wx_mac_sp)
351 			tx_itr_param = WX_12K_ITR;
352 		else
353 			tx_itr_param = WX_20K_ITR;
354 	} else {
355 		tx_itr_param = wx->tx_itr_setting;
356 	}
357 
358 	/* mixed Rx/Tx */
359 	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
360 		wx->tx_itr_setting = wx->rx_itr_setting;
361 
362 	for (i = 0; i < wx->num_q_vectors; i++) {
363 		q_vector = wx->q_vector[i];
364 		if (q_vector->tx.count && !q_vector->rx.count)
365 			/* tx only */
366 			q_vector->itr = tx_itr_param;
367 		else
368 			/* rx only or mixed */
369 			q_vector->itr = rx_itr_param;
370 		wx_write_eitr(q_vector);
371 	}
372 
373 	return 0;
374 }
375 EXPORT_SYMBOL(wx_set_coalesce);
376 
wx_max_channels(struct wx * wx)377 static unsigned int wx_max_channels(struct wx *wx)
378 {
379 	unsigned int max_combined;
380 
381 	if (!wx->msix_q_entries) {
382 		/* We only support one q_vector without MSI-X */
383 		max_combined = 1;
384 	} else {
385 		/* support up to max allowed queues with RSS */
386 		if (wx->mac.type == wx_mac_sp)
387 			max_combined = 63;
388 		else
389 			max_combined = 8;
390 	}
391 
392 	return max_combined;
393 }
394 
wx_get_channels(struct net_device * dev,struct ethtool_channels * ch)395 void wx_get_channels(struct net_device *dev,
396 		     struct ethtool_channels *ch)
397 {
398 	struct wx *wx = netdev_priv(dev);
399 
400 	/* report maximum channels */
401 	ch->max_combined = wx_max_channels(wx);
402 
403 	/* report info for other vector */
404 	if (wx->msix_q_entries) {
405 		ch->max_other = 1;
406 		ch->other_count = 1;
407 	}
408 
409 	/* record RSS queues */
410 	ch->combined_count = wx->ring_feature[RING_F_RSS].indices;
411 
412 	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
413 		ch->combined_count = wx->ring_feature[RING_F_FDIR].indices;
414 }
415 EXPORT_SYMBOL(wx_get_channels);
416 
wx_set_channels(struct net_device * dev,struct ethtool_channels * ch)417 int wx_set_channels(struct net_device *dev,
418 		    struct ethtool_channels *ch)
419 {
420 	unsigned int count = ch->combined_count;
421 	struct wx *wx = netdev_priv(dev);
422 
423 	/* verify other_count has not changed */
424 	if (ch->other_count != 1)
425 		return -EINVAL;
426 
427 	/* verify the number of channels does not exceed hardware limits */
428 	if (count > wx_max_channels(wx))
429 		return -EINVAL;
430 
431 	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
432 		wx->ring_feature[RING_F_FDIR].limit = count;
433 
434 	wx->ring_feature[RING_F_RSS].limit = count;
435 
436 	return 0;
437 }
438 EXPORT_SYMBOL(wx_set_channels);
439 
wx_get_msglevel(struct net_device * netdev)440 u32 wx_get_msglevel(struct net_device *netdev)
441 {
442 	struct wx *wx = netdev_priv(netdev);
443 
444 	return wx->msg_enable;
445 }
446 EXPORT_SYMBOL(wx_get_msglevel);
447 
/**
 * wx_set_msglevel - set the driver's message-level bitmask
 * @netdev: network interface
 * @data: new msg_enable value
 */
void wx_set_msglevel(struct net_device *netdev, u32 data)
{
	struct wx *wx = netdev_priv(netdev);

	wx->msg_enable = data;
}
EXPORT_SYMBOL(wx_set_msglevel);
455