xref: /linux/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c (revision 0e50474fa514822e9d990874e554bf8043a201d7)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
3 
4 #include <linux/pci.h>
5 #include <linux/phy.h>
6 #include <linux/ethtool.h>
7 
8 #include "wx_type.h"
9 #include "wx_ethtool.h"
10 #include "wx_hw.h"
11 #include "wx_lib.h"
12 
/* Descriptor for one ethtool statistic exported from struct wx.
 * @stat_string: name reported to userspace via ETH_SS_STATS
 * @sizeof_stat: size of the counter field (sizeof(u64) or sizeof(u32)),
 *		 used by wx_get_ethtool_stats() to pick the read width
 * @stat_offset: byte offset of the counter within struct wx
 */
struct wx_stats {
	char stat_string[ETH_GSTRING_LEN];
	size_t sizeof_stat;
	off_t stat_offset;
};
18 
/* Build a wx_stats table entry for member @m of struct wx, exported
 * under the name @str. The (struct wx *)0 cast is the usual trick to
 * take sizeof a member without an object.
 */
#define WX_STAT(str, m) { \
		.stat_string = str, \
		.sizeof_stat = sizeof(((struct wx *)0)->m), \
		.stat_offset = offsetof(struct wx, m) }
23 
/* Statistics always exported, for every device. The order here defines
 * the wire order seen by userspace and must stay in sync between
 * wx_get_strings() and wx_get_ethtool_stats().
 */
static const struct wx_stats wx_gstrings_stats[] = {
	WX_STAT("rx_dma_pkts", stats.gprc),
	WX_STAT("tx_dma_pkts", stats.gptc),
	WX_STAT("rx_dma_bytes", stats.gorc),
	WX_STAT("tx_dma_bytes", stats.gotc),
	WX_STAT("rx_total_pkts", stats.tpr),
	WX_STAT("tx_total_pkts", stats.tpt),
	WX_STAT("rx_long_length_count", stats.roc),
	WX_STAT("rx_short_length_count", stats.ruc),
	WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
	WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
	WX_STAT("os2bmc_tx_by_host", stats.o2bspc),
	WX_STAT("os2bmc_rx_by_host", stats.b2ogprc),
	WX_STAT("rx_no_dma_resources", stats.rdmdrop),
	WX_STAT("tx_busy", tx_busy),
	WX_STAT("non_eop_descs", non_eop_descs),
	WX_STAT("tx_restart_queue", restart_queue),
	WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
	WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
	WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
	WX_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
	WX_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
	WX_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
48 
/* Extra statistics exported only when WX_FLAG_FDIR_CAPABLE is set */
static const struct wx_stats wx_gstrings_fdir_stats[] = {
	WX_STAT("fdir_match", stats.fdirmatch),
	WX_STAT("fdir_miss", stats.fdirmiss),
};
53 
/* Extra statistics exported only when WX_FLAG_RSC_CAPABLE is set */
static const struct wx_stats wx_gstrings_rsc_stats[] = {
	WX_STAT("rsc_aggregated", rsc_count),
	WX_STAT("rsc_flushed", rsc_flush),
};
58 
/* drivers allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define WX_NUM_RX_QUEUES netdev->num_tx_queues
#define WX_NUM_TX_QUEUES netdev->num_tx_queues

/* two u64 counters (packets, bytes) per ring, for every tx and rx ring */
#define WX_QUEUE_STATS_LEN ( \
		(WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \
		(sizeof(struct wx_queue_stats) / sizeof(u64)))
#define WX_GLOBAL_STATS_LEN  ARRAY_SIZE(wx_gstrings_stats)
#define WX_FDIR_STATS_LEN  ARRAY_SIZE(wx_gstrings_fdir_stats)
#define WX_RSC_STATS_LEN  ARRAY_SIZE(wx_gstrings_rsc_stats)
/* base stat count; FDIR/RSC lengths are added conditionally by callers */
#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN)
74 
75 int wx_get_sset_count(struct net_device *netdev, int sset)
76 {
77 	struct wx *wx = netdev_priv(netdev);
78 	int len = WX_STATS_LEN;
79 
80 	switch (sset) {
81 	case ETH_SS_STATS:
82 		if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
83 			len += WX_FDIR_STATS_LEN;
84 		if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags))
85 			len += WX_RSC_STATS_LEN;
86 		return len;
87 	default:
88 		return -EOPNOTSUPP;
89 	}
90 }
91 EXPORT_SYMBOL(wx_get_sset_count);
92 
/**
 * wx_get_strings - fill the buffer with statistic name strings
 * @netdev: network interface device structure
 * @stringset: string set id (only ETH_SS_STATS is handled)
 * @data: output buffer, one ETH_GSTRING_LEN slot per statistic
 *
 * The emission order here (globals, optional FDIR, optional RSC, then
 * per-queue tx/rx counters) must match the value order written by
 * wx_get_ethtool_stats() and the count from wx_get_sset_count().
 */
void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct wx *wx = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
			ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
		/* capability-gated blocks come after the global stats */
		if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
			for (i = 0; i < WX_FDIR_STATS_LEN; i++)
				ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
		}
		if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
			for (i = 0; i < WX_RSC_STATS_LEN; i++)
				ethtool_puts(&p, wx_gstrings_rsc_stats[i].stat_string);
		}
		/* per-queue packet/byte counters are emitted last */
		for (i = 0; i < netdev->num_tx_queues; i++) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
		}
		for (i = 0; i < WX_NUM_RX_QUEUES; i++) {
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
		}
		break;
	}
}
EXPORT_SYMBOL(wx_get_strings);
123 
/**
 * wx_get_ethtool_stats - fill the buffer with statistic values
 * @netdev: network interface device structure
 * @stats: ethtool stats request structure (unused here)
 * @data: output array; layout must match wx_get_strings()
 *
 * Reads the software/hardware counters out of struct wx by offset
 * (see wx_gstrings_stats), then appends the optional FDIR/RSC blocks
 * and the per-ring packet/byte counters.
 */
void wx_get_ethtool_stats(struct net_device *netdev,
			  struct ethtool_stats *stats, u64 *data)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_ring *ring;
	unsigned int start;
	int i, j, k;
	char *p;

	/* refresh the counters before sampling them */
	wx_update_stats(wx);

	for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) {
		p = (char *)wx + wx_gstrings_stats[i].stat_offset;
		/* read with the width recorded at table-build time */
		data[i] = (wx_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* 'i' keeps running across the optional blocks below */
	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
		for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
			p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
			data[i++] = *(u64 *)p;
		}
	}

	if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
		for (k = 0; k < WX_RSC_STATS_LEN; k++) {
			p = (char *)wx + wx_gstrings_rsc_stats[k].stat_offset;
			data[i++] = *(u64 *)p;
		}
	}

	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = wx->tx_ring[j];
		if (!ring) {
			/* unallocated ring: report zeros to keep layout */
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		/* u64_stats seqcount loop: retry if a writer raced us */
		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < WX_NUM_RX_QUEUES; j++) {
		ring = wx->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}
}
EXPORT_SYMBOL(wx_get_ethtool_stats);
187 
188 void wx_get_mac_stats(struct net_device *netdev,
189 		      struct ethtool_eth_mac_stats *mac_stats)
190 {
191 	struct wx *wx = netdev_priv(netdev);
192 	struct wx_hw_stats *hwstats;
193 
194 	wx_update_stats(wx);
195 
196 	hwstats = &wx->stats;
197 	mac_stats->MulticastFramesXmittedOK = hwstats->mptc;
198 	mac_stats->BroadcastFramesXmittedOK = hwstats->bptc;
199 	mac_stats->MulticastFramesReceivedOK = hwstats->mprc;
200 	mac_stats->BroadcastFramesReceivedOK = hwstats->bprc;
201 }
202 EXPORT_SYMBOL(wx_get_mac_stats);
203 
204 void wx_get_pause_stats(struct net_device *netdev,
205 			struct ethtool_pause_stats *stats)
206 {
207 	struct wx *wx = netdev_priv(netdev);
208 	struct wx_hw_stats *hwstats;
209 
210 	wx_update_stats(wx);
211 
212 	hwstats = &wx->stats;
213 	stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
214 	stats->rx_pause_frames = hwstats->lxonoffrxc;
215 }
216 EXPORT_SYMBOL(wx_get_pause_stats);
217 
218 void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
219 {
220 	unsigned int stats_len = WX_STATS_LEN;
221 	struct wx *wx = netdev_priv(netdev);
222 
223 	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
224 		stats_len += WX_FDIR_STATS_LEN;
225 
226 	strscpy(info->driver, wx->driver_name, sizeof(info->driver));
227 	strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
228 	strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
229 	if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) {
230 		info->n_stats = stats_len -
231 				   (WX_NUM_TX_QUEUES - wx->num_tx_queues) *
232 				   (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2;
233 	} else {
234 		info->n_stats = stats_len;
235 	}
236 }
237 EXPORT_SYMBOL(wx_get_drvinfo);
238 
239 int wx_nway_reset(struct net_device *netdev)
240 {
241 	struct wx *wx = netdev_priv(netdev);
242 
243 	if (wx->mac.type == wx_mac_aml40)
244 		return -EOPNOTSUPP;
245 
246 	return phylink_ethtool_nway_reset(wx->phylink);
247 }
248 EXPORT_SYMBOL(wx_nway_reset);
249 
250 int wx_get_link_ksettings(struct net_device *netdev,
251 			  struct ethtool_link_ksettings *cmd)
252 {
253 	struct wx *wx = netdev_priv(netdev);
254 
255 	return phylink_ethtool_ksettings_get(wx->phylink, cmd);
256 }
257 EXPORT_SYMBOL(wx_get_link_ksettings);
258 
259 int wx_set_link_ksettings(struct net_device *netdev,
260 			  const struct ethtool_link_ksettings *cmd)
261 {
262 	struct wx *wx = netdev_priv(netdev);
263 
264 	if (wx->mac.type == wx_mac_aml40)
265 		return -EOPNOTSUPP;
266 
267 	return phylink_ethtool_ksettings_set(wx->phylink, cmd);
268 }
269 EXPORT_SYMBOL(wx_set_link_ksettings);
270 
271 void wx_get_pauseparam(struct net_device *netdev,
272 		       struct ethtool_pauseparam *pause)
273 {
274 	struct wx *wx = netdev_priv(netdev);
275 
276 	if (wx->mac.type == wx_mac_aml40)
277 		return;
278 
279 	phylink_ethtool_get_pauseparam(wx->phylink, pause);
280 }
281 EXPORT_SYMBOL(wx_get_pauseparam);
282 
283 int wx_set_pauseparam(struct net_device *netdev,
284 		      struct ethtool_pauseparam *pause)
285 {
286 	struct wx *wx = netdev_priv(netdev);
287 
288 	if (wx->mac.type == wx_mac_aml40)
289 		return -EOPNOTSUPP;
290 
291 	return phylink_ethtool_set_pauseparam(wx->phylink, pause);
292 }
293 EXPORT_SYMBOL(wx_set_pauseparam);
294 
295 void wx_get_ringparam(struct net_device *netdev,
296 		      struct ethtool_ringparam *ring,
297 		      struct kernel_ethtool_ringparam *kernel_ring,
298 		      struct netlink_ext_ack *extack)
299 {
300 	struct wx *wx = netdev_priv(netdev);
301 
302 	ring->rx_max_pending = WX_MAX_RXD;
303 	ring->tx_max_pending = WX_MAX_TXD;
304 	ring->rx_mini_max_pending = 0;
305 	ring->rx_jumbo_max_pending = 0;
306 	ring->rx_pending = wx->rx_ring_count;
307 	ring->tx_pending = wx->tx_ring_count;
308 	ring->rx_mini_pending = 0;
309 	ring->rx_jumbo_pending = 0;
310 }
311 EXPORT_SYMBOL(wx_get_ringparam);
312 
/**
 * wx_get_coalesce - report interrupt coalescing settings
 * @netdev: network interface device structure
 * @ec: coalesce structure to fill
 * @kernel_coal: kernel-side coalesce parameters (unused)
 * @extack: netlink extended ack (unused)
 *
 * ITR settings <= 1 are stored verbatim; larger values are stored
 * shifted left by 2, so shift right by 2 to report microseconds.
 */
int wx_get_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);

	ec->tx_max_coalesced_frames_irq = wx->tx_work_limit;
	/* only valid if in constant ITR mode */
	if (wx->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = wx->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;

	if (wx->adaptive_itr) {
		ec->use_adaptive_rx_coalesce = 1;
		ec->use_adaptive_tx_coalesce = 1;
	}

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (wx->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = wx->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2;

	return 0;
}
EXPORT_SYMBOL(wx_get_coalesce);
345 
/**
 * wx_update_rsc - enable/disable hardware RSC based on the rx ITR value
 * @wx: board private structure
 *
 * RSC only makes sense at moderate interrupt rates; very low rx-usecs
 * would defeat aggregation. Toggles WX_FLAG_RSC_ENABLED accordingly and
 * resets the device (via wx->do_reset) when the state changes.
 */
static void wx_update_rsc(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	bool need_reset = false;

	/* nothing to do if LRO or RSC are not enabled */
	if (!test_bit(WX_FLAG_RSC_CAPABLE, wx->flags) ||
	    !(netdev->features & NETIF_F_LRO))
		return;

	/* check the feature flag value and enable RSC if necessary */
	if (wx->rx_itr_setting == 1 ||
	    wx->rx_itr_setting > WX_MIN_RSC_ITR) {
		if (!test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) {
			set_bit(WX_FLAG_RSC_ENABLED, wx->flags);
			dev_info(&wx->pdev->dev,
				 "rx-usecs value high enough to re-enable RSC\n");

			need_reset = true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) {
		clear_bit(WX_FLAG_RSC_ENABLED, wx->flags);
		dev_info(&wx->pdev->dev,
			 "rx-usecs set too low, disabling RSC\n");

		need_reset = true;
	}

	/* reset the device to apply the new RSC setting */
	if (need_reset && wx->do_reset)
		wx->do_reset(netdev);
}
379 
/**
 * wx_set_coalesce - configure interrupt coalescing
 * @netdev: network interface device structure
 * @ec: requested coalesce parameters
 * @kernel_coal: kernel-side coalesce parameters (unused)
 * @extack: netlink extended ack (unused)
 *
 * Values > 1 usec are stored shifted left by 2 (inverse of
 * wx_get_coalesce()); a stored setting of 1 means adaptive ITR.
 * The final per-vector ITR is written to hardware via wx_write_eitr()
 * and the RSC state is re-evaluated afterwards.
 *
 * Returns 0 on success, -EOPNOTSUPP for tx-specific changes with mixed
 * Rx/Tx vectors, or -EINVAL for out-of-range values.
 */
int wx_set_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u16 tx_itr_param, rx_itr_param;
	struct wx_q_vector *q_vector;
	u16 max_eitr;
	int i;

	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EOPNOTSUPP;
	}

	/* tx_work_limit is a u16 and must be non-zero */
	if (ec->tx_max_coalesced_frames_irq > U16_MAX  ||
	    !ec->tx_max_coalesced_frames_irq)
		return -EINVAL;

	wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;

	/* per-MAC EITR ceiling and default constant-mode ITR values */
	switch (wx->mac.type) {
	case wx_mac_sp:
		max_eitr = WX_SP_MAX_EITR;
		rx_itr_param = WX_20K_ITR;
		tx_itr_param = WX_12K_ITR;
		break;
	case wx_mac_aml:
	case wx_mac_aml40:
		max_eitr = WX_AML_MAX_EITR;
		rx_itr_param = WX_20K_ITR;
		tx_itr_param = WX_12K_ITR;
		break;
	default:
		max_eitr = WX_EM_MAX_EITR;
		rx_itr_param = WX_7K_ITR;
		tx_itr_param = WX_7K_ITR;
		break;
	}

	if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
	    (ec->tx_coalesce_usecs > (max_eitr >> 2)))
		return -EINVAL;

	/* explicit adaptive request: record it and skip constant setup */
	if (ec->use_adaptive_rx_coalesce) {
		wx->adaptive_itr = true;
		wx->rx_itr_setting = 1;
		wx->tx_itr_setting = 1;
		return 0;
	}

	if (ec->rx_coalesce_usecs > 1)
		wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		wx->rx_itr_setting = ec->rx_coalesce_usecs;

	if (ec->tx_coalesce_usecs > 1)
		wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		wx->tx_itr_setting = ec->tx_coalesce_usecs;

	/* leaving adaptive mode: fall back to the per-MAC defaults;
	 * a stored value of 1 re-enables adaptive mode
	 */
	if (wx->adaptive_itr) {
		wx->adaptive_itr = false;
		wx->rx_itr_setting = rx_itr_param;
		wx->tx_itr_setting = tx_itr_param;
	} else if (wx->rx_itr_setting == 1 || wx->tx_itr_setting == 1) {
		wx->adaptive_itr = true;
	}

	if (wx->rx_itr_setting != 1)
		rx_itr_param = wx->rx_itr_setting;

	if (wx->tx_itr_setting != 1)
		tx_itr_param = wx->tx_itr_setting;

	/* mixed Rx/Tx */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		wx->tx_itr_setting = wx->rx_itr_setting;

	/* program every queue vector with its new ITR */
	for (i = 0; i < wx->num_q_vectors; i++) {
		q_vector = wx->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		wx_write_eitr(q_vector);
	}

	wx_update_rsc(wx);

	return 0;
}
EXPORT_SYMBOL(wx_set_coalesce);
477 
478 static unsigned int wx_max_channels(struct wx *wx)
479 {
480 	unsigned int max_combined;
481 
482 	if (!wx->msix_q_entries) {
483 		/* We only support one q_vector without MSI-X */
484 		max_combined = 1;
485 	} else {
486 		/* support up to max allowed queues with RSS */
487 		if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
488 			max_combined = 63;
489 		else
490 			max_combined = 8;
491 	}
492 
493 	return max_combined;
494 }
495 
496 void wx_get_channels(struct net_device *dev,
497 		     struct ethtool_channels *ch)
498 {
499 	struct wx *wx = netdev_priv(dev);
500 
501 	/* report maximum channels */
502 	ch->max_combined = wx_max_channels(wx);
503 
504 	/* report info for other vector */
505 	if (wx->msix_q_entries) {
506 		ch->max_other = 1;
507 		ch->other_count = 1;
508 	}
509 
510 	/* record RSS queues */
511 	ch->combined_count = wx->ring_feature[RING_F_RSS].indices;
512 
513 	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
514 		ch->combined_count = wx->ring_feature[RING_F_FDIR].indices;
515 }
516 EXPORT_SYMBOL(wx_get_channels);
517 
518 int wx_set_channels(struct net_device *dev,
519 		    struct ethtool_channels *ch)
520 {
521 	unsigned int count = ch->combined_count;
522 	struct wx *wx = netdev_priv(dev);
523 
524 	/* verify other_count has not changed */
525 	if (ch->other_count != 1)
526 		return -EINVAL;
527 
528 	/* verify the number of channels does not exceed hardware limits */
529 	if (count > wx_max_channels(wx))
530 		return -EINVAL;
531 
532 	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
533 		wx->ring_feature[RING_F_FDIR].limit = count;
534 
535 	wx->ring_feature[RING_F_RSS].limit = count;
536 
537 	return 0;
538 }
539 EXPORT_SYMBOL(wx_set_channels);
540 
541 u32 wx_rss_indir_size(struct net_device *netdev)
542 {
543 	struct wx *wx = netdev_priv(netdev);
544 
545 	return wx_rss_indir_tbl_entries(wx);
546 }
547 EXPORT_SYMBOL(wx_rss_indir_size);
548 
/**
 * wx_get_rxfh_key_size - size of the RSS hash key in bytes
 * @netdev: network interface device structure (unused)
 */
u32 wx_get_rxfh_key_size(struct net_device *netdev)
{
	return WX_RSS_KEY_SIZE;
}
EXPORT_SYMBOL(wx_get_rxfh_key_size);
554 
555 static void wx_get_reta(struct wx *wx, u32 *indir)
556 {
557 	u32 reta_size = wx_rss_indir_tbl_entries(wx);
558 	u16 rss_m = wx->ring_feature[RING_F_RSS].mask;
559 
560 	if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
561 		rss_m = wx->ring_feature[RING_F_RSS].indices - 1;
562 
563 	for (u32 i = 0; i < reta_size; i++)
564 		indir[i] = wx->rss_indir_tbl[i] & rss_m;
565 }
566 
567 int wx_get_rxfh(struct net_device *netdev,
568 		struct ethtool_rxfh_param *rxfh)
569 {
570 	struct wx *wx = netdev_priv(netdev);
571 
572 	rxfh->hfunc = ETH_RSS_HASH_TOP;
573 
574 	if (rxfh->indir)
575 		wx_get_reta(wx, rxfh->indir);
576 
577 	if (rxfh->key)
578 		memcpy(rxfh->key, wx->rss_key, WX_RSS_KEY_SIZE);
579 
580 	return 0;
581 }
582 EXPORT_SYMBOL(wx_get_rxfh);
583 
/**
 * wx_set_rxfh - set RSS indirection table and/or hash key
 * @netdev: network interface device structure
 * @rxfh: rxfh param structure; NULL indir/key means "no change"
 * @extack: netlink extended ack (unused)
 *
 * Only the Toeplitz hash function is supported. Updated values are
 * written to hardware via wx_store_reta()/wx_store_rsskey().
 */
int wx_set_rxfh(struct net_device *netdev,
		struct ethtool_rxfh_param *rxfh,
		struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u32 reta_entries, i;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	reta_entries = wx_rss_indir_tbl_entries(wx);
	/* Fill out the redirection table */
	if (rxfh->indir) {
		for (i = 0; i < reta_entries; i++)
			wx->rss_indir_tbl[i] = rxfh->indir[i];

		wx_store_reta(wx);
	}

	/* Fill out the rss hash key */
	if (rxfh->key) {
		memcpy(wx->rss_key, rxfh->key, WX_RSS_KEY_SIZE);
		wx_store_rsskey(wx);
	}

	return 0;
}
EXPORT_SYMBOL(wx_set_rxfh);
613 
/* Map each ethtool flow type to the ethtool RXH data bits it implies
 * and the driver-internal RSS field flag that enables L4 hashing for it.
 */
static const struct wx_rss_flow_map rss_flow_table[] = {
	{ TCP_V4_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_TCP },
	{ TCP_V6_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_TCP },
	{ UDP_V4_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_UDP },
	{ UDP_V6_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_UDP },
	{ SCTP_V4_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_SCTP },
	{ SCTP_V6_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_SCTP },
};
622 
623 int wx_get_rxfh_fields(struct net_device *dev,
624 		       struct ethtool_rxfh_fields *nfc)
625 {
626 	struct wx *wx = netdev_priv(dev);
627 
628 	nfc->data = RXH_IP_SRC | RXH_IP_DST;
629 
630 	for (u32 i = 0; i < ARRAY_SIZE(rss_flow_table); i++) {
631 		const struct wx_rss_flow_map *entry = &rss_flow_table[i];
632 
633 		if (entry->flow_type == nfc->flow_type) {
634 			if (wx->rss_flags & entry->flag)
635 				nfc->data |= entry->data;
636 			break;
637 		}
638 	}
639 
640 	return 0;
641 }
642 EXPORT_SYMBOL(wx_get_rxfh_fields);
643 
644 int wx_set_rxfh_fields(struct net_device *dev,
645 		       const struct ethtool_rxfh_fields *nfc,
646 		       struct netlink_ext_ack *extack)
647 {
648 	struct wx *wx = netdev_priv(dev);
649 	u8 flags = wx->rss_flags;
650 
651 	if (!(nfc->data & RXH_IP_SRC) ||
652 	    !(nfc->data & RXH_IP_DST))
653 		return -EINVAL;
654 
655 	for (u32 i = 0; i < ARRAY_SIZE(rss_flow_table); i++) {
656 		const struct wx_rss_flow_map *entry = &rss_flow_table[i];
657 
658 		if (entry->flow_type == nfc->flow_type) {
659 			if (nfc->data & entry->data)
660 				flags |= entry->flag;
661 			else
662 				flags &= ~entry->flag;
663 
664 			if (flags != wx->rss_flags) {
665 				wx->rss_flags = flags;
666 				wx_config_rss_field(wx);
667 			}
668 
669 			return 0;
670 		}
671 	}
672 
673 	return -EINVAL;
674 }
675 EXPORT_SYMBOL(wx_set_rxfh_fields);
676 
677 u32 wx_get_msglevel(struct net_device *netdev)
678 {
679 	struct wx *wx = netdev_priv(netdev);
680 
681 	return wx->msg_enable;
682 }
683 EXPORT_SYMBOL(wx_get_msglevel);
684 
685 void wx_set_msglevel(struct net_device *netdev, u32 data)
686 {
687 	struct wx *wx = netdev_priv(netdev);
688 
689 	wx->msg_enable = data;
690 }
691 EXPORT_SYMBOL(wx_set_msglevel);
692 
/**
 * wx_get_ts_info - report timestamping capabilities
 * @dev: network interface device structure
 * @info: timestamping info structure to fill
 *
 * Advertises hardware rx filters for PTP v1 (L4) and v2 (L2/L4),
 * software tx plus hardware tx/rx timestamping, and the PHC index
 * when a PTP clock is registered (-1 otherwise).
 */
int wx_get_ts_info(struct net_device *dev,
		   struct kernel_ethtool_ts_info *info)
{
	struct wx *wx = netdev_priv(dev);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	/* -1 tells userspace no PHC is available */
	if (wx->ptp_clock)
		info->phc_index = ptp_clock_index(wx->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
			 BIT(HWTSTAMP_TX_ON);

	return 0;
}
EXPORT_SYMBOL(wx_get_ts_info);
727 
728 void wx_get_ptp_stats(struct net_device *dev,
729 		      struct ethtool_ts_stats *ts_stats)
730 {
731 	struct wx *wx = netdev_priv(dev);
732 
733 	if (wx->ptp_clock) {
734 		ts_stats->pkts = wx->tx_hwtstamp_pkts;
735 		ts_stats->lost = wx->tx_hwtstamp_timeouts +
736 				 wx->tx_hwtstamp_skipped +
737 				 wx->rx_hwtstamp_cleared;
738 		ts_stats->err = wx->tx_hwtstamp_errors;
739 	}
740 }
741 EXPORT_SYMBOL(wx_get_ptp_stats);
742 
743 static int wx_get_link_ksettings_vf(struct net_device *netdev,
744 				    struct ethtool_link_ksettings *cmd)
745 {
746 	struct wx *wx = netdev_priv(netdev);
747 
748 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
749 	cmd->base.autoneg = AUTONEG_DISABLE;
750 	cmd->base.port = PORT_NONE;
751 	cmd->base.duplex = DUPLEX_FULL;
752 	cmd->base.speed = wx->speed;
753 
754 	return 0;
755 }
756 
/* Reduced ethtool_ops used by VF drivers: read-only queries only,
 * no link or coalesce configuration.
 */
static const struct ethtool_ops wx_ethtool_ops_vf = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo		= wx_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= wx_get_ringparam,
	.get_msglevel		= wx_get_msglevel,
	.get_coalesce		= wx_get_coalesce,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings	= wx_get_link_ksettings_vf,
};
769 
/**
 * wx_set_ethtool_ops_vf - install the VF ethtool operations
 * @netdev: network interface device structure
 */
void wx_set_ethtool_ops_vf(struct net_device *netdev)
{
	netdev->ethtool_ops = &wx_ethtool_ops_vf;
}
EXPORT_SYMBOL(wx_set_ethtool_ops_vf);
775