xref: /linux/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c (revision d30c1683aaecb93d2ab95685dc4300a33d3cea7a)
1 // SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
2 /*
3  * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
4  * Copyright (c) 2014, Synopsys, Inc.
5  * All rights reserved
6  */
7 
8 #include <linux/spinlock.h>
9 #include <linux/phy.h>
10 #include <linux/net_tstamp.h>
11 
12 #include "xgbe.h"
13 #include "xgbe-common.h"
14 
/* Descriptor for one ethtool statistic: the name reported to userspace
 * plus the size and byte offset of the backing counter field inside
 * struct xgbe_prv_data.
 */
struct xgbe_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
	int stat_size;				/* sizeof() the counter field */
	int stat_offset;			/* offsetof() within xgbe_prv_data */
};
20 
/* Build an xgbe_stats entry for a hardware MMC counter kept in
 * pdata->mmc_stats.
 */
#define XGMAC_MMC_STAT(_string, _var)				\
	{ _string,						\
	  sizeof_field(struct xgbe_mmc_stats, _var),		\
	  offsetof(struct xgbe_prv_data, mmc_stats._var),	\
	}

/* Build an xgbe_stats entry for a driver-maintained counter kept in
 * pdata->ext_stats.
 */
#define XGMAC_EXT_STAT(_string, _var)				\
	{ _string,						\
	  sizeof_field(struct xgbe_ext_stats, _var),		\
	  offsetof(struct xgbe_prv_data, ext_stats._var),	\
	}
32 
/* Table of all fixed (non per-queue) statistics exposed via ethtool -S.
 * The string order here must match the value order produced by
 * xgbe_get_ethtool_stats().
 */
static const struct xgbe_stats xgbe_gstring_stats[] = {
	/* Tx counters */
	XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	XGMAC_MMC_STAT("tx_packets", txframecount_gb),
	XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	XGMAC_EXT_STAT("tx_vxlan_packets", tx_vxlan_packets),
	XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets),
	XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	/* Rx counters */
	XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	XGMAC_EXT_STAT("rx_vxlan_packets", rx_vxlan_packets),
	XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	XGMAC_EXT_STAT("rx_csum_errors", rx_csum_errors),
	XGMAC_EXT_STAT("rx_vxlan_csum_errors", rx_vxlan_csum_errors),
	XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
	XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
	XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
};

/* Number of fixed statistics (per-queue stats are added on top) */
#define XGBE_STATS_COUNT	ARRAY_SIZE(xgbe_gstring_stats)
81 
82 static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
83 {
84 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
85 	int i;
86 
87 	switch (stringset) {
88 	case ETH_SS_TEST:
89 		xgbe_selftest_get_strings(pdata, data);
90 		break;
91 	case ETH_SS_STATS:
92 		for (i = 0; i < XGBE_STATS_COUNT; i++)
93 			ethtool_puts(&data, xgbe_gstring_stats[i].stat_string);
94 
95 		for (i = 0; i < pdata->tx_ring_count; i++) {
96 			ethtool_sprintf(&data, "txq_%u_packets", i);
97 			ethtool_sprintf(&data, "txq_%u_bytes", i);
98 		}
99 
100 		for (i = 0; i < pdata->rx_ring_count; i++) {
101 			ethtool_sprintf(&data, "rxq_%u_packets", i);
102 			ethtool_sprintf(&data, "rxq_%u_bytes", i);
103 		}
104 
105 		break;
106 	}
107 }
108 
/* Fill the ethtool statistics buffer.  Value order must match the string
 * order produced by xgbe_get_strings(): fixed stats, then per-queue Tx,
 * then per-queue Rx.
 */
static void xgbe_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	u8 *stat;
	int i;

	/* Refresh the hardware MMC counters before reading them out */
	pdata->hw_if.read_mmc_stats(pdata);
	for (i = 0; i < XGBE_STATS_COUNT; i++) {
		/* stat_offset is a byte offset into xgbe_prv_data */
		stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
		*data++ = *(u64 *)stat;
	}
	/* Per-queue Tx packet/byte counters */
	for (i = 0; i < pdata->tx_ring_count; i++) {
		*data++ = pdata->ext_stats.txq_packets[i];
		*data++ = pdata->ext_stats.txq_bytes[i];
	}
	/* Per-queue Rx packet/byte counters */
	for (i = 0; i < pdata->rx_ring_count; i++) {
		*data++ = pdata->ext_stats.rxq_packets[i];
		*data++ = pdata->ext_stats.rxq_bytes[i];
	}
}
130 
131 static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
132 {
133 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
134 	int ret;
135 
136 	switch (stringset) {
137 	case ETH_SS_TEST:
138 		ret = xgbe_selftest_get_count(pdata);
139 		break;
140 	case ETH_SS_STATS:
141 		ret = XGBE_STATS_COUNT +
142 		      (pdata->tx_ring_count * 2) +
143 		      (pdata->rx_ring_count * 2);
144 		break;
145 
146 	default:
147 		ret = -EOPNOTSUPP;
148 	}
149 
150 	return ret;
151 }
152 
153 static void xgbe_get_pauseparam(struct net_device *netdev,
154 				struct ethtool_pauseparam *pause)
155 {
156 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
157 
158 	pause->autoneg = pdata->phy.pause_autoneg;
159 	pause->tx_pause = pdata->phy.tx_pause;
160 	pause->rx_pause = pdata->phy.rx_pause;
161 }
162 
/* Apply new flow control settings and translate them into the
 * Pause/Asym_Pause advertisement bits, re-triggering auto-negotiation
 * if the interface is up.  Returns 0 or a negative errno.
 */
static int xgbe_set_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct ethtool_link_ksettings *lks = &pdata->phy.lks;
	int ret = 0;

	/* Pause autoneg only makes sense while link autoneg is enabled */
	if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
		netdev_err(netdev,
			   "autoneg disabled, pause autoneg not available\n");
		return -EINVAL;
	}

	pdata->phy.pause_autoneg = pause->autoneg;
	pdata->phy.tx_pause = pause->tx_pause;
	pdata->phy.rx_pause = pause->rx_pause;

	/* Recompute advertisement: rx sets Pause+Asym_Pause, tx then
	 * toggles Asym_Pause (per IEEE 802.3 pause advertisement encoding)
	 */
	XGBE_CLR_ADV(lks, Pause);
	XGBE_CLR_ADV(lks, Asym_Pause);

	if (pause->rx_pause) {
		XGBE_SET_ADV(lks, Pause);
		XGBE_SET_ADV(lks, Asym_Pause);
	}

	if (pause->tx_pause) {
		/* Equivalent to XOR of Asym_Pause */
		if (XGBE_ADV(lks, Asym_Pause))
			XGBE_CLR_ADV(lks, Asym_Pause);
		else
			XGBE_SET_ADV(lks, Asym_Pause);
	}

	/* Push the new advertisement to the PHY only if the device is up */
	if (netif_running(netdev))
		ret = pdata->phy_if.phy_config_aneg(pdata);

	return ret;
}
201 
/* Report the current link settings.  Speed/duplex are only meaningful
 * while the carrier is up; otherwise they are reported as unknown.
 */
static int xgbe_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct ethtool_link_ksettings *lks = &pdata->phy.lks;

	cmd->base.phy_address = pdata->phy.address;

	if (netif_carrier_ok(netdev)) {
		cmd->base.speed = pdata->phy.speed;
		cmd->base.duplex = pdata->phy.duplex;
	} else {
		/* No link - speed/duplex cannot be reported */
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	cmd->base.autoneg = pdata->phy.autoneg;
	cmd->base.port = PORT_NONE;

	/* Copy the driver's link mode bitmaps into the ethtool reply */
	XGBE_LM_COPY(cmd, supported, lks, supported);
	XGBE_LM_COPY(cmd, advertising, lks, advertising);
	XGBE_LM_COPY(cmd, lp_advertising, lks, lp_advertising);

	return 0;
}
227 
/* Validate and apply new link settings (autoneg, speed, duplex and the
 * advertised link modes), then restart auto-negotiation if the interface
 * is running.  Returns 0 or a negative errno.
 */
static int xgbe_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct ethtool_link_ksettings *lks = &pdata->phy.lks;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
	u32 speed;
	int ret;

	speed = cmd->base.speed;

	/* The PHY address cannot be changed through this interface */
	if (cmd->base.phy_address != pdata->phy.address) {
		netdev_err(netdev, "invalid phy address %hhu\n",
			   cmd->base.phy_address);
		return -EINVAL;
	}

	if ((cmd->base.autoneg != AUTONEG_ENABLE) &&
	    (cmd->base.autoneg != AUTONEG_DISABLE)) {
		netdev_err(netdev, "unsupported autoneg %hhu\n",
			   cmd->base.autoneg);
		return -EINVAL;
	}

	/* With autoneg off, a fixed, supported speed and full duplex
	 * must be requested
	 */
	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		if (!pdata->phy_if.phy_valid_speed(pdata, speed)) {
			netdev_err(netdev, "unsupported speed %u\n", speed);
			return -EINVAL;
		}

		if (cmd->base.duplex != DUPLEX_FULL) {
			netdev_err(netdev, "unsupported duplex %hhu\n",
				   cmd->base.duplex);
			return -EINVAL;
		}
	}

	netif_dbg(pdata, link, netdev,
		  "requested advertisement 0x%*pb, phy supported 0x%*pb\n",
		  __ETHTOOL_LINK_MODE_MASK_NBITS, cmd->link_modes.advertising,
		  __ETHTOOL_LINK_MODE_MASK_NBITS, lks->link_modes.supported);

	/* Restrict the request to modes the PHY actually supports */
	linkmode_and(advertising, cmd->link_modes.advertising,
		     lks->link_modes.supported);

	/* Autoneg with nothing left to advertise is a user error */
	if ((cmd->base.autoneg == AUTONEG_ENABLE) &&
	    bitmap_empty(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
		netdev_err(netdev,
			   "unsupported requested advertisement\n");
		return -EINVAL;
	}

	ret = 0;
	pdata->phy.autoneg = cmd->base.autoneg;
	pdata->phy.speed = speed;
	pdata->phy.duplex = cmd->base.duplex;
	linkmode_copy(lks->link_modes.advertising, advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE)
		XGBE_SET_ADV(lks, Autoneg);
	else
		XGBE_CLR_ADV(lks, Autoneg);

	/* Only reconfigure the PHY while the device is up */
	if (netif_running(netdev))
		ret = pdata->phy_if.phy_config_aneg(pdata);

	return ret;
}
296 
/* Report driver name, bus info and a firmware version string derived
 * from the MAC version register fields.
 */
static void xgbe_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	strscpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
	strscpy(drvinfo->bus_info, dev_name(pdata->dev),
		sizeof(drvinfo->bus_info));
	/* Version is USERVER.DEVID.SNPSVER from the MAC_VR register */
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
}
311 
312 static u32 xgbe_get_msglevel(struct net_device *netdev)
313 {
314 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
315 
316 	return pdata->msg_enable;
317 }
318 
319 static void xgbe_set_msglevel(struct net_device *netdev, u32 msglevel)
320 {
321 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
322 
323 	pdata->msg_enable = msglevel;
324 }
325 
326 static int xgbe_get_coalesce(struct net_device *netdev,
327 			     struct ethtool_coalesce *ec,
328 			     struct kernel_ethtool_coalesce *kernel_coal,
329 			     struct netlink_ext_ack *extack)
330 {
331 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
332 
333 	memset(ec, 0, sizeof(struct ethtool_coalesce));
334 
335 	ec->rx_coalesce_usecs = pdata->rx_usecs;
336 	ec->rx_max_coalesced_frames = pdata->rx_frames;
337 
338 	ec->tx_coalesce_usecs = pdata->tx_usecs;
339 	ec->tx_max_coalesced_frames = pdata->tx_frames;
340 
341 	return 0;
342 }
343 
/* Validate and apply new interrupt coalescing settings.  Rx usecs are
 * converted to hardware RIWT units; Tx usecs are bounded and rounded to
 * jiffy granularity since the Tx timer is jiffy based.  Returns 0 or a
 * negative errno without touching the hardware on any validation error.
 */
static int xgbe_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int rx_frames, rx_riwt, rx_usecs;
	unsigned int tx_frames, tx_usecs;
	unsigned int jiffy_us = jiffies_to_usecs(1);

	/* Convert requested Rx usecs to the hardware watchdog unit */
	rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
	rx_usecs = ec->rx_coalesce_usecs;
	rx_frames = ec->rx_max_coalesced_frames;

	/* Use smallest possible value if conversion resulted in zero */
	if (rx_usecs && !rx_riwt)
		rx_riwt = 1;

	/* Check the bounds of values for Rx */
	if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
		netdev_err(netdev, "rx-usec is limited to %d usecs\n",
			   hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
		return -EINVAL;
	}
	if (rx_frames > pdata->rx_desc_count) {
		netdev_err(netdev, "rx-frames is limited to %d frames\n",
			   pdata->rx_desc_count);
		return -EINVAL;
	}

	tx_usecs = ec->tx_coalesce_usecs;
	tx_frames = ec->tx_max_coalesced_frames;

	/* Check the bounds of values for Tx */
	if (!tx_usecs) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "tx-usecs must not be 0");
		return -EINVAL;
	}
	if (tx_usecs > XGMAC_MAX_COAL_TX_TICK) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "tx-usecs is limited to %d usec",
				       XGMAC_MAX_COAL_TX_TICK);
		return -EINVAL;
	}
	if (tx_frames > pdata->tx_desc_count) {
		netdev_err(netdev, "tx-frames is limited to %d frames\n",
			   pdata->tx_desc_count);
		return -EINVAL;
	}

	/* Round tx-usecs to nearest multiple of jiffy granularity */
	if (tx_usecs % jiffy_us) {
		tx_usecs = rounddown(tx_usecs, jiffy_us);
		/* Rounding down could hit zero; use one jiffy instead */
		if (!tx_usecs)
			tx_usecs = jiffy_us;
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "tx-usecs rounded to %u usec due to jiffy granularity (%u usec)",
				       tx_usecs, jiffy_us);
	}

	/* All values validated - commit to software state and hardware */
	pdata->rx_riwt = rx_riwt;
	pdata->rx_usecs = rx_usecs;
	pdata->rx_frames = rx_frames;
	hw_if->config_rx_coalesce(pdata);

	pdata->tx_usecs = tx_usecs;
	pdata->tx_frames = tx_frames;
	hw_if->config_tx_coalesce(pdata);

	return 0;
}
416 
417 static int xgbe_get_rxnfc(struct net_device *netdev,
418 			  struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
419 {
420 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
421 
422 	switch (rxnfc->cmd) {
423 	case ETHTOOL_GRXRINGS:
424 		rxnfc->data = pdata->rx_ring_count;
425 		break;
426 	default:
427 		return -EOPNOTSUPP;
428 	}
429 
430 	return 0;
431 }
432 
433 static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
434 {
435 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
436 
437 	return sizeof(pdata->rss_key);
438 }
439 
440 static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
441 {
442 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
443 
444 	return ARRAY_SIZE(pdata->rss_table);
445 }
446 
/* Report the RSS configuration: indirection table, hash key and hash
 * function.  Either output buffer may be NULL if not requested.
 */
static int xgbe_get_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	unsigned int i;

	if (rxfh->indir) {
		/* Extract the DMA channel from each table register */
		for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
			rxfh->indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
							MAC_RSSDR, DMCH);
	}

	if (rxfh->key)
		memcpy(rxfh->key, pdata->rss_key, sizeof(pdata->rss_key));

	/* Hardware implements Toeplitz hashing only */
	rxfh->hfunc = ETH_RSS_HASH_TOP;

	return 0;
}
466 
/* Apply a new RSS configuration.  Only the Toeplitz hash function is
 * supported; the indirection table and hash key are updated in hardware
 * if supplied.  Returns 0 or a negative errno.
 */
static int xgbe_set_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	int ret;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP) {
		netdev_err(netdev, "unsupported hash function\n");
		return -EOPNOTSUPP;
	}

	if (rxfh->indir) {
		ret = hw_if->set_rss_lookup_table(pdata, rxfh->indir);
		if (ret)
			return ret;
	}

	if (rxfh->key) {
		ret = hw_if->set_rss_hash_key(pdata, rxfh->key);
		if (ret)
			return ret;
	}

	return 0;
}
495 
/* Report hardware timestamping capabilities: supported timestamping
 * modes, the PTP clock index (if one was registered) and the supported
 * Tx types and Rx filters.
 */
static int xgbe_get_ts_info(struct net_device *netdev,
			    struct kernel_ethtool_ts_info *ts_info)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				   SOF_TIMESTAMPING_TX_HARDWARE |
				   SOF_TIMESTAMPING_RX_HARDWARE |
				   SOF_TIMESTAMPING_RAW_HARDWARE;

	/* Only report a PHC index if the PTP clock registered successfully */
	if (pdata->ptp_clock)
		ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);

	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
			      (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}
524 
525 static int xgbe_get_module_info(struct net_device *netdev,
526 				struct ethtool_modinfo *modinfo)
527 {
528 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
529 
530 	return pdata->phy_if.module_info(pdata, modinfo);
531 }
532 
533 static int xgbe_get_module_eeprom(struct net_device *netdev,
534 				  struct ethtool_eeprom *eeprom, u8 *data)
535 {
536 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
537 
538 	return pdata->phy_if.module_eeprom(pdata, eeprom, data);
539 }
540 
541 static void
542 xgbe_get_ringparam(struct net_device *netdev,
543 		   struct ethtool_ringparam *ringparam,
544 		   struct kernel_ethtool_ringparam *kernel_ringparam,
545 		   struct netlink_ext_ack *extack)
546 {
547 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
548 
549 	ringparam->rx_max_pending = XGBE_RX_DESC_CNT_MAX;
550 	ringparam->tx_max_pending = XGBE_TX_DESC_CNT_MAX;
551 	ringparam->rx_pending = pdata->rx_desc_count;
552 	ringparam->tx_pending = pdata->tx_desc_count;
553 }
554 
/* Validate and apply new descriptor ring sizes.  Values are bounded by
 * the driver min/max and rounded down to a power of two (a hardware
 * requirement); the device is restarted only if a size actually changes.
 */
static int xgbe_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ringparam,
			      struct kernel_ethtool_ringparam *kernel_ringparam,
			      struct netlink_ext_ack *extack)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	unsigned int rx, tx;

	/* Mini and jumbo rings are not implemented */
	if (ringparam->rx_mini_pending || ringparam->rx_jumbo_pending) {
		netdev_err(netdev, "unsupported ring parameter\n");
		return -EINVAL;
	}

	if ((ringparam->rx_pending < XGBE_RX_DESC_CNT_MIN) ||
	    (ringparam->rx_pending > XGBE_RX_DESC_CNT_MAX)) {
		netdev_err(netdev,
			   "rx ring parameter must be between %u and %u\n",
			   XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX);
		return -EINVAL;
	}

	if ((ringparam->tx_pending < XGBE_TX_DESC_CNT_MIN) ||
	    (ringparam->tx_pending > XGBE_TX_DESC_CNT_MAX)) {
		netdev_err(netdev,
			   "tx ring parameter must be between %u and %u\n",
			   XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX);
		return -EINVAL;
	}

	/* Ring sizes must be a power of two; round down and notify */
	rx = __rounddown_pow_of_two(ringparam->rx_pending);
	if (rx != ringparam->rx_pending)
		netdev_notice(netdev,
			      "rx ring parameter rounded to power of two: %u\n",
			      rx);

	tx = __rounddown_pow_of_two(ringparam->tx_pending);
	if (tx != ringparam->tx_pending)
		netdev_notice(netdev,
			      "tx ring parameter rounded to power of two: %u\n",
			      tx);

	/* Avoid a needless restart when nothing changed */
	if ((rx == pdata->rx_desc_count) &&
	    (tx == pdata->tx_desc_count))
		goto out;

	pdata->rx_desc_count = rx;
	pdata->tx_desc_count = tx;

	/* Reallocate rings and reprogram the hardware */
	xgbe_restart_dev(pdata);

out:
	return 0;
}
608 
/* Report channel limits and current counts.  Ring counts are split into
 * a combined portion (min of Rx and Tx) plus any Rx-only or Tx-only
 * remainder, matching the ethtool channel model.
 */
static void xgbe_get_channels(struct net_device *netdev,
			      struct ethtool_channels *channels)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	unsigned int rx, tx, combined;

	/* Calculate maximums allowed:
	 *   - Take into account the number of available IRQs
	 *   - Do not take into account the number of online CPUs so that
	 *     the user can over-subscribe if desired
	 *   - Tx is additionally limited by the number of hardware queues
	 */
	rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
	rx = min(rx, pdata->channel_irq_count);
	tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
	tx = min(tx, pdata->channel_irq_count);
	tx = min(tx, pdata->tx_max_q_count);

	combined = min(rx, tx);

	/* Reserve one channel for the combined requirement, the rest may
	 * be reported as dedicated Rx-only or Tx-only maximums
	 */
	channels->max_combined = combined;
	channels->max_rx = rx ? rx - 1 : 0;
	channels->max_tx = tx ? tx - 1 : 0;

	/* Get current settings based on device state */
	rx = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
	tx = pdata->new_tx_ring_count ? : pdata->tx_ring_count;

	/* Express current counts as combined + rx-only/tx-only remainder */
	combined = min(rx, tx);
	rx -= combined;
	tx -= combined;

	channels->combined_count = combined;
	channels->rx_count = rx;
	channels->tx_count = tx;
}
645 
/* Log the user-supplied channel counts to make set_channels failures
 * easier to diagnose.
 */
static void xgbe_print_set_channels_input(struct net_device *netdev,
					  struct ethtool_channels *channels)
{
	netdev_err(netdev, "channel inputs: combined=%u, rx-only=%u, tx-only=%u\n",
		   channels->combined_count, channels->rx_count,
		   channels->tx_count);
}
653 
/* Validate and apply a new channel configuration.  At least one combined
 * channel is required, Rx-only and Tx-only channels are mutually
 * exclusive, and the totals must fit the hardware/IRQ limits.  A change
 * takes effect via a full device restart.  Returns 0 or -EINVAL.
 */
static int xgbe_set_channels(struct net_device *netdev,
			     struct ethtool_channels *channels)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	unsigned int rx, rx_curr, tx, tx_curr, combined;

	/* Calculate maximums allowed:
	 *   - Take into account the number of available IRQs
	 *   - Do not take into account the number of online CPUs so that
	 *     the user can over-subscribe if desired
	 *   - Tx is additionally limited by the number of hardware queues
	 */
	rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
	rx = min(rx, pdata->channel_irq_count);
	tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
	tx = min(tx, pdata->tx_max_q_count);
	tx = min(tx, pdata->channel_irq_count);

	combined = min(rx, tx);

	/* Should not be setting other count */
	if (channels->other_count) {
		netdev_err(netdev,
			   "other channel count must be zero\n");
		return -EINVAL;
	}

	/* Require at least one Combined (Rx and Tx) channel */
	if (!channels->combined_count) {
		netdev_err(netdev,
			   "at least one combined Rx/Tx channel is required\n");
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	/* Check combined channels */
	if (channels->combined_count > combined) {
		netdev_err(netdev,
			   "combined channel count cannot exceed %u\n",
			   combined);
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	/* Can have some Rx-only or Tx-only channels, but not both */
	if (channels->rx_count && channels->tx_count) {
		netdev_err(netdev,
			   "cannot specify both Rx-only and Tx-only channels\n");
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	/* Check that we don't exceed the maximum number of channels */
	if ((channels->combined_count + channels->rx_count) > rx) {
		netdev_err(netdev,
			   "total Rx channels (%u) requested exceeds maximum available (%u)\n",
			   channels->combined_count + channels->rx_count, rx);
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	if ((channels->combined_count + channels->tx_count) > tx) {
		netdev_err(netdev,
			   "total Tx channels (%u) requested exceeds maximum available (%u)\n",
			   channels->combined_count + channels->tx_count, tx);
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	/* Translate the ethtool channel model back into ring counts */
	rx = channels->combined_count + channels->rx_count;
	tx = channels->combined_count + channels->tx_count;

	/* Compare against any pending (not yet applied) counts first */
	rx_curr = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
	tx_curr = pdata->new_tx_ring_count ? : pdata->tx_ring_count;

	if ((rx == rx_curr) && (tx == tx_curr))
		goto out;

	pdata->new_rx_ring_count = rx;
	pdata->new_tx_ring_count = tx;

	/* Tear down and rebuild the device with the new channel counts */
	xgbe_full_restart_dev(pdata);

out:
	return 0;
}
740 
/* ethtool operations supported by the XGBE driver */
static const struct ethtool_ops xgbe_ethtool_ops = {
	/* Coalesce parameters the core allows userspace to modify */
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = xgbe_get_drvinfo,
	.get_msglevel = xgbe_get_msglevel,
	.set_msglevel = xgbe_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_coalesce = xgbe_get_coalesce,
	.set_coalesce = xgbe_set_coalesce,
	.get_pauseparam = xgbe_get_pauseparam,
	.set_pauseparam = xgbe_set_pauseparam,
	.get_strings = xgbe_get_strings,
	.get_ethtool_stats = xgbe_get_ethtool_stats,
	.get_sset_count = xgbe_get_sset_count,
	.get_rxnfc = xgbe_get_rxnfc,
	.get_rxfh_key_size = xgbe_get_rxfh_key_size,
	.get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
	.get_rxfh = xgbe_get_rxfh,
	.set_rxfh = xgbe_set_rxfh,
	.get_ts_info = xgbe_get_ts_info,
	.get_link_ksettings = xgbe_get_link_ksettings,
	.set_link_ksettings = xgbe_set_link_ksettings,
	.get_module_info = xgbe_get_module_info,
	.get_module_eeprom = xgbe_get_module_eeprom,
	.get_ringparam = xgbe_get_ringparam,
	.set_ringparam = xgbe_set_ringparam,
	.get_channels = xgbe_get_channels,
	.set_channels = xgbe_set_channels,
	.self_test = xgbe_selftest_run,
};
771 
/* Accessor used by the netdev setup code to install the ethtool ops */
const struct ethtool_ops *xgbe_get_ethtool_ops(void)
{
	return &xgbe_ethtool_ops;
}
776