xref: /linux/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c (revision 0d2ab5f922e75d10162e7199826e14df9cfae5cc)
1 // SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
2 /*
3  * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
4  * Copyright (c) 2014, Synopsys, Inc.
5  * All rights reserved
6  */
7 
8 #include <linux/spinlock.h>
9 #include <linux/phy.h>
10 #include <linux/net_tstamp.h>
11 
12 #include "xgbe.h"
13 #include "xgbe-common.h"
14 
/* Describes one ethtool statistic: the name reported to userspace and
 * where the counter value lives within struct xgbe_prv_data.
 */
struct xgbe_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by "ethtool -S" */
	int stat_size;				/* size of the counter field */
	int stat_offset;			/* byte offset within xgbe_prv_data */
};
20 
/* Build an xgbe_stats entry for a hardware MMC (MAC Management Counters)
 * statistic stored in pdata->mmc_stats.
 */
#define XGMAC_MMC_STAT(_string, _var)				\
	{ _string,						\
	  sizeof_field(struct xgbe_mmc_stats, _var),		\
	  offsetof(struct xgbe_prv_data, mmc_stats._var),	\
	}

/* Build an xgbe_stats entry for a driver-maintained extended statistic
 * stored in pdata->ext_stats.
 */
#define XGMAC_EXT_STAT(_string, _var)				\
	{ _string,						\
	  sizeof_field(struct xgbe_ext_stats, _var),		\
	  offsetof(struct xgbe_prv_data, ext_stats._var),	\
	}
32 
/* Fixed statistics reported via ethtool -S.  The order here defines the
 * userspace ABI: xgbe_get_strings() and xgbe_get_ethtool_stats() must walk
 * this table in the same order, so do not reorder entries.
 */
static const struct xgbe_stats xgbe_gstring_stats[] = {
	/* Tx counters */
	XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	XGMAC_MMC_STAT("tx_packets", txframecount_gb),
	XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	XGMAC_EXT_STAT("tx_vxlan_packets", tx_vxlan_packets),
	XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets),
	XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	/* Rx counters */
	XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	XGMAC_EXT_STAT("rx_vxlan_packets", rx_vxlan_packets),
	XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	XGMAC_EXT_STAT("rx_csum_errors", rx_csum_errors),
	XGMAC_EXT_STAT("rx_vxlan_csum_errors", rx_vxlan_csum_errors),
	XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
	XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
	XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
};

/* Number of fixed (non-per-queue) statistics */
#define XGBE_STATS_COUNT	ARRAY_SIZE(xgbe_gstring_stats)
81 
82 static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
83 {
84 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
85 	int i;
86 
87 	switch (stringset) {
88 	case ETH_SS_STATS:
89 		for (i = 0; i < XGBE_STATS_COUNT; i++)
90 			ethtool_puts(&data, xgbe_gstring_stats[i].stat_string);
91 
92 		for (i = 0; i < pdata->tx_ring_count; i++) {
93 			ethtool_sprintf(&data, "txq_%u_packets", i);
94 			ethtool_sprintf(&data, "txq_%u_bytes", i);
95 		}
96 
97 		for (i = 0; i < pdata->rx_ring_count; i++) {
98 			ethtool_sprintf(&data, "rxq_%u_packets", i);
99 			ethtool_sprintf(&data, "rxq_%u_bytes", i);
100 		}
101 
102 		break;
103 	}
104 }
105 
106 static void xgbe_get_ethtool_stats(struct net_device *netdev,
107 				   struct ethtool_stats *stats, u64 *data)
108 {
109 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
110 	u8 *stat;
111 	int i;
112 
113 	pdata->hw_if.read_mmc_stats(pdata);
114 	for (i = 0; i < XGBE_STATS_COUNT; i++) {
115 		stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
116 		*data++ = *(u64 *)stat;
117 	}
118 	for (i = 0; i < pdata->tx_ring_count; i++) {
119 		*data++ = pdata->ext_stats.txq_packets[i];
120 		*data++ = pdata->ext_stats.txq_bytes[i];
121 	}
122 	for (i = 0; i < pdata->rx_ring_count; i++) {
123 		*data++ = pdata->ext_stats.rxq_packets[i];
124 		*data++ = pdata->ext_stats.rxq_bytes[i];
125 	}
126 }
127 
128 static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
129 {
130 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
131 	int ret;
132 
133 	switch (stringset) {
134 	case ETH_SS_STATS:
135 		ret = XGBE_STATS_COUNT +
136 		      (pdata->tx_ring_count * 2) +
137 		      (pdata->rx_ring_count * 2);
138 		break;
139 
140 	default:
141 		ret = -EOPNOTSUPP;
142 	}
143 
144 	return ret;
145 }
146 
147 static void xgbe_get_pauseparam(struct net_device *netdev,
148 				struct ethtool_pauseparam *pause)
149 {
150 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
151 
152 	pause->autoneg = pdata->phy.pause_autoneg;
153 	pause->tx_pause = pdata->phy.tx_pause;
154 	pause->rx_pause = pdata->phy.rx_pause;
155 }
156 
157 static int xgbe_set_pauseparam(struct net_device *netdev,
158 			       struct ethtool_pauseparam *pause)
159 {
160 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
161 	struct ethtool_link_ksettings *lks = &pdata->phy.lks;
162 	int ret = 0;
163 
164 	if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
165 		netdev_err(netdev,
166 			   "autoneg disabled, pause autoneg not available\n");
167 		return -EINVAL;
168 	}
169 
170 	pdata->phy.pause_autoneg = pause->autoneg;
171 	pdata->phy.tx_pause = pause->tx_pause;
172 	pdata->phy.rx_pause = pause->rx_pause;
173 
174 	XGBE_CLR_ADV(lks, Pause);
175 	XGBE_CLR_ADV(lks, Asym_Pause);
176 
177 	if (pause->rx_pause) {
178 		XGBE_SET_ADV(lks, Pause);
179 		XGBE_SET_ADV(lks, Asym_Pause);
180 	}
181 
182 	if (pause->tx_pause) {
183 		/* Equivalent to XOR of Asym_Pause */
184 		if (XGBE_ADV(lks, Asym_Pause))
185 			XGBE_CLR_ADV(lks, Asym_Pause);
186 		else
187 			XGBE_SET_ADV(lks, Asym_Pause);
188 	}
189 
190 	if (netif_running(netdev))
191 		ret = pdata->phy_if.phy_config_aneg(pdata);
192 
193 	return ret;
194 }
195 
196 static int xgbe_get_link_ksettings(struct net_device *netdev,
197 				   struct ethtool_link_ksettings *cmd)
198 {
199 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
200 	struct ethtool_link_ksettings *lks = &pdata->phy.lks;
201 
202 	cmd->base.phy_address = pdata->phy.address;
203 
204 	if (netif_carrier_ok(netdev)) {
205 		cmd->base.speed = pdata->phy.speed;
206 		cmd->base.duplex = pdata->phy.duplex;
207 	} else {
208 		cmd->base.speed = SPEED_UNKNOWN;
209 		cmd->base.duplex = DUPLEX_UNKNOWN;
210 	}
211 
212 	cmd->base.autoneg = pdata->phy.autoneg;
213 	cmd->base.port = PORT_NONE;
214 
215 	XGBE_LM_COPY(cmd, supported, lks, supported);
216 	XGBE_LM_COPY(cmd, advertising, lks, advertising);
217 	XGBE_LM_COPY(cmd, lp_advertising, lks, lp_advertising);
218 
219 	return 0;
220 }
221 
/* Validate and apply new link settings from ethtool, then kick off
 * auto-negotiation if the interface is running.
 */
static int xgbe_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct ethtool_link_ksettings *lks = &pdata->phy.lks;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
	u32 speed;
	int ret;

	speed = cmd->base.speed;

	/* The PHY address is fixed; reject attempts to change it */
	if (cmd->base.phy_address != pdata->phy.address) {
		netdev_err(netdev, "invalid phy address %hhu\n",
			   cmd->base.phy_address);
		return -EINVAL;
	}

	if ((cmd->base.autoneg != AUTONEG_ENABLE) &&
	    (cmd->base.autoneg != AUTONEG_DISABLE)) {
		netdev_err(netdev, "unsupported autoneg %hhu\n",
			   cmd->base.autoneg);
		return -EINVAL;
	}

	/* With autoneg off, a valid fixed speed and full duplex are required */
	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		if (!pdata->phy_if.phy_valid_speed(pdata, speed)) {
			netdev_err(netdev, "unsupported speed %u\n", speed);
			return -EINVAL;
		}

		if (cmd->base.duplex != DUPLEX_FULL) {
			netdev_err(netdev, "unsupported duplex %hhu\n",
				   cmd->base.duplex);
			return -EINVAL;
		}
	}

	netif_dbg(pdata, link, netdev,
		  "requested advertisement 0x%*pb, phy supported 0x%*pb\n",
		  __ETHTOOL_LINK_MODE_MASK_NBITS, cmd->link_modes.advertising,
		  __ETHTOOL_LINK_MODE_MASK_NBITS, lks->link_modes.supported);

	/* Restrict the requested advertisement to what the PHY supports */
	linkmode_and(advertising, cmd->link_modes.advertising,
		     lks->link_modes.supported);

	/* Autoneg needs at least one supported mode left to advertise */
	if ((cmd->base.autoneg == AUTONEG_ENABLE) &&
	    bitmap_empty(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
		netdev_err(netdev,
			   "unsupported requested advertisement\n");
		return -EINVAL;
	}

	/* All checks passed; commit the new settings */
	ret = 0;
	pdata->phy.autoneg = cmd->base.autoneg;
	pdata->phy.speed = speed;
	pdata->phy.duplex = cmd->base.duplex;
	linkmode_copy(lks->link_modes.advertising, advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE)
		XGBE_SET_ADV(lks, Autoneg);
	else
		XGBE_CLR_ADV(lks, Autoneg);

	/* Apply immediately only if the interface is up */
	if (netif_running(netdev))
		ret = pdata->phy_if.phy_config_aneg(pdata);

	return ret;
}
290 
291 static void xgbe_get_drvinfo(struct net_device *netdev,
292 			     struct ethtool_drvinfo *drvinfo)
293 {
294 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
295 	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
296 
297 	strscpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
298 	strscpy(drvinfo->bus_info, dev_name(pdata->dev),
299 		sizeof(drvinfo->bus_info));
300 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
301 		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
302 		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
303 		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
304 }
305 
306 static u32 xgbe_get_msglevel(struct net_device *netdev)
307 {
308 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
309 
310 	return pdata->msg_enable;
311 }
312 
313 static void xgbe_set_msglevel(struct net_device *netdev, u32 msglevel)
314 {
315 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
316 
317 	pdata->msg_enable = msglevel;
318 }
319 
320 static int xgbe_get_coalesce(struct net_device *netdev,
321 			     struct ethtool_coalesce *ec,
322 			     struct kernel_ethtool_coalesce *kernel_coal,
323 			     struct netlink_ext_ack *extack)
324 {
325 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
326 
327 	memset(ec, 0, sizeof(struct ethtool_coalesce));
328 
329 	ec->rx_coalesce_usecs = pdata->rx_usecs;
330 	ec->rx_max_coalesced_frames = pdata->rx_frames;
331 
332 	ec->tx_coalesce_usecs = pdata->tx_usecs;
333 	ec->tx_max_coalesced_frames = pdata->tx_frames;
334 
335 	return 0;
336 }
337 
/* Validate and apply new interrupt coalescing settings.  Rx coalescing is
 * programmed via the DMA receive interrupt watchdog timer (RIWT); Tx
 * coalescing is timer-driven and therefore limited to jiffy granularity.
 */
static int xgbe_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int rx_frames, rx_riwt, rx_usecs;
	unsigned int tx_frames, tx_usecs;
	unsigned int jiffy_us = jiffies_to_usecs(1);

	/* Convert the requested Rx usecs into hardware RIWT units */
	rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
	rx_usecs = ec->rx_coalesce_usecs;
	rx_frames = ec->rx_max_coalesced_frames;

	/* Use smallest possible value if conversion resulted in zero */
	if (rx_usecs && !rx_riwt)
		rx_riwt = 1;

	/* Check the bounds of values for Rx */
	if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
		netdev_err(netdev, "rx-usec is limited to %d usecs\n",
			   hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
		return -EINVAL;
	}
	if (rx_frames > pdata->rx_desc_count) {
		netdev_err(netdev, "rx-frames is limited to %d frames\n",
			   pdata->rx_desc_count);
		return -EINVAL;
	}

	tx_usecs = ec->tx_coalesce_usecs;
	tx_frames = ec->tx_max_coalesced_frames;

	/* Check the bounds of values for Tx */
	if (!tx_usecs) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "tx-usecs must not be 0");
		return -EINVAL;
	}
	if (tx_usecs > XGMAC_MAX_COAL_TX_TICK) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "tx-usecs is limited to %d usec",
				       XGMAC_MAX_COAL_TX_TICK);
		return -EINVAL;
	}
	if (tx_frames > pdata->tx_desc_count) {
		netdev_err(netdev, "tx-frames is limited to %d frames\n",
			   pdata->tx_desc_count);
		return -EINVAL;
	}

	/* Round tx-usecs to nearest multiple of jiffy granularity */
	if (tx_usecs % jiffy_us) {
		tx_usecs = rounddown(tx_usecs, jiffy_us);
		if (!tx_usecs)
			tx_usecs = jiffy_us;
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "tx-usecs rounded to %u usec due to jiffy granularity (%u usec)",
				       tx_usecs, jiffy_us);
	}

	/* All values validated; commit and program the hardware */
	pdata->rx_riwt = rx_riwt;
	pdata->rx_usecs = rx_usecs;
	pdata->rx_frames = rx_frames;
	hw_if->config_rx_coalesce(pdata);

	pdata->tx_usecs = tx_usecs;
	pdata->tx_frames = tx_frames;
	hw_if->config_tx_coalesce(pdata);

	return 0;
}
410 
411 static int xgbe_get_rxnfc(struct net_device *netdev,
412 			  struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
413 {
414 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
415 
416 	switch (rxnfc->cmd) {
417 	case ETHTOOL_GRXRINGS:
418 		rxnfc->data = pdata->rx_ring_count;
419 		break;
420 	default:
421 		return -EOPNOTSUPP;
422 	}
423 
424 	return 0;
425 }
426 
427 static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
428 {
429 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
430 
431 	return sizeof(pdata->rss_key);
432 }
433 
434 static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
435 {
436 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
437 
438 	return ARRAY_SIZE(pdata->rss_table);
439 }
440 
441 static int xgbe_get_rxfh(struct net_device *netdev,
442 			 struct ethtool_rxfh_param *rxfh)
443 {
444 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
445 	unsigned int i;
446 
447 	if (rxfh->indir) {
448 		for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
449 			rxfh->indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
450 							MAC_RSSDR, DMCH);
451 	}
452 
453 	if (rxfh->key)
454 		memcpy(rxfh->key, pdata->rss_key, sizeof(pdata->rss_key));
455 
456 	rxfh->hfunc = ETH_RSS_HASH_TOP;
457 
458 	return 0;
459 }
460 
461 static int xgbe_set_rxfh(struct net_device *netdev,
462 			 struct ethtool_rxfh_param *rxfh,
463 			 struct netlink_ext_ack *extack)
464 {
465 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
466 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
467 	int ret;
468 
469 	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
470 	    rxfh->hfunc != ETH_RSS_HASH_TOP) {
471 		netdev_err(netdev, "unsupported hash function\n");
472 		return -EOPNOTSUPP;
473 	}
474 
475 	if (rxfh->indir) {
476 		ret = hw_if->set_rss_lookup_table(pdata, rxfh->indir);
477 		if (ret)
478 			return ret;
479 	}
480 
481 	if (rxfh->key) {
482 		ret = hw_if->set_rss_hash_key(pdata, rxfh->key);
483 		if (ret)
484 			return ret;
485 	}
486 
487 	return 0;
488 }
489 
490 static int xgbe_get_ts_info(struct net_device *netdev,
491 			    struct kernel_ethtool_ts_info *ts_info)
492 {
493 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
494 
495 	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
496 				   SOF_TIMESTAMPING_TX_HARDWARE |
497 				   SOF_TIMESTAMPING_RX_HARDWARE |
498 				   SOF_TIMESTAMPING_RAW_HARDWARE;
499 
500 	if (pdata->ptp_clock)
501 		ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
502 
503 	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
504 	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
505 			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
506 			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
507 			      (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
508 			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
509 			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
510 			      (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
511 			      (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
512 			      (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
513 			      (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
514 			      (1 << HWTSTAMP_FILTER_ALL);
515 
516 	return 0;
517 }
518 
/* Report SFP/module EEPROM layout; delegates to the PHY interface,
 * which owns module access.
 */
static int xgbe_get_module_info(struct net_device *netdev,
				struct ethtool_modinfo *modinfo)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	return pdata->phy_if.module_info(pdata, modinfo);
}
526 
/* Read module EEPROM contents; delegates to the PHY interface,
 * which owns module access.
 */
static int xgbe_get_module_eeprom(struct net_device *netdev,
				  struct ethtool_eeprom *eeprom, u8 *data)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	return pdata->phy_if.module_eeprom(pdata, eeprom, data);
}
534 
535 static void
536 xgbe_get_ringparam(struct net_device *netdev,
537 		   struct ethtool_ringparam *ringparam,
538 		   struct kernel_ethtool_ringparam *kernel_ringparam,
539 		   struct netlink_ext_ack *extack)
540 {
541 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
542 
543 	ringparam->rx_max_pending = XGBE_RX_DESC_CNT_MAX;
544 	ringparam->tx_max_pending = XGBE_TX_DESC_CNT_MAX;
545 	ringparam->rx_pending = pdata->rx_desc_count;
546 	ringparam->tx_pending = pdata->tx_desc_count;
547 }
548 
549 static int xgbe_set_ringparam(struct net_device *netdev,
550 			      struct ethtool_ringparam *ringparam,
551 			      struct kernel_ethtool_ringparam *kernel_ringparam,
552 			      struct netlink_ext_ack *extack)
553 {
554 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
555 	unsigned int rx, tx;
556 
557 	if (ringparam->rx_mini_pending || ringparam->rx_jumbo_pending) {
558 		netdev_err(netdev, "unsupported ring parameter\n");
559 		return -EINVAL;
560 	}
561 
562 	if ((ringparam->rx_pending < XGBE_RX_DESC_CNT_MIN) ||
563 	    (ringparam->rx_pending > XGBE_RX_DESC_CNT_MAX)) {
564 		netdev_err(netdev,
565 			   "rx ring parameter must be between %u and %u\n",
566 			   XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX);
567 		return -EINVAL;
568 	}
569 
570 	if ((ringparam->tx_pending < XGBE_TX_DESC_CNT_MIN) ||
571 	    (ringparam->tx_pending > XGBE_TX_DESC_CNT_MAX)) {
572 		netdev_err(netdev,
573 			   "tx ring parameter must be between %u and %u\n",
574 			   XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX);
575 		return -EINVAL;
576 	}
577 
578 	rx = __rounddown_pow_of_two(ringparam->rx_pending);
579 	if (rx != ringparam->rx_pending)
580 		netdev_notice(netdev,
581 			      "rx ring parameter rounded to power of two: %u\n",
582 			      rx);
583 
584 	tx = __rounddown_pow_of_two(ringparam->tx_pending);
585 	if (tx != ringparam->tx_pending)
586 		netdev_notice(netdev,
587 			      "tx ring parameter rounded to power of two: %u\n",
588 			      tx);
589 
590 	if ((rx == pdata->rx_desc_count) &&
591 	    (tx == pdata->tx_desc_count))
592 		goto out;
593 
594 	pdata->rx_desc_count = rx;
595 	pdata->tx_desc_count = tx;
596 
597 	xgbe_restart_dev(pdata);
598 
599 out:
600 	return 0;
601 }
602 
603 static void xgbe_get_channels(struct net_device *netdev,
604 			      struct ethtool_channels *channels)
605 {
606 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
607 	unsigned int rx, tx, combined;
608 
609 	/* Calculate maximums allowed:
610 	 *   - Take into account the number of available IRQs
611 	 *   - Do not take into account the number of online CPUs so that
612 	 *     the user can over-subscribe if desired
613 	 *   - Tx is additionally limited by the number of hardware queues
614 	 */
615 	rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
616 	rx = min(rx, pdata->channel_irq_count);
617 	tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
618 	tx = min(tx, pdata->channel_irq_count);
619 	tx = min(tx, pdata->tx_max_q_count);
620 
621 	combined = min(rx, tx);
622 
623 	channels->max_combined = combined;
624 	channels->max_rx = rx ? rx - 1 : 0;
625 	channels->max_tx = tx ? tx - 1 : 0;
626 
627 	/* Get current settings based on device state */
628 	rx = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
629 	tx = pdata->new_tx_ring_count ? : pdata->tx_ring_count;
630 
631 	combined = min(rx, tx);
632 	rx -= combined;
633 	tx -= combined;
634 
635 	channels->combined_count = combined;
636 	channels->rx_count = rx;
637 	channels->tx_count = tx;
638 }
639 
/* Echo the user's channel request to the log to aid debugging of
 * rejected xgbe_set_channels() settings.
 */
static void xgbe_print_set_channels_input(struct net_device *netdev,
					  struct ethtool_channels *channels)
{
	netdev_err(netdev, "channel inputs: combined=%u, rx-only=%u, tx-only=%u\n",
		   channels->combined_count, channels->rx_count,
		   channels->tx_count);
}
647 
/* Validate a new channel configuration and, if it differs from the
 * current one, schedule it and fully restart the device to apply it.
 */
static int xgbe_set_channels(struct net_device *netdev,
			     struct ethtool_channels *channels)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	unsigned int rx, rx_curr, tx, tx_curr, combined;

	/* Calculate maximums allowed:
	 *   - Take into account the number of available IRQs
	 *   - Do not take into account the number of online CPUs so that
	 *     the user can over-subscribe if desired
	 *   - Tx is additionally limited by the number of hardware queues
	 */
	rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
	rx = min(rx, pdata->channel_irq_count);
	tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
	tx = min(tx, pdata->tx_max_q_count);
	tx = min(tx, pdata->channel_irq_count);

	combined = min(rx, tx);

	/* Should not be setting other count */
	if (channels->other_count) {
		netdev_err(netdev,
			   "other channel count must be zero\n");
		return -EINVAL;
	}

	/* Require at least one Combined (Rx and Tx) channel */
	if (!channels->combined_count) {
		netdev_err(netdev,
			   "at least one combined Rx/Tx channel is required\n");
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	/* Check combined channels */
	if (channels->combined_count > combined) {
		netdev_err(netdev,
			   "combined channel count cannot exceed %u\n",
			   combined);
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	/* Can have some Rx-only or Tx-only channels, but not both */
	if (channels->rx_count && channels->tx_count) {
		netdev_err(netdev,
			   "cannot specify both Rx-only and Tx-only channels\n");
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	/* Check that we don't exceed the maximum number of channels */
	if ((channels->combined_count + channels->rx_count) > rx) {
		netdev_err(netdev,
			   "total Rx channels (%u) requested exceeds maximum available (%u)\n",
			   channels->combined_count + channels->rx_count, rx);
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	if ((channels->combined_count + channels->tx_count) > tx) {
		netdev_err(netdev,
			   "total Tx channels (%u) requested exceeds maximum available (%u)\n",
			   channels->combined_count + channels->tx_count, tx);
		xgbe_print_set_channels_input(netdev, channels);
		return -EINVAL;
	}

	/* Translate the request into total Rx and Tx ring counts */
	rx = channels->combined_count + channels->rx_count;
	tx = channels->combined_count + channels->tx_count;

	/* Compare against pending counts if a reconfiguration is already
	 * queued, otherwise against the active ring counts
	 */
	rx_curr = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
	tx_curr = pdata->new_tx_ring_count ? : pdata->tx_ring_count;

	if ((rx == rx_curr) && (tx == tx_curr))
		goto out;

	pdata->new_rx_ring_count = rx;
	pdata->new_tx_ring_count = tx;

	/* A full restart is needed to change the channel/IRQ layout */
	xgbe_full_restart_dev(pdata);

out:
	return 0;
}
734 
/* ethtool operations supported by the xgbe driver */
static const struct ethtool_ops xgbe_ethtool_ops = {
	/* Only usec and max-frames coalescing knobs are accepted */
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = xgbe_get_drvinfo,
	.get_msglevel = xgbe_get_msglevel,
	.set_msglevel = xgbe_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_coalesce = xgbe_get_coalesce,
	.set_coalesce = xgbe_set_coalesce,
	.get_pauseparam = xgbe_get_pauseparam,
	.set_pauseparam = xgbe_set_pauseparam,
	.get_strings = xgbe_get_strings,
	.get_ethtool_stats = xgbe_get_ethtool_stats,
	.get_sset_count = xgbe_get_sset_count,
	.get_rxnfc = xgbe_get_rxnfc,
	.get_rxfh_key_size = xgbe_get_rxfh_key_size,
	.get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
	.get_rxfh = xgbe_get_rxfh,
	.set_rxfh = xgbe_set_rxfh,
	.get_ts_info = xgbe_get_ts_info,
	.get_link_ksettings = xgbe_get_link_ksettings,
	.set_link_ksettings = xgbe_set_link_ksettings,
	.get_module_info = xgbe_get_module_info,
	.get_module_eeprom = xgbe_get_module_eeprom,
	.get_ringparam = xgbe_get_ringparam,
	.set_ringparam = xgbe_set_ringparam,
	.get_channels = xgbe_get_channels,
	.set_channels = xgbe_set_channels,
};
764 
/* Return the ethtool operations table for xgbe network devices */
const struct ethtool_ops *xgbe_get_ethtool_ops(void)
{
	return &xgbe_ethtool_ops;
}
769