// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2013 Cisco Systems, Inc.  All rights reserved.

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>

#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_clsf.h"
#include "vnic_rss.h"
#include "vnic_stats.h"

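/* An enic_stat maps an ethtool string-set name to the u64-slot index of
 * that counter within the corresponding vNIC stats structure, so the
 * dump loop in enic_get_ethtool_stats() can index the stats blocks
 * directly.
 */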
struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

#define ENIC_TX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
}

#define ENIC_RX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
}

#define ENIC_GEN_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64) \
}

static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const struct enic_stat enic_gen_stats[] = {
	ENIC_GEN_STAT(dma_map_error),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);

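/* Program the RX coalescing timer on the MSI-X vector of every RQ. */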
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
	int i;
	int intr;

	for (i = 0; i < enic->rq_count; i++) {
		intr = enic_msix_rq_intr(enic, i);
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
	}
}

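/* The link is reported as fibre with autonegotiation disabled; only the
 * 10G full-duplex mode is advertised. Speed and duplex are valid only
 * while the carrier is up.
 */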
static int enic_get_ksettings(struct net_device *netdev,
			      struct ethtool_link_ksettings *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	struct ethtool_link_settings *base = &ecmd->base;

	ethtool_link_ksettings_add_link_mode(ecmd, supported,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
	base->port = PORT_FIBRE;

	if (netif_carrier_ok(netdev)) {
		base->speed = vnic_dev_port_speed(enic->vdev);
		base->duplex = DUPLEX_FULL;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}

	base->autoneg = AUTONEG_DISABLE;

	return 0;
}

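/* Report driver name, firmware version and PCI bus info. */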
static void enic_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;
	int err;

	err = enic_dev_fw_info(enic, &fw_info);
	/* Bail out only when dma_alloc_coherent() fails inside
	 * vnic_dev_fw_info(); for other failures, such as a devcmd
	 * failure, report the previously recorded info.
	 */
	if (err == -ENOMEM)
		return;

	strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strscpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strscpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

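/* Emit the stat names in exactly the order enic_get_ethtool_stats()
 * fills in the values: TX, then RX, then generic driver stats.
 */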
static void enic_get_strings(struct net_device *netdev, u32 stringset,
			     u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_gen_stats; i++) {
			memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static void enic_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_enet_config *c = &enic->config;

	ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
	ring->rx_pending = c->rq_desc_count;
	ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
	ring->tx_pending = c->wq_desc_count;
}

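/* Resize the RX/TX rings. Requested counts are validated against the
 * device min/max and rounded down to a multiple of 32. If the interface
 * is running it is closed, vNIC resources are reallocated with the new
 * sizes and the interface is reopened; on failure the previous ring
 * sizes are restored.
 */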
static int enic_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_enet_config *c = &enic->config;
	int running = netif_running(netdev);
	unsigned int rx_pending;
	unsigned int tx_pending;
	int err = 0;

	if (ring->rx_mini_max_pending || ring->rx_mini_pending) {
		netdev_info(netdev,
			    "modifying mini ring params is not supported");
		return -EINVAL;
	}
	if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) {
		netdev_info(netdev,
			    "modifying jumbo ring params is not supported");
		return -EINVAL;
	}
	rx_pending = c->rq_desc_count;
	tx_pending = c->wq_desc_count;
	if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
	    ring->rx_pending < ENIC_MIN_RQ_DESCS) {
		netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
			    ring->rx_pending, ENIC_MIN_RQ_DESCS,
			    ENIC_MAX_RQ_DESCS);
		return -EINVAL;
	}
	if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
	    ring->tx_pending < ENIC_MIN_WQ_DESCS) {
		netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
			    ring->tx_pending, ENIC_MIN_WQ_DESCS,
			    ENIC_MAX_WQ_DESCS);
		return -EINVAL;
	}
	if (running)
		dev_close(netdev);
	c->rq_desc_count =
		ring->rx_pending & 0xffffffe0; /* must be a multiple of 32 */
	c->wq_desc_count =
		ring->tx_pending & 0xffffffe0; /* must be a multiple of 32 */
	enic_free_vnic_resources(enic);
	err = enic_alloc_vnic_resources(enic);
	if (err) {
		netdev_err(netdev,
			   "Failed to alloc vNIC resources, aborting\n");
		enic_free_vnic_resources(enic);
		goto err_out;
	}
	enic_init_vnic_resources(enic);
	if (running) {
		err = dev_open(netdev, NULL);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	c->rq_desc_count = rx_pending;
	c->wq_desc_count = tx_pending;
	return err;
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void enic_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;
	int err;

	err = enic_dev_stats_dump(enic, &vstats);
	/* Bail out only when dma_alloc_coherent() fails inside
	 * vnic_dev_stats_dump(); for other failures, such as a devcmd
	 * failure, report the previously recorded stats.
	 */
	if (err == -ENOMEM)
		return;

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
	for (i = 0; i < enic_n_gen_stats; i++)
		*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);

	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);

	enic->msg_enable = value;
}

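/* TX coalescing is only reported in MSI-X mode, matching
 * enic_set_coalesce() below; the low/high values describe the adaptive
 * RX coalescing range.
 */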
static int enic_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ecmd,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
	if (rxcoal->use_adaptive_rx_coalesce)
		ecmd->use_adaptive_rx_coalesce = 1;
	ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
	ecmd->rx_coalesce_usecs_high = rxcoal->range_end;

	return 0;
}

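/* Reject TX coalescing outside MSI-X mode, warn when any requested
 * value exceeds the adapter maximum (the caller clamps such values),
 * and make sure the adaptive range leaves room for the large-packet
 * offset above the low bound.
 */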
static int enic_coalesce_valid(struct enic *enic,
			       struct ethtool_coalesce *ec)
{
	u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
					   ec->rx_coalesce_usecs_high);
	u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
					  ec->rx_coalesce_usecs_low);

	if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
	    ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_low > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
		netdev_info(enic->netdev, "ethtool_set_coalesce: adapter supports max coalesce value of %u. Setting max value.\n",
			    coalesce_usecs_max);

	if (ec->rx_coalesce_usecs_high &&
	    (rx_coalesce_usecs_high <
	     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
		return -EINVAL;

	return 0;
}

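/* Apply new coalescing settings: in MSI-X mode the TX timer is written
 * to every WQ vector; the RX timer is only written directly when
 * adaptive RX coalescing is off, otherwise the adaptive range is
 * updated instead.
 */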
static int enic_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ecmd,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_usecs_low;
	u32 rx_coalesce_usecs_high;
	u32 coalesce_usecs_max;
	unsigned int i, intr;
	int ret;
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	ret = enic_coalesce_valid(enic, ecmd);
	if (ret)
		return ret;
	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
				  coalesce_usecs_max);
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
				  coalesce_usecs_max);

	rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
				      coalesce_usecs_max);
	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
				       coalesce_usecs_max);

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
						       tx_coalesce_usecs);
		}
		enic->tx_coalesce_usecs = tx_coalesce_usecs;
	}
	rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
	if (!rxcoal->use_adaptive_rx_coalesce)
		enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
	if (ecmd->rx_coalesce_usecs_high) {
		rxcoal->range_end = rx_coalesce_usecs_high;
		rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
		rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
						ENIC_AIC_LARGE_PKT_DIFF;
	}

	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}

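/* Walk the ntuple filter hash table and report the IDs of all active
 * rules. Caller must hold enic->rfs_h.lock.
 */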
static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int j, ret = 0, cnt = 0;

	cmd->data = enic->rfs_h.max - enic->rfs_h.free;
	for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[j];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			if (cnt == cmd->rule_cnt)
				return -EMSGSIZE;
			rule_locs[cnt] = n->fltr_id;
			cnt++;
		}
	}
	cmd->rule_cnt = cnt;

	return ret;
}

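/* Translate a stored filter node back into an ethtool_rx_flow_spec.
 * Only exact-match IPv4 TCP/UDP 4-tuple rules are representable, so the
 * masks are always all-ones. Caller must hold enic->rfs_h.lock.
 */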
static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
				(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct enic_rfs_fltr_node *n;

	n = htbl_fltr_search(enic, (u16)fsp->location);
	if (!n)
		return -EINVAL;
	switch (n->keys.basic.ip_proto) {
	case IPPROTO_TCP:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IPPROTO_UDP:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
	fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;

	fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
	fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;

	fsp->ring_cookie = n->rq_id;

	return 0;
}

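/* Report which header fields feed the RSS hash for a given flow type.
 * IP src/dst always contribute; L4 ports contribute for TCP, and for
 * UDP only when the device advertises UDP RSS capability.
 */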
static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
{
	u8 rss_hash_type = 0;

	cmd->data = 0;

	spin_lock_bh(&enic->devcmd_lock);
	(void)vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
	spin_unlock_bh(&enic->devcmd_lock);
	switch (cmd->flow_type) {
	case TCP_V6_FLOW:
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
			     RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = enic->rq_count;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		spin_lock_bh(&enic->rfs_h.lock);
		cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
		cmd->data = enic->rfs_h.max;
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRLALL:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrlall(enic, cmd, rule_locs);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRULE:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrule(enic, cmd);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXFH:
		ret = enic_get_rx_flow_hash(enic, cmd);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int enic_get_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna, void *data)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = enic->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int enic_set_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna,
			    const void *data)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		enic->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

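/* RSS: only the ENIC_RSS_LEN-byte Toeplitz hash key is exposed; setting
 * an indirection table or any hash function other than Toeplitz is
 * rejected.
 */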
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
	return ENIC_RSS_LEN;
}

static int enic_get_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct enic *enic = netdev_priv(netdev);

	if (rxfh->key)
		memcpy(rxfh->key, enic->rss_key, ENIC_RSS_LEN);

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

static int enic_set_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct enic *enic = netdev_priv(netdev);

	if (rxfh->indir ||
	    (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	     rxfh->hfunc != ETH_RSS_HASH_TOP))
		return -EINVAL;

	if (rxfh->key)
		memcpy(enic->rss_key, rxfh->key, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}

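/* Only software timestamping is advertised; no PTP hardware clock is
 * exposed through this driver.
 */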
static int enic_get_ts_info(struct net_device *netdev,
			    struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	return 0;
}

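/* Channel counts are fixed by the interrupt mode probed at init time:
 * per-queue RX/TX vectors under MSI-X, a single combined channel under
 * MSI or INTx. There is no set_channels handler.
 */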
static void enic_get_channels(struct net_device *netdev,
			      struct ethtool_channels *channels)
{
	struct enic *enic = netdev_priv(netdev);

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		channels->max_rx = ENIC_RQ_MAX;
		channels->max_tx = ENIC_WQ_MAX;
		channels->rx_count = enic->rq_count;
		channels->tx_count = enic->wq_count;
		break;
	case VNIC_DEV_INTR_MODE_MSI:
	case VNIC_DEV_INTR_MODE_INTX:
		channels->max_combined = 1;
		channels->combined_count = 1;
		break;
	default:
		break;
	}
}

static const struct ethtool_ops enic_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_RX_USECS_LOW |
				     ETHTOOL_COALESCE_RX_USECS_HIGH,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_ringparam = enic_get_ringparam,
	.set_ringparam = enic_set_ringparam,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_rxnfc = enic_get_rxnfc,
	.get_tunable = enic_get_tunable,
	.set_tunable = enic_set_tunable,
	.get_rxfh_key_size = enic_get_rxfh_key_size,
	.get_rxfh = enic_get_rxfh,
	.set_rxfh = enic_set_rxfh,
	.get_link_ksettings = enic_get_ksettings,
	.get_ts_info = enic_get_ts_info,
	.get_channels = enic_get_channels,
};

void enic_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &enic_ethtool_ops;
}