xref: /linux/drivers/net/ethernet/cisco/enic/enic_ethtool.c (revision 63307d015b91e626c97bb82e88054af3d0b74643)
1 /**
2  * Copyright 2013 Cisco Systems, Inc.  All rights reserved.
3  *
4  * This program is free software; you may redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; version 2 of the License.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15  * SOFTWARE.
16  *
17  */
18 
19 #include <linux/netdevice.h>
20 #include <linux/ethtool.h>
21 #include <linux/net_tstamp.h>
22 
23 #include "enic_res.h"
24 #include "enic.h"
25 #include "enic_dev.h"
26 #include "enic_clsf.h"
27 #include "vnic_rss.h"
28 #include "vnic_stats.h"
29 
/* Binds an ethtool stat string to the u64 slot it occupies within the
 * corresponding vNIC stats structure (tx, rx, or gen).
 */
struct enic_stat {
	char name[ETH_GSTRING_LEN];	/* name reported via ETH_SS_STATS */
	unsigned int index;		/* u64 index into the stats struct */
};
34 
/* Initializers for enic_stat entries: the stat's field name doubles as
 * the ethtool string, and its byte offset in the per-direction vNIC
 * stats struct is converted to a u64 array index.
 */
#define ENIC_TX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
}

#define ENIC_RX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
}

#define ENIC_GEN_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
}
49 
/* TX stats exported to ethtool, sourced from struct vnic_tx_stats */
static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};
63 
/* RX stats exported to ethtool, sourced from struct vnic_rx_stats */
static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};
87 
/* Driver-generated (software) stats, sourced from struct vnic_gen_stats */
static const struct enic_stat enic_gen_stats[] = {
	ENIC_GEN_STAT(dma_map_error),
};
91 
/* Element counts used by get_sset_count/get_strings/get_ethtool_stats */
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
95 
96 static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
97 {
98 	int i;
99 	int intr;
100 
101 	for (i = 0; i < enic->rq_count; i++) {
102 		intr = enic_msix_rq_intr(enic, i);
103 		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
104 	}
105 }
106 
107 static int enic_get_ksettings(struct net_device *netdev,
108 			      struct ethtool_link_ksettings *ecmd)
109 {
110 	struct enic *enic = netdev_priv(netdev);
111 	struct ethtool_link_settings *base = &ecmd->base;
112 
113 	ethtool_link_ksettings_add_link_mode(ecmd, supported,
114 					     10000baseT_Full);
115 	ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
116 	ethtool_link_ksettings_add_link_mode(ecmd, advertising,
117 					     10000baseT_Full);
118 	ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
119 	base->port = PORT_FIBRE;
120 
121 	if (netif_carrier_ok(netdev)) {
122 		base->speed = vnic_dev_port_speed(enic->vdev);
123 		base->duplex = DUPLEX_FULL;
124 	} else {
125 		base->speed = SPEED_UNKNOWN;
126 		base->duplex = DUPLEX_UNKNOWN;
127 	}
128 
129 	base->autoneg = AUTONEG_DISABLE;
130 
131 	return 0;
132 }
133 
/* ethtool get_drvinfo: report driver name/version, firmware version and
 * PCI bus address.  Firmware info is refreshed via devcmd; on a devcmd
 * failure the previously cached fw_info is reported instead.
 */
static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;
	int err;

	err = enic_dev_fw_info(enic, &fw_info);
	/* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
	 * For other failures, like devcmd failure, we return previously
	 * recorded info.
	 */
	if (err == -ENOMEM)
		return;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}
156 
157 static void enic_get_strings(struct net_device *netdev, u32 stringset,
158 	u8 *data)
159 {
160 	unsigned int i;
161 
162 	switch (stringset) {
163 	case ETH_SS_STATS:
164 		for (i = 0; i < enic_n_tx_stats; i++) {
165 			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
166 			data += ETH_GSTRING_LEN;
167 		}
168 		for (i = 0; i < enic_n_rx_stats; i++) {
169 			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
170 			data += ETH_GSTRING_LEN;
171 		}
172 		for (i = 0; i < enic_n_gen_stats; i++) {
173 			memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
174 			data += ETH_GSTRING_LEN;
175 		}
176 		break;
177 	}
178 }
179 
180 static void enic_get_ringparam(struct net_device *netdev,
181 			       struct ethtool_ringparam *ring)
182 {
183 	struct enic *enic = netdev_priv(netdev);
184 	struct vnic_enet_config *c = &enic->config;
185 
186 	ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
187 	ring->rx_pending = c->rq_desc_count;
188 	ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
189 	ring->tx_pending = c->wq_desc_count;
190 }
191 
/* ethtool set_ringparam: resize the RQ/WQ descriptor rings.  Requires a
 * full close/realloc/reopen cycle; on failure the previous descriptor
 * counts are restored in the config (though the device may be left
 * closed if the reopen itself fails).
 */
static int enic_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_enet_config *c = &enic->config;
	int running = netif_running(netdev);
	unsigned int rx_pending;
	unsigned int tx_pending;
	int err = 0;

	/* mini and jumbo rings are not supported by this hardware */
	if (ring->rx_mini_max_pending || ring->rx_mini_pending) {
		netdev_info(netdev,
			    "modifying mini ring params is not supported");
		return -EINVAL;
	}
	if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) {
		netdev_info(netdev,
			    "modifying jumbo ring params is not supported");
		return -EINVAL;
	}
	/* remember current counts so we can roll back on failure */
	rx_pending = c->rq_desc_count;
	tx_pending = c->wq_desc_count;
	if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
	    ring->rx_pending < ENIC_MIN_RQ_DESCS) {
		netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
			    ring->rx_pending, ENIC_MIN_RQ_DESCS,
			    ENIC_MAX_RQ_DESCS);
		return -EINVAL;
	}
	if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
	    ring->tx_pending < ENIC_MIN_WQ_DESCS) {
		netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
			    ring->tx_pending, ENIC_MIN_WQ_DESCS,
			    ENIC_MAX_WQ_DESCS);
		return -EINVAL;
	}
	/* rings cannot be resized while the interface is up */
	if (running)
		dev_close(netdev);
	c->rq_desc_count =
		ring->rx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
	c->wq_desc_count =
		ring->tx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
	enic_free_vnic_resources(enic);
	err = enic_alloc_vnic_resources(enic);
	if (err) {
		netdev_err(netdev,
			   "Failed to alloc vNIC resources, aborting\n");
		enic_free_vnic_resources(enic);
		goto err_out;
	}
	enic_init_vnic_resources(enic);
	if (running) {
		err = dev_open(netdev, NULL);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	/* restore the descriptor counts that were in effect before */
	c->rq_desc_count = rx_pending;
	c->wq_desc_count = tx_pending;
	return err;
}
254 
255 static int enic_get_sset_count(struct net_device *netdev, int sset)
256 {
257 	switch (sset) {
258 	case ETH_SS_STATS:
259 		return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
260 	default:
261 		return -EOPNOTSUPP;
262 	}
263 }
264 
265 static void enic_get_ethtool_stats(struct net_device *netdev,
266 	struct ethtool_stats *stats, u64 *data)
267 {
268 	struct enic *enic = netdev_priv(netdev);
269 	struct vnic_stats *vstats;
270 	unsigned int i;
271 	int err;
272 
273 	err = enic_dev_stats_dump(enic, &vstats);
274 	/* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
275 	 * For other failures, like devcmd failure, we return previously
276 	 * recorded stats.
277 	 */
278 	if (err == -ENOMEM)
279 		return;
280 
281 	for (i = 0; i < enic_n_tx_stats; i++)
282 		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
283 	for (i = 0; i < enic_n_rx_stats; i++)
284 		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
285 	for (i = 0; i < enic_n_gen_stats; i++)
286 		*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
287 }
288 
289 static u32 enic_get_msglevel(struct net_device *netdev)
290 {
291 	struct enic *enic = netdev_priv(netdev);
292 	return enic->msg_enable;
293 }
294 
295 static void enic_set_msglevel(struct net_device *netdev, u32 value)
296 {
297 	struct enic *enic = netdev_priv(netdev);
298 	enic->msg_enable = value;
299 }
300 
301 static int enic_get_coalesce(struct net_device *netdev,
302 	struct ethtool_coalesce *ecmd)
303 {
304 	struct enic *enic = netdev_priv(netdev);
305 	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
306 
307 	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
308 		ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
309 	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
310 	if (rxcoal->use_adaptive_rx_coalesce)
311 		ecmd->use_adaptive_rx_coalesce = 1;
312 	ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
313 	ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
314 
315 	return 0;
316 }
317 
/* Validate a set_coalesce request.  Rejects every coalescing knob the
 * hardware does not implement, rejects tx_coalesce_usecs outside MSI-X
 * mode, warns (but does not fail) when requested values exceed the
 * adapter maximum, and enforces the adaptive-coalescing range
 * constraint (high must exceed low by at least ENIC_AIC_LARGE_PKT_DIFF
 * after clamping).  Returns 0 if acceptable, -EINVAL otherwise.
 */
static int enic_coalesce_valid(struct enic *enic,
			       struct ethtool_coalesce *ec)
{
	u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
					   ec->rx_coalesce_usecs_high);
	u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
					  ec->rx_coalesce_usecs_low);

	/* any parameter the hardware cannot honor is an error */
	if (ec->rx_max_coalesced_frames		||
	    ec->rx_coalesce_usecs_irq		||
	    ec->rx_max_coalesced_frames_irq	||
	    ec->tx_max_coalesced_frames		||
	    ec->tx_coalesce_usecs_irq		||
	    ec->tx_max_coalesced_frames_irq	||
	    ec->stats_block_coalesce_usecs	||
	    ec->use_adaptive_tx_coalesce	||
	    ec->pkt_rate_low			||
	    ec->rx_max_coalesced_frames_low	||
	    ec->tx_coalesce_usecs_low		||
	    ec->tx_max_coalesced_frames_low	||
	    ec->pkt_rate_high			||
	    ec->rx_max_coalesced_frames_high	||
	    ec->tx_coalesce_usecs_high		||
	    ec->tx_max_coalesced_frames_high	||
	    ec->rate_sample_interval)
		return -EINVAL;

	/* a separate TX timer exists only in MSI-X mode */
	if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
	    ec->tx_coalesce_usecs)
		return -EINVAL;

	/* values past the adapter max are clamped, not rejected */
	if ((ec->tx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_low > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
		netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
			    coalesce_usecs_max);

	/* adaptive range needs headroom between low and high watermarks */
	if (ec->rx_coalesce_usecs_high &&
	    (rx_coalesce_usecs_high <
	     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
		return -EINVAL;

	return 0;
}
364 
/* ethtool set_coalesce: validate, clamp each requested value to the
 * adapter maximum, then program TX timers (MSI-X only), the fixed RX
 * timer (unless adaptive coalescing is on) and the adaptive-coalescing
 * range parameters.
 */
static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_usecs_low;
	u32 rx_coalesce_usecs_high;
	u32 coalesce_usecs_max;
	unsigned int i, intr;
	int ret;
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	ret = enic_coalesce_valid(enic, ecmd);
	if (ret)
		return ret;
	/* clamp everything to the adapter's max coalescing timer */
	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
				  coalesce_usecs_max);
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
				  coalesce_usecs_max);

	rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
				      coalesce_usecs_max);
	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
				       coalesce_usecs_max);

	/* TX timers are per-WQ and exist only under MSI-X */
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
						       tx_coalesce_usecs);
		}
		enic->tx_coalesce_usecs = tx_coalesce_usecs;
	}
	rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
	/* with adaptive coalescing on, the RX timer is driven elsewhere */
	if (!rxcoal->use_adaptive_rx_coalesce)
		enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
	if (ecmd->rx_coalesce_usecs_high) {
		rxcoal->range_end = rx_coalesce_usecs_high;
		rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
		rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
						ENIC_AIC_LARGE_PKT_DIFF;
	}

	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}
414 
/* ETHTOOL_GRXCLSRLALL helper: walk every hash bucket of the RX flow
 * filter table and collect each filter's id into @rule_locs, up to
 * cmd->rule_cnt entries.  Caller holds enic->rfs_h.lock.  Returns
 * -EMSGSIZE if the caller's array is too small, else 0 with
 * cmd->rule_cnt set to the number of ids written.
 */
static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int j, ret = 0, cnt = 0;

	cmd->data = enic->rfs_h.max - enic->rfs_h.free;
	for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[j];
		/* NOTE(review): nothing is removed during this walk, so the
		 * _safe iterator looks unnecessary — harmless, though
		 */
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			if (cnt == cmd->rule_cnt)
				return -EMSGSIZE;
			rule_locs[cnt] = n->fltr_id;
			cnt++;
		}
	}
	cmd->rule_cnt = cnt;

	return ret;
}
438 
439 static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
440 {
441 	struct ethtool_rx_flow_spec *fsp =
442 				(struct ethtool_rx_flow_spec *)&cmd->fs;
443 	struct enic_rfs_fltr_node *n;
444 
445 	n = htbl_fltr_search(enic, (u16)fsp->location);
446 	if (!n)
447 		return -EINVAL;
448 	switch (n->keys.basic.ip_proto) {
449 	case IPPROTO_TCP:
450 		fsp->flow_type = TCP_V4_FLOW;
451 		break;
452 	case IPPROTO_UDP:
453 		fsp->flow_type = UDP_V4_FLOW;
454 		break;
455 	default:
456 		return -EINVAL;
457 		break;
458 	}
459 
460 	fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
461 	fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
462 
463 	fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
464 	fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
465 
466 	fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
467 	fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
468 
469 	fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
470 	fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
471 
472 	fsp->ring_cookie = n->rq_id;
473 
474 	return 0;
475 }
476 
/* ETHTOOL_GRXFH helper: report which header fields feed the RSS hash
 * for cmd->flow_type.  TCP always hashes on the 4-tuple; UDP includes
 * ports only when the adapter advertises UDP RSS capability; other IP
 * flows hash on addresses only.  Returns -EINVAL for unknown types.
 */
static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
{
	u8 rss_hash_type = 0;
	cmd->data = 0;

	/* devcmd access is serialized by devcmd_lock; result ignored on
	 * failure, leaving rss_hash_type at 0 (no UDP port hashing)
	 */
	spin_lock_bh(&enic->devcmd_lock);
	(void)vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
	spin_unlock_bh(&enic->devcmd_lock);
	switch (cmd->flow_type) {
	case TCP_V6_FLOW:
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
			     RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
519 
520 static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
521 			  u32 *rule_locs)
522 {
523 	struct enic *enic = netdev_priv(dev);
524 	int ret = 0;
525 
526 	switch (cmd->cmd) {
527 	case ETHTOOL_GRXRINGS:
528 		cmd->data = enic->rq_count;
529 		break;
530 	case ETHTOOL_GRXCLSRLCNT:
531 		spin_lock_bh(&enic->rfs_h.lock);
532 		cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
533 		cmd->data = enic->rfs_h.max;
534 		spin_unlock_bh(&enic->rfs_h.lock);
535 		break;
536 	case ETHTOOL_GRXCLSRLALL:
537 		spin_lock_bh(&enic->rfs_h.lock);
538 		ret = enic_grxclsrlall(enic, cmd, rule_locs);
539 		spin_unlock_bh(&enic->rfs_h.lock);
540 		break;
541 	case ETHTOOL_GRXCLSRULE:
542 		spin_lock_bh(&enic->rfs_h.lock);
543 		ret = enic_grxclsrule(enic, cmd);
544 		spin_unlock_bh(&enic->rfs_h.lock);
545 		break;
546 	case ETHTOOL_GRXFH:
547 		ret = enic_get_rx_flow_hash(enic, cmd);
548 		break;
549 	default:
550 		ret = -EOPNOTSUPP;
551 		break;
552 	}
553 
554 	return ret;
555 }
556 
557 static int enic_get_tunable(struct net_device *dev,
558 			    const struct ethtool_tunable *tuna, void *data)
559 {
560 	struct enic *enic = netdev_priv(dev);
561 	int ret = 0;
562 
563 	switch (tuna->id) {
564 	case ETHTOOL_RX_COPYBREAK:
565 		*(u32 *)data = enic->rx_copybreak;
566 		break;
567 	default:
568 		ret = -EINVAL;
569 		break;
570 	}
571 
572 	return ret;
573 }
574 
575 static int enic_set_tunable(struct net_device *dev,
576 			    const struct ethtool_tunable *tuna,
577 			    const void *data)
578 {
579 	struct enic *enic = netdev_priv(dev);
580 	int ret = 0;
581 
582 	switch (tuna->id) {
583 	case ETHTOOL_RX_COPYBREAK:
584 		enic->rx_copybreak = *(u32 *)data;
585 		break;
586 	default:
587 		ret = -EINVAL;
588 		break;
589 	}
590 
591 	return ret;
592 }
593 
/* ethtool get_rxfh_key_size: RSS hash key length in bytes */
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
	return ENIC_RSS_LEN;
}
598 
599 static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
600 			 u8 *hfunc)
601 {
602 	struct enic *enic = netdev_priv(netdev);
603 
604 	if (hkey)
605 		memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);
606 
607 	if (hfunc)
608 		*hfunc = ETH_RSS_HASH_TOP;
609 
610 	return 0;
611 }
612 
613 static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
614 			 const u8 *hkey, const u8 hfunc)
615 {
616 	struct enic *enic = netdev_priv(netdev);
617 
618 	if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
619 	    indir)
620 		return -EINVAL;
621 
622 	if (hkey)
623 		memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);
624 
625 	return __enic_set_rsskey(enic);
626 }
627 
628 static int enic_get_ts_info(struct net_device *netdev,
629 			    struct ethtool_ts_info *info)
630 {
631 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
632 				SOF_TIMESTAMPING_RX_SOFTWARE |
633 				SOF_TIMESTAMPING_SOFTWARE;
634 
635 	return 0;
636 }
637 
/* ethtool operations supported by the enic driver */
static const struct ethtool_ops enic_ethtool_ops = {
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_ringparam = enic_get_ringparam,
	.set_ringparam = enic_set_ringparam,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_rxnfc = enic_get_rxnfc,
	.get_tunable = enic_get_tunable,
	.set_tunable = enic_set_tunable,
	.get_rxfh_key_size = enic_get_rxfh_key_size,
	.get_rxfh = enic_get_rxfh,
	.set_rxfh = enic_set_rxfh,
	.get_link_ksettings = enic_get_ksettings,
	.get_ts_info = enic_get_ts_info,
};
659 
/* Attach the enic ethtool operations to @netdev; called at probe time */
void enic_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &enic_ethtool_ops;
}
664