xref: /linux/drivers/net/ethernet/cisco/enic/enic_ethtool.c (revision 9410645520e9b820069761f3450ef6661418e279)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright 2013 Cisco Systems, Inc.  All rights reserved.
3 
4 #include <linux/netdevice.h>
5 #include <linux/ethtool.h>
6 #include <linux/net_tstamp.h>
7 
8 #include "enic_res.h"
9 #include "enic.h"
10 #include "enic_dev.h"
11 #include "enic_clsf.h"
12 #include "vnic_rss.h"
13 #include "vnic_stats.h"
14 
15 struct enic_stat {
16 	char name[ETH_GSTRING_LEN];
17 	unsigned int index;
18 };
19 
20 #define ENIC_TX_STAT(stat) { \
21 	.name = #stat, \
22 	.index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
23 }
24 
25 #define ENIC_RX_STAT(stat) { \
26 	.name = #stat, \
27 	.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
28 }
29 
30 #define ENIC_GEN_STAT(stat) { \
31 	.name = #stat, \
32 	.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
33 }
34 
35 #define ENIC_PER_RQ_STAT(stat) { \
36 	.name = "rq[%d]_"#stat, \
37 	.index = offsetof(struct enic_rq_stats, stat) / sizeof(u64) \
38 }
39 
40 #define ENIC_PER_WQ_STAT(stat) { \
41 	.name = "wq[%d]_"#stat, \
42 	.index = offsetof(struct enic_wq_stats, stat) / sizeof(u64) \
43 }
44 
45 static const struct enic_stat enic_per_rq_stats[] = {
46 	ENIC_PER_RQ_STAT(l4_rss_hash),
47 	ENIC_PER_RQ_STAT(l3_rss_hash),
48 	ENIC_PER_RQ_STAT(csum_unnecessary_encap),
49 	ENIC_PER_RQ_STAT(vlan_stripped),
50 	ENIC_PER_RQ_STAT(napi_complete),
51 	ENIC_PER_RQ_STAT(napi_repoll),
52 	ENIC_PER_RQ_STAT(no_skb),
53 	ENIC_PER_RQ_STAT(desc_skip),
54 };
55 
56 #define NUM_ENIC_PER_RQ_STATS   ARRAY_SIZE(enic_per_rq_stats)
57 
58 static const struct enic_stat enic_per_wq_stats[] = {
59 	ENIC_PER_WQ_STAT(encap_tso),
60 	ENIC_PER_WQ_STAT(encap_csum),
61 	ENIC_PER_WQ_STAT(add_vlan),
62 	ENIC_PER_WQ_STAT(cq_work),
63 	ENIC_PER_WQ_STAT(cq_bytes),
64 	ENIC_PER_WQ_STAT(null_pkt),
65 	ENIC_PER_WQ_STAT(skb_linear_fail),
66 	ENIC_PER_WQ_STAT(desc_full_awake),
67 };
68 
69 #define NUM_ENIC_PER_WQ_STATS   ARRAY_SIZE(enic_per_wq_stats)
70 static const struct enic_stat enic_tx_stats[] = {
71 	ENIC_TX_STAT(tx_frames_ok),
72 	ENIC_TX_STAT(tx_unicast_frames_ok),
73 	ENIC_TX_STAT(tx_multicast_frames_ok),
74 	ENIC_TX_STAT(tx_broadcast_frames_ok),
75 	ENIC_TX_STAT(tx_bytes_ok),
76 	ENIC_TX_STAT(tx_unicast_bytes_ok),
77 	ENIC_TX_STAT(tx_multicast_bytes_ok),
78 	ENIC_TX_STAT(tx_broadcast_bytes_ok),
79 	ENIC_TX_STAT(tx_drops),
80 	ENIC_TX_STAT(tx_errors),
81 	ENIC_TX_STAT(tx_tso),
82 };
83 
84 #define NUM_ENIC_TX_STATS	ARRAY_SIZE(enic_tx_stats)
85 
86 static const struct enic_stat enic_rx_stats[] = {
87 	ENIC_RX_STAT(rx_frames_ok),
88 	ENIC_RX_STAT(rx_frames_total),
89 	ENIC_RX_STAT(rx_unicast_frames_ok),
90 	ENIC_RX_STAT(rx_multicast_frames_ok),
91 	ENIC_RX_STAT(rx_broadcast_frames_ok),
92 	ENIC_RX_STAT(rx_bytes_ok),
93 	ENIC_RX_STAT(rx_unicast_bytes_ok),
94 	ENIC_RX_STAT(rx_multicast_bytes_ok),
95 	ENIC_RX_STAT(rx_broadcast_bytes_ok),
96 	ENIC_RX_STAT(rx_drop),
97 	ENIC_RX_STAT(rx_no_bufs),
98 	ENIC_RX_STAT(rx_errors),
99 	ENIC_RX_STAT(rx_rss),
100 	ENIC_RX_STAT(rx_crc_errors),
101 	ENIC_RX_STAT(rx_frames_64),
102 	ENIC_RX_STAT(rx_frames_127),
103 	ENIC_RX_STAT(rx_frames_255),
104 	ENIC_RX_STAT(rx_frames_511),
105 	ENIC_RX_STAT(rx_frames_1023),
106 	ENIC_RX_STAT(rx_frames_1518),
107 	ENIC_RX_STAT(rx_frames_to_max),
108 };
109 
110 #define NUM_ENIC_RX_STATS	ARRAY_SIZE(enic_rx_stats)
111 
112 static const struct enic_stat enic_gen_stats[] = {
113 	ENIC_GEN_STAT(dma_map_error),
114 };
115 
116 #define NUM_ENIC_GEN_STATS	ARRAY_SIZE(enic_gen_stats)
117 
/* Program the same RX coalescing timer on every RQ's MSI-X interrupt. */
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
	int rq;

	for (rq = 0; rq < enic->rq_count; rq++) {
		int intr = enic_msix_rq_intr(enic, rq);

		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
	}
}
128 
enic_get_ksettings(struct net_device * netdev,struct ethtool_link_ksettings * ecmd)129 static int enic_get_ksettings(struct net_device *netdev,
130 			      struct ethtool_link_ksettings *ecmd)
131 {
132 	struct enic *enic = netdev_priv(netdev);
133 	struct ethtool_link_settings *base = &ecmd->base;
134 
135 	ethtool_link_ksettings_add_link_mode(ecmd, supported,
136 					     10000baseT_Full);
137 	ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
138 	ethtool_link_ksettings_add_link_mode(ecmd, advertising,
139 					     10000baseT_Full);
140 	ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
141 	base->port = PORT_FIBRE;
142 
143 	if (netif_carrier_ok(netdev)) {
144 		base->speed = vnic_dev_port_speed(enic->vdev);
145 		base->duplex = DUPLEX_FULL;
146 	} else {
147 		base->speed = SPEED_UNKNOWN;
148 		base->duplex = DUPLEX_UNKNOWN;
149 	}
150 
151 	base->autoneg = AUTONEG_DISABLE;
152 
153 	return 0;
154 }
155 
enic_get_drvinfo(struct net_device * netdev,struct ethtool_drvinfo * drvinfo)156 static void enic_get_drvinfo(struct net_device *netdev,
157 	struct ethtool_drvinfo *drvinfo)
158 {
159 	struct enic *enic = netdev_priv(netdev);
160 	struct vnic_devcmd_fw_info *fw_info;
161 	int err;
162 
163 	err = enic_dev_fw_info(enic, &fw_info);
164 	/* return only when dma_alloc_coherent fails in vnic_dev_fw_info
165 	 * For other failures, like devcmd failure, we return previously
166 	 * recorded info.
167 	 */
168 	if (err == -ENOMEM)
169 		return;
170 
171 	strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
172 	strscpy(drvinfo->fw_version, fw_info->fw_version,
173 		sizeof(drvinfo->fw_version));
174 	strscpy(drvinfo->bus_info, pci_name(enic->pdev),
175 		sizeof(drvinfo->bus_info));
176 }
177 
/* Emit the stat-name string set in the exact order that
 * enic_get_ethtool_stats() writes the values: TX, RX, generic, then
 * per-RQ and per-WQ names with the queue number substituted for %d.
 */
static void enic_get_strings(struct net_device *netdev, u32 stringset,
	u8 *data)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int q;
	unsigned int s;

	if (stringset != ETH_SS_STATS)
		return;

	for (s = 0; s < NUM_ENIC_TX_STATS; s++) {
		memcpy(data, enic_tx_stats[s].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}
	for (s = 0; s < NUM_ENIC_RX_STATS; s++) {
		memcpy(data, enic_rx_stats[s].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}
	for (s = 0; s < NUM_ENIC_GEN_STATS; s++) {
		memcpy(data, enic_gen_stats[s].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}
	for (q = 0; q < enic->rq_count; q++) {
		for (s = 0; s < NUM_ENIC_PER_RQ_STATS; s++) {
			snprintf(data, ETH_GSTRING_LEN,
				 enic_per_rq_stats[s].name, q);
			data += ETH_GSTRING_LEN;
		}
	}
	for (q = 0; q < enic->wq_count; q++) {
		for (s = 0; s < NUM_ENIC_PER_WQ_STATS; s++) {
			snprintf(data, ETH_GSTRING_LEN,
				 enic_per_wq_stats[s].name, q);
			data += ETH_GSTRING_LEN;
		}
	}
}
216 
enic_get_ringparam(struct net_device * netdev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)217 static void enic_get_ringparam(struct net_device *netdev,
218 			       struct ethtool_ringparam *ring,
219 			       struct kernel_ethtool_ringparam *kernel_ring,
220 			       struct netlink_ext_ack *extack)
221 {
222 	struct enic *enic = netdev_priv(netdev);
223 	struct vnic_enet_config *c = &enic->config;
224 
225 	ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
226 	ring->rx_pending = c->rq_desc_count;
227 	ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
228 	ring->tx_pending = c->wq_desc_count;
229 }
230 
enic_set_ringparam(struct net_device * netdev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)231 static int enic_set_ringparam(struct net_device *netdev,
232 			      struct ethtool_ringparam *ring,
233 			      struct kernel_ethtool_ringparam *kernel_ring,
234 			      struct netlink_ext_ack *extack)
235 {
236 	struct enic *enic = netdev_priv(netdev);
237 	struct vnic_enet_config *c = &enic->config;
238 	int running = netif_running(netdev);
239 	unsigned int rx_pending;
240 	unsigned int tx_pending;
241 	int err = 0;
242 
243 	if (ring->rx_mini_max_pending || ring->rx_mini_pending) {
244 		netdev_info(netdev,
245 			    "modifying mini ring params is not supported");
246 		return -EINVAL;
247 	}
248 	if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) {
249 		netdev_info(netdev,
250 			    "modifying jumbo ring params is not supported");
251 		return -EINVAL;
252 	}
253 	rx_pending = c->rq_desc_count;
254 	tx_pending = c->wq_desc_count;
255 	if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
256 	    ring->rx_pending < ENIC_MIN_RQ_DESCS) {
257 		netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
258 			    ring->rx_pending, ENIC_MIN_RQ_DESCS,
259 			    ENIC_MAX_RQ_DESCS);
260 		return -EINVAL;
261 	}
262 	if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
263 	    ring->tx_pending < ENIC_MIN_WQ_DESCS) {
264 		netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
265 			    ring->tx_pending, ENIC_MIN_WQ_DESCS,
266 			    ENIC_MAX_WQ_DESCS);
267 		return -EINVAL;
268 	}
269 	if (running)
270 		dev_close(netdev);
271 	c->rq_desc_count =
272 		ring->rx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
273 	c->wq_desc_count =
274 		ring->tx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
275 	enic_free_vnic_resources(enic);
276 	err = enic_alloc_vnic_resources(enic);
277 	if (err) {
278 		netdev_err(netdev,
279 			   "Failed to alloc vNIC resources, aborting\n");
280 		enic_free_vnic_resources(enic);
281 		goto err_out;
282 	}
283 	enic_init_vnic_resources(enic);
284 	if (running) {
285 		err = dev_open(netdev, NULL);
286 		if (err)
287 			goto err_out;
288 	}
289 	return 0;
290 err_out:
291 	c->rq_desc_count = rx_pending;
292 	c->wq_desc_count = tx_pending;
293 	return err;
294 }
295 
enic_get_sset_count(struct net_device * netdev,int sset)296 static int enic_get_sset_count(struct net_device *netdev, int sset)
297 {
298 	struct enic *enic = netdev_priv(netdev);
299 	unsigned int n_per_rq_stats;
300 	unsigned int n_per_wq_stats;
301 	unsigned int n_stats;
302 
303 	switch (sset) {
304 	case ETH_SS_STATS:
305 		n_per_rq_stats = NUM_ENIC_PER_RQ_STATS * enic->rq_count;
306 		n_per_wq_stats = NUM_ENIC_PER_WQ_STATS * enic->wq_count;
307 		n_stats = NUM_ENIC_TX_STATS + NUM_ENIC_RX_STATS +
308 			NUM_ENIC_GEN_STATS +
309 			n_per_rq_stats + n_per_wq_stats;
310 		return n_stats;
311 	default:
312 		return -EOPNOTSUPP;
313 	}
314 }
315 
/* Copy stat values into *data in the same order as the string set. */
static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int q;
	unsigned int s;
	int err;

	err = enic_dev_stats_dump(enic, &vstats);
	/* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
	 * For other failures, like devcmd failure, we return previously
	 * recorded stats.
	 */
	if (err == -ENOMEM)
		return;

	for (s = 0; s < NUM_ENIC_TX_STATS; s++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[s].index];
	for (s = 0; s < NUM_ENIC_RX_STATS; s++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[s].index];
	for (s = 0; s < NUM_ENIC_GEN_STATS; s++)
		*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[s].index];

	for (q = 0; q < enic->rq_count; q++) {
		u64 *rqstats = (u64 *)&enic->rq_stats[q];

		for (s = 0; s < NUM_ENIC_PER_RQ_STATS; s++)
			*(data++) = rqstats[enic_per_rq_stats[s].index];
	}
	for (q = 0; q < enic->wq_count; q++) {
		u64 *wqstats = (u64 *)&enic->wq_stats[q];

		for (s = 0; s < NUM_ENIC_PER_WQ_STATS; s++)
			*(data++) = wqstats[enic_per_wq_stats[s].index];
	}
}
358 
enic_get_msglevel(struct net_device * netdev)359 static u32 enic_get_msglevel(struct net_device *netdev)
360 {
361 	struct enic *enic = netdev_priv(netdev);
362 	return enic->msg_enable;
363 }
364 
/* Store a new netif message-level bitmap. */
static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);

	enic->msg_enable = value;
}
370 
enic_get_coalesce(struct net_device * netdev,struct ethtool_coalesce * ecmd,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)371 static int enic_get_coalesce(struct net_device *netdev,
372 			     struct ethtool_coalesce *ecmd,
373 			     struct kernel_ethtool_coalesce *kernel_coal,
374 			     struct netlink_ext_ack *extack)
375 {
376 	struct enic *enic = netdev_priv(netdev);
377 	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
378 
379 	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
380 		ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
381 	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
382 	if (rxcoal->use_adaptive_rx_coalesce)
383 		ecmd->use_adaptive_rx_coalesce = 1;
384 	ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
385 	ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
386 
387 	return 0;
388 }
389 
enic_coalesce_valid(struct enic * enic,struct ethtool_coalesce * ec)390 static int enic_coalesce_valid(struct enic *enic,
391 			       struct ethtool_coalesce *ec)
392 {
393 	u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
394 	u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
395 					   ec->rx_coalesce_usecs_high);
396 	u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
397 					  ec->rx_coalesce_usecs_low);
398 
399 	if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
400 	    ec->tx_coalesce_usecs)
401 		return -EINVAL;
402 
403 	if ((ec->tx_coalesce_usecs > coalesce_usecs_max)	||
404 	    (ec->rx_coalesce_usecs > coalesce_usecs_max)	||
405 	    (ec->rx_coalesce_usecs_low > coalesce_usecs_max)	||
406 	    (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
407 		netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
408 			    coalesce_usecs_max);
409 
410 	if (ec->rx_coalesce_usecs_high &&
411 	    (rx_coalesce_usecs_high <
412 	     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
413 		return -EINVAL;
414 
415 	return 0;
416 }
417 
enic_set_coalesce(struct net_device * netdev,struct ethtool_coalesce * ecmd,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)418 static int enic_set_coalesce(struct net_device *netdev,
419 			     struct ethtool_coalesce *ecmd,
420 			     struct kernel_ethtool_coalesce *kernel_coal,
421 			     struct netlink_ext_ack *extack)
422 {
423 	struct enic *enic = netdev_priv(netdev);
424 	u32 tx_coalesce_usecs;
425 	u32 rx_coalesce_usecs;
426 	u32 rx_coalesce_usecs_low;
427 	u32 rx_coalesce_usecs_high;
428 	u32 coalesce_usecs_max;
429 	unsigned int i, intr;
430 	int ret;
431 	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
432 
433 	ret = enic_coalesce_valid(enic, ecmd);
434 	if (ret)
435 		return ret;
436 	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
437 	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
438 				  coalesce_usecs_max);
439 	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
440 				  coalesce_usecs_max);
441 
442 	rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
443 				      coalesce_usecs_max);
444 	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
445 				       coalesce_usecs_max);
446 
447 	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
448 		for (i = 0; i < enic->wq_count; i++) {
449 			intr = enic_msix_wq_intr(enic, i);
450 			vnic_intr_coalescing_timer_set(&enic->intr[intr],
451 						       tx_coalesce_usecs);
452 		}
453 		enic->tx_coalesce_usecs = tx_coalesce_usecs;
454 	}
455 	rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
456 	if (!rxcoal->use_adaptive_rx_coalesce)
457 		enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
458 	if (ecmd->rx_coalesce_usecs_high) {
459 		rxcoal->range_end = rx_coalesce_usecs_high;
460 		rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
461 		rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
462 						ENIC_AIC_LARGE_PKT_DIFF;
463 	}
464 
465 	enic->rx_coalesce_usecs = rx_coalesce_usecs;
466 
467 	return 0;
468 }
469 
/* Collect the filter IDs of all installed RX flow-steering rules.
 * Caller holds enic->rfs_h.lock.  Returns -EMSGSIZE if rule_locs is
 * too small for the number of rules present.
 */
static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int bucket;
	int cnt = 0;

	cmd->data = enic->rfs_h.max - enic->rfs_h.free;
	for (bucket = 0; bucket < (1 << ENIC_RFS_FLW_BITSHIFT); bucket++) {
		struct hlist_head *hhead = &enic->rfs_h.ht_head[bucket];
		struct enic_rfs_fltr_node *n;
		struct hlist_node *tmp;

		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			if (cnt == cmd->rule_cnt)
				return -EMSGSIZE;
			rule_locs[cnt++] = n->fltr_id;
		}
	}
	cmd->rule_cnt = cnt;

	return 0;
}
493 
enic_grxclsrule(struct enic * enic,struct ethtool_rxnfc * cmd)494 static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
495 {
496 	struct ethtool_rx_flow_spec *fsp =
497 				(struct ethtool_rx_flow_spec *)&cmd->fs;
498 	struct enic_rfs_fltr_node *n;
499 
500 	n = htbl_fltr_search(enic, (u16)fsp->location);
501 	if (!n)
502 		return -EINVAL;
503 	switch (n->keys.basic.ip_proto) {
504 	case IPPROTO_TCP:
505 		fsp->flow_type = TCP_V4_FLOW;
506 		break;
507 	case IPPROTO_UDP:
508 		fsp->flow_type = UDP_V4_FLOW;
509 		break;
510 	default:
511 		return -EINVAL;
512 	}
513 
514 	fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
515 	fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
516 
517 	fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
518 	fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
519 
520 	fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
521 	fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
522 
523 	fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
524 	fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
525 
526 	fsp->ring_cookie = n->rq_id;
527 
528 	return 0;
529 }
530 
enic_get_rx_flow_hash(struct enic * enic,struct ethtool_rxnfc * cmd)531 static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
532 {
533 	u8 rss_hash_type = 0;
534 	cmd->data = 0;
535 
536 	spin_lock_bh(&enic->devcmd_lock);
537 	(void)vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
538 	spin_unlock_bh(&enic->devcmd_lock);
539 	switch (cmd->flow_type) {
540 	case TCP_V6_FLOW:
541 	case TCP_V4_FLOW:
542 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
543 			     RXH_IP_SRC | RXH_IP_DST;
544 		break;
545 	case UDP_V6_FLOW:
546 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
547 		if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV6)
548 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
549 		break;
550 	case UDP_V4_FLOW:
551 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
552 		if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4)
553 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
554 		break;
555 	case SCTP_V4_FLOW:
556 	case AH_ESP_V4_FLOW:
557 	case AH_V4_FLOW:
558 	case ESP_V4_FLOW:
559 	case SCTP_V6_FLOW:
560 	case AH_ESP_V6_FLOW:
561 	case AH_V6_FLOW:
562 	case ESP_V6_FLOW:
563 	case IPV4_FLOW:
564 	case IPV6_FLOW:
565 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
566 		break;
567 	default:
568 		return -EINVAL;
569 	}
570 
571 	return 0;
572 }
573 
/* Dispatch ETHTOOL_GRX* queries.  Filter-table accesses are serialized
 * with enic->rfs_h.lock.
 */
static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = enic->rq_count;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		spin_lock_bh(&enic->rfs_h.lock);
		cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
		cmd->data = enic->rfs_h.max;
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRLALL:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrlall(enic, cmd, rule_locs);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRULE:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrule(enic, cmd);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXFH:
		ret = enic_get_rx_flow_hash(enic, cmd);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
610 
enic_get_tunable(struct net_device * dev,const struct ethtool_tunable * tuna,void * data)611 static int enic_get_tunable(struct net_device *dev,
612 			    const struct ethtool_tunable *tuna, void *data)
613 {
614 	struct enic *enic = netdev_priv(dev);
615 	int ret = 0;
616 
617 	switch (tuna->id) {
618 	case ETHTOOL_RX_COPYBREAK:
619 		*(u32 *)data = enic->rx_copybreak;
620 		break;
621 	default:
622 		ret = -EINVAL;
623 		break;
624 	}
625 
626 	return ret;
627 }
628 
enic_set_tunable(struct net_device * dev,const struct ethtool_tunable * tuna,const void * data)629 static int enic_set_tunable(struct net_device *dev,
630 			    const struct ethtool_tunable *tuna,
631 			    const void *data)
632 {
633 	struct enic *enic = netdev_priv(dev);
634 	int ret = 0;
635 
636 	switch (tuna->id) {
637 	case ETHTOOL_RX_COPYBREAK:
638 		enic->rx_copybreak = *(u32 *)data;
639 		break;
640 	default:
641 		ret = -EINVAL;
642 		break;
643 	}
644 
645 	return ret;
646 }
647 
enic_get_rxfh_key_size(struct net_device * netdev)648 static u32 enic_get_rxfh_key_size(struct net_device *netdev)
649 {
650 	return ENIC_RSS_LEN;
651 }
652 
enic_get_rxfh(struct net_device * netdev,struct ethtool_rxfh_param * rxfh)653 static int enic_get_rxfh(struct net_device *netdev,
654 			 struct ethtool_rxfh_param *rxfh)
655 {
656 	struct enic *enic = netdev_priv(netdev);
657 
658 	if (rxfh->key)
659 		memcpy(rxfh->key, enic->rss_key, ENIC_RSS_LEN);
660 
661 	rxfh->hfunc = ETH_RSS_HASH_TOP;
662 
663 	return 0;
664 }
665 
enic_set_rxfh(struct net_device * netdev,struct ethtool_rxfh_param * rxfh,struct netlink_ext_ack * extack)666 static int enic_set_rxfh(struct net_device *netdev,
667 			 struct ethtool_rxfh_param *rxfh,
668 			 struct netlink_ext_ack *extack)
669 {
670 	struct enic *enic = netdev_priv(netdev);
671 
672 	if (rxfh->indir ||
673 	    (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
674 	     rxfh->hfunc != ETH_RSS_HASH_TOP))
675 		return -EINVAL;
676 
677 	if (rxfh->key)
678 		memcpy(enic->rss_key, rxfh->key, ENIC_RSS_LEN);
679 
680 	return __enic_set_rsskey(enic);
681 }
682 
enic_get_ts_info(struct net_device * netdev,struct kernel_ethtool_ts_info * info)683 static int enic_get_ts_info(struct net_device *netdev,
684 			    struct kernel_ethtool_ts_info *info)
685 {
686 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
687 
688 	return 0;
689 }
690 
enic_get_channels(struct net_device * netdev,struct ethtool_channels * channels)691 static void enic_get_channels(struct net_device *netdev,
692 			      struct ethtool_channels *channels)
693 {
694 	struct enic *enic = netdev_priv(netdev);
695 
696 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
697 	case VNIC_DEV_INTR_MODE_MSIX:
698 		channels->max_rx = ENIC_RQ_MAX;
699 		channels->max_tx = ENIC_WQ_MAX;
700 		channels->rx_count = enic->rq_count;
701 		channels->tx_count = enic->wq_count;
702 		break;
703 	case VNIC_DEV_INTR_MODE_MSI:
704 	case VNIC_DEV_INTR_MODE_INTX:
705 		channels->max_combined = 1;
706 		channels->combined_count = 1;
707 		break;
708 	default:
709 		break;
710 	}
711 }
712 
713 static const struct ethtool_ops enic_ethtool_ops = {
714 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
715 				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
716 				     ETHTOOL_COALESCE_RX_USECS_LOW |
717 				     ETHTOOL_COALESCE_RX_USECS_HIGH,
718 	.get_drvinfo = enic_get_drvinfo,
719 	.get_msglevel = enic_get_msglevel,
720 	.set_msglevel = enic_set_msglevel,
721 	.get_link = ethtool_op_get_link,
722 	.get_strings = enic_get_strings,
723 	.get_ringparam = enic_get_ringparam,
724 	.set_ringparam = enic_set_ringparam,
725 	.get_sset_count = enic_get_sset_count,
726 	.get_ethtool_stats = enic_get_ethtool_stats,
727 	.get_coalesce = enic_get_coalesce,
728 	.set_coalesce = enic_set_coalesce,
729 	.get_rxnfc = enic_get_rxnfc,
730 	.get_tunable = enic_get_tunable,
731 	.set_tunable = enic_set_tunable,
732 	.get_rxfh_key_size = enic_get_rxfh_key_size,
733 	.get_rxfh = enic_get_rxfh,
734 	.set_rxfh = enic_set_rxfh,
735 	.get_link_ksettings = enic_get_ksettings,
736 	.get_ts_info = enic_get_ts_info,
737 	.get_channels = enic_get_channels,
738 };
739 
enic_set_ethtool_ops(struct net_device * netdev)740 void enic_set_ethtool_ops(struct net_device *netdev)
741 {
742 	netdev->ethtool_ops = &enic_ethtool_ops;
743 }
744