// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

#include <linux/ethtool.h>
#include <linux/vmalloc.h>

#include "fm10k.h"

struct fm10k_stats {
	/* The stat_string is expected to be a format string formatted using
	 * vsnprintf by fm10k_add_stat_strings. Every member of a stats array
	 * should use the same format specifiers as they will be formatted
	 * using the same variadic arguments.
	 */
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define FM10K_STAT_FIELDS(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

/* netdevice statistics */
#define FM10K_NETDEV_STAT(_net_stat) \
	FM10K_STAT_FIELDS(struct net_device_stats, __stringify(_net_stat), \
			  _net_stat)

static const struct fm10k_stats fm10k_gstrings_net_stats[] = {
	FM10K_NETDEV_STAT(tx_packets),
	FM10K_NETDEV_STAT(tx_bytes),
	FM10K_NETDEV_STAT(tx_errors),
	FM10K_NETDEV_STAT(rx_packets),
	FM10K_NETDEV_STAT(rx_bytes),
	FM10K_NETDEV_STAT(rx_errors),
	FM10K_NETDEV_STAT(rx_dropped),

	/* detailed Rx errors */
	FM10K_NETDEV_STAT(rx_length_errors),
	FM10K_NETDEV_STAT(rx_crc_errors),
	FM10K_NETDEV_STAT(rx_fifo_errors),
};

#define FM10K_NETDEV_STATS_LEN ARRAY_SIZE(fm10k_gstrings_net_stats)

/* General interface statistics */
#define FM10K_STAT(_name, _stat) \
	FM10K_STAT_FIELDS(struct fm10k_intfc, _name, _stat)

static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
	FM10K_STAT("tx_restart_queue", restart_queue),
	FM10K_STAT("tx_busy", tx_busy),
	FM10K_STAT("tx_csum_errors", tx_csum_errors),
	FM10K_STAT("rx_alloc_failed", alloc_failed),
	FM10K_STAT("rx_csum_errors", rx_csum_errors),

	FM10K_STAT("tx_packets_nic", tx_packets_nic),
	FM10K_STAT("tx_bytes_nic", tx_bytes_nic),
	FM10K_STAT("rx_packets_nic", rx_packets_nic),
	FM10K_STAT("rx_bytes_nic", rx_bytes_nic),
	FM10K_STAT("rx_drops_nic", rx_drops_nic),
	FM10K_STAT("rx_overrun_pf", rx_overrun_pf),
	FM10K_STAT("rx_overrun_vf", rx_overrun_vf),

	FM10K_STAT("swapi_status", hw.swapi.status),
	FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
	FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),

	FM10K_STAT("reset_while_pending", hw.mac.reset_while_pending),

	FM10K_STAT("tx_hang_count", tx_timeout_count),
};

static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
	FM10K_STAT("timeout", stats.timeout.count),
	FM10K_STAT("ur", stats.ur.count),
	FM10K_STAT("ca", stats.ca.count),
	FM10K_STAT("um", stats.um.count),
	FM10K_STAT("xec", stats.xec.count),
	FM10K_STAT("vlan_drop", stats.vlan_drop.count),
	FM10K_STAT("loopback_drop", stats.loopback_drop.count),
	FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
};

/* mailbox statistics */
#define FM10K_MBX_STAT(_name, _stat) \
	FM10K_STAT_FIELDS(struct fm10k_mbx_info, _name, _stat)

static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
	FM10K_MBX_STAT("mbx_tx_busy", tx_busy),
	FM10K_MBX_STAT("mbx_tx_dropped", tx_dropped),
	FM10K_MBX_STAT("mbx_tx_messages", tx_messages),
	FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords),
	FM10K_MBX_STAT("mbx_tx_mbmem_pulled", tx_mbmem_pulled),
	FM10K_MBX_STAT("mbx_rx_messages", rx_messages),
	FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords),
	FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err),
	FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed),
};

/* per-queue ring statistics */
#define FM10K_QUEUE_STAT(_name, _stat) \
	FM10K_STAT_FIELDS(struct fm10k_ring, _name, _stat)

static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
	FM10K_QUEUE_STAT("%s_queue_%u_packets", stats.packets),
	FM10K_QUEUE_STAT("%s_queue_%u_bytes", stats.bytes),
};

#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
#define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
#define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats)

#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
				FM10K_NETDEV_STATS_LEN + \
				FM10K_MBX_STATS_LEN)

static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = {
	"Mailbox test (on/offline)"
};

#define FM10K_TEST_LEN (sizeof(fm10k_gstrings_test) / ETH_GSTRING_LEN)

enum fm10k_self_test_types {
	FM10K_TEST_MBX,
	FM10K_TEST_MAX = FM10K_TEST_LEN
};

enum {
	FM10K_PRV_FLAG_LEN,
};

static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
};

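/* __fm10k_add_stat_strings - format one stats table into the strings buffer
 *
 * Expands each stat_string in @stats through vsnprintf() using the trailing
 * variadic arguments and advances *p by ETH_GSTRING_LEN per entry.
 */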
static void __fm10k_add_stat_strings(u8 **p, const struct fm10k_stats stats[],
				      const unsigned int size, ...)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		va_list args;

		va_start(args, size);
		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
		*p += ETH_GSTRING_LEN;
		va_end(args);
	}
}

#define fm10k_add_stat_strings(p, stats, ...) \
	__fm10k_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)

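/* fm10k_get_stat_strings - build the complete list of statistic names
 *
 * Emits the netdev, global and mailbox stat names, the PF-only stats when
 * not running as a VF, and a Tx/Rx pair of per-queue strings for each queue.
 */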
static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int i;

	fm10k_add_stat_strings(&data, fm10k_gstrings_net_stats);

	fm10k_add_stat_strings(&data, fm10k_gstrings_global_stats);

	fm10k_add_stat_strings(&data, fm10k_gstrings_mbx_stats);

	if (interface->hw.mac.type != fm10k_mac_vf)
		fm10k_add_stat_strings(&data, fm10k_gstrings_pf_stats);

	for (i = 0; i < interface->hw.mac.max_queues; i++) {
		fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
				       "tx", i);

		fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
				       "rx", i);
	}
}

static void fm10k_get_strings(struct net_device *dev,
			      u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, fm10k_gstrings_test,
		       FM10K_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		fm10k_get_stat_strings(dev, data);
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, fm10k_prv_flags,
		       FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN);
		break;
	}
}

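/* fm10k_get_sset_count - report how many entries each string set contains
 *
 * The ETH_SS_STATS count is the static stats plus two per-queue stat sets
 * per queue, plus the PF-only stats when not running as a VF.
 */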
static int fm10k_get_sset_count(struct net_device *dev, int sset)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	int stats_len = FM10K_STATIC_STATS_LEN;

	switch (sset) {
	case ETH_SS_TEST:
		return FM10K_TEST_LEN;
	case ETH_SS_STATS:
		stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN;

		if (hw->mac.type != fm10k_mac_vf)
			stats_len += FM10K_PF_STATS_LEN;

		return stats_len;
	case ETH_SS_PRIV_FLAGS:
		return FM10K_PRV_FLAG_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

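/* __fm10k_add_ethtool_stats - copy one stats table into the u64 data buffer
 *
 * Each entry is read from @pointer at its recorded offset and widened to
 * u64 based on its recorded size. A NULL @pointer reports zeros instead.
 */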
static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
				       const struct fm10k_stats stats[],
				       const unsigned int size)
{
	unsigned int i;

	if (!pointer) {
		/* memory is not zero-allocated so we have to clear it */
		for (i = 0; i < size; i++)
			*((*data)++) = 0;
		return;
	}

	for (i = 0; i < size; i++) {
		char *p = (char *)pointer + stats[i].stat_offset;

		switch (stats[i].sizeof_stat) {
		case sizeof(u64):
			*((*data)++) = *(u64 *)p;
			break;
		case sizeof(u32):
			*((*data)++) = *(u32 *)p;
			break;
		case sizeof(u16):
			*((*data)++) = *(u16 *)p;
			break;
		case sizeof(u8):
			*((*data)++) = *(u8 *)p;
			break;
		default:
			WARN_ONCE(1, "unexpected stat size for %s",
				  stats[i].stat_string);
			*((*data)++) = 0;
		}
	}
}

#define fm10k_add_ethtool_stats(data, pointer, stats) \
	__fm10k_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))

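/* fm10k_get_ethtool_stats - fill the ethtool -S data buffer
 *
 * Refreshes the driver statistics and then copies them out in the same
 * order in which fm10k_get_stat_strings() emits the corresponding names.
 */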
static void fm10k_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats __always_unused *stats,
				    u64 *data)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	int i;

	fm10k_update_stats(interface);

	fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats);

	fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats);

	fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
				fm10k_gstrings_mbx_stats);

	if (interface->hw.mac.type != fm10k_mac_vf) {
		fm10k_add_ethtool_stats(&data, interface,
					fm10k_gstrings_pf_stats);
	}

	for (i = 0; i < interface->hw.mac.max_queues; i++) {
		struct fm10k_ring *ring;

		ring = interface->tx_ring[i];
		fm10k_add_ethtool_stats(&data, ring,
					fm10k_gstrings_queue_stats);

		ring = interface->rx_ring[i];
		fm10k_add_ethtool_stats(&data, ring,
					fm10k_gstrings_queue_stats);
	}
}

/* If function below adds more registers this define needs to be updated */
#define FM10K_REGS_LEN_Q 29

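/* fm10k_get_reg_q - snapshot the per-queue register block for queue @i
 *
 * Writes exactly FM10K_REGS_LEN_Q words into @buff; the trailing BUG_ON
 * keeps the define and the register list in sync.
 */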
static void fm10k_get_reg_q(struct fm10k_hw *hw, u32 *buff, int i)
{
	int idx = 0;

	buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDLEN(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_RXCTRL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXQCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXDCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXINT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_SRRCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPRC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPRDC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_L(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_H(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDLEN(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_TXCTRL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXDCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXQCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXINT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPTC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_L(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_H(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TQDLOC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TX_SGLORT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_PFVTCTL(i));

	BUG_ON(idx != FM10K_REGS_LEN_Q);
}

/* If function above adds more registers this define needs to be updated */
#define FM10K_REGS_LEN_VSI 43

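/* fm10k_get_reg_vsi - snapshot the per-VSI RSS registers for VSI @i
 *
 * Dumps MRQC plus the 10 RSSRK and 32 RETA entries, FM10K_REGS_LEN_VSI
 * words in total.
 */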
static void fm10k_get_reg_vsi(struct fm10k_hw *hw, u32 *buff, int i)
{
	int idx = 0, j;

	buff[idx++] = fm10k_read_reg(hw, FM10K_MRQC(i));
	for (j = 0; j < 10; j++)
		buff[idx++] = fm10k_read_reg(hw, FM10K_RSSRK(i, j));
	for (j = 0; j < 32; j++)
		buff[idx++] = fm10k_read_reg(hw, FM10K_RETA(i, j));

	BUG_ON(idx != FM10K_REGS_LEN_VSI);
}

static void fm10k_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	u32 *buff = p;
	u16 i;

	regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id;

	switch (hw->mac.type) {
	case fm10k_mac_pf:
		/* General PF Registers */
		*(buff++) = fm10k_read_reg(hw, FM10K_CTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_CTRL_EXT);
		*(buff++) = fm10k_read_reg(hw, FM10K_GCR);
		*(buff++) = fm10k_read_reg(hw, FM10K_GCR_EXT);

		for (i = 0; i < 8; i++) {
			*(buff++) = fm10k_read_reg(hw, FM10K_DGLORTMAP(i));
			*(buff++) = fm10k_read_reg(hw, FM10K_DGLORTDEC(i));
		}

		for (i = 0; i < 65; i++) {
			fm10k_get_reg_vsi(hw, buff, i);
			buff += FM10K_REGS_LEN_VSI;
		}

		*(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL2);

		for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) {
			fm10k_get_reg_q(hw, buff, i);
			buff += FM10K_REGS_LEN_Q;
		}

		*(buff++) = fm10k_read_reg(hw, FM10K_TPH_CTRL);

		for (i = 0; i < 8; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_INT_MAP(i));

		/* Interrupt Throttling Registers */
		for (i = 0; i < 130; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_ITR(i));

		break;
	case fm10k_mac_vf:
		/* General VF registers */
		*(buff++) = fm10k_read_reg(hw, FM10K_VFCTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_VFINT_MAP);
		*(buff++) = fm10k_read_reg(hw, FM10K_VFSYSTIME);

		/* Interrupt Throttling Registers */
		for (i = 0; i < 8; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_VFITR(i));

		fm10k_get_reg_vsi(hw, buff, 0);
		buff += FM10K_REGS_LEN_VSI;

		for (i = 0; i < FM10K_MAX_QUEUES_POOL; i++) {
			if (i < hw->mac.max_queues)
				fm10k_get_reg_q(hw, buff, i);
			else
				memset(buff, 0, sizeof(u32) * FM10K_REGS_LEN_Q);
			buff += FM10K_REGS_LEN_Q;
		}

		break;
	default:
		return;
	}
}

/* If function above adds more registers these defines need to be updated */
#define FM10K_REGS_LEN_PF \
(162 + (65 * FM10K_REGS_LEN_VSI) + (FM10K_MAX_QUEUES_PF * FM10K_REGS_LEN_Q))
#define FM10K_REGS_LEN_VF \
(11 + FM10K_REGS_LEN_VSI + (FM10K_MAX_QUEUES_POOL * FM10K_REGS_LEN_Q))

static int fm10k_get_regs_len(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;

	switch (hw->mac.type) {
	case fm10k_mac_pf:
		return FM10K_REGS_LEN_PF * sizeof(u32);
	case fm10k_mac_vf:
		return FM10K_REGS_LEN_VF * sizeof(u32);
	default:
		return 0;
	}
}

static void fm10k_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	strscpy(info->driver, fm10k_driver_name,
		sizeof(info->driver));
	strscpy(info->bus_info, pci_name(interface->pdev),
		sizeof(info->bus_info));
}

static void fm10k_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pause)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* record fixed values for autoneg and tx pause */
	pause->autoneg = 0;
	pause->tx_pause = 1;

	pause->rx_pause = interface->rx_pause ? 1 : 0;
}

static int fm10k_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *pause)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;

	if (pause->autoneg || !pause->tx_pause)
		return -EINVAL;

	/* we can only support pause on the PF to avoid head-of-line blocking */
	if (hw->mac.type == fm10k_mac_pf)
		interface->rx_pause = pause->rx_pause ? ~0 : 0;
	else if (pause->rx_pause)
		return -EINVAL;

	if (netif_running(dev))
		fm10k_update_rx_drop_en(interface);

	return 0;
}

static u32 fm10k_get_msglevel(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	return interface->msg_enable;
}

static void fm10k_set_msglevel(struct net_device *netdev, u32 data)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	interface->msg_enable = data;
}

static void fm10k_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring,
				struct kernel_ethtool_ringparam *kernel_ring,
				struct netlink_ext_ack *extack)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	ring->rx_max_pending = FM10K_MAX_RXD;
	ring->tx_max_pending = FM10K_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = interface->rx_ring_count;
	ring->tx_pending = interface->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

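/* fm10k_set_ringparam - resize the Tx/Rx descriptor rings
 *
 * New counts are clamped and aligned to the descriptor multiple. If the
 * interface is running, replacement rings are allocated first so the old
 * resources survive an allocation failure, then swapped in around a
 * down/up cycle while the __FM10K_RESETTING bit is held.
 */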
static int fm10k_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *temp_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       FM10K_MIN_TXD, FM10K_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, FM10K_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       FM10K_MIN_RXD, FM10K_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, FM10K_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == interface->tx_ring_count) &&
	    (new_rx_count == interface->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__FM10K_RESETTING, interface->state))
		usleep_range(1000, 2000);

	if (!netif_running(interface->netdev)) {
		for (i = 0; i < interface->num_tx_queues; i++)
			interface->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < interface->num_rx_queues; i++)
			interface->rx_ring[i]->count = new_rx_count;
		interface->tx_ring_count = new_tx_count;
		interface->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
	temp_ring = vmalloc(array_size(i, sizeof(struct fm10k_ring)));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	fm10k_down(interface);

	/* Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != interface->tx_ring_count) {
		for (i = 0; i < interface->num_tx_queues; i++) {
			memcpy(&temp_ring[i], interface->tx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_tx_count;
			err = fm10k_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_tx_queues; i++) {
			fm10k_free_tx_resources(interface->tx_ring[i]);

			memcpy(interface->tx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != interface->rx_ring_count) {
		for (i = 0; i < interface->num_rx_queues; i++) {
			memcpy(&temp_ring[i], interface->rx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_rx_count;
			err = fm10k_setup_rx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_rx_queues; i++) {
			fm10k_free_rx_resources(interface->rx_ring[i]);

			memcpy(interface->rx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->rx_ring_count = new_rx_count;
	}

err_setup:
	fm10k_up(interface);
	vfree(temp_ring);
clear_reset:
	clear_bit(__FM10K_RESETTING, interface->state);
	return err;
}

static int fm10k_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	ec->use_adaptive_tx_coalesce = ITR_IS_ADAPTIVE(interface->tx_itr);
	ec->tx_coalesce_usecs = interface->tx_itr & ~FM10K_ITR_ADAPTIVE;

	ec->use_adaptive_rx_coalesce = ITR_IS_ADAPTIVE(interface->rx_itr);
	ec->rx_coalesce_usecs = interface->rx_itr & ~FM10K_ITR_ADAPTIVE;

	return 0;
}

static int fm10k_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	u16 tx_itr, rx_itr;
	int i;

	/* verify limits */
	if ((ec->rx_coalesce_usecs > FM10K_ITR_MAX) ||
	    (ec->tx_coalesce_usecs > FM10K_ITR_MAX))
		return -EINVAL;

	/* record settings */
	tx_itr = ec->tx_coalesce_usecs;
	rx_itr = ec->rx_coalesce_usecs;

	/* set initial values for adaptive ITR */
	if (ec->use_adaptive_tx_coalesce)
		tx_itr = FM10K_ITR_ADAPTIVE | FM10K_TX_ITR_DEFAULT;

	if (ec->use_adaptive_rx_coalesce)
		rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;

	/* update interface */
	interface->tx_itr = tx_itr;
	interface->rx_itr = rx_itr;

	/* update q_vectors */
	for (i = 0; i < interface->num_q_vectors; i++) {
		struct fm10k_q_vector *qv = interface->q_vector[i];

		qv->tx.itr = tx_itr;
		qv->rx.itr = rx_itr;
	}

	return 0;
}

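/* fm10k_get_rss_hash_opts - report which header fields feed the RSS hash
 *
 * TCP always hashes on the L4 ports; UDP only does so when the matching
 * FM10K_FLAG_RSS_FIELD_IPV*_UDP flag is set. All listed flow types hash on
 * the IP source and destination addresses.
 */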
static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on fm10k */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case UDP_V4_FLOW:
		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
			     interface->flags))
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
			     interface->flags))
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 __always_unused *rule_locs)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = interface->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		ret = fm10k_get_rss_hash_opts(interface, cmd);
		break;
	default:
		break;
	}

	return ret;
}

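/* fm10k_set_rss_hash_opt - update the RSS hash field configuration
 *
 * IP src/dst hashing is mandatory and TCP port hashing is fixed; only the
 * UDP port hashing flags can be toggled. When a flag changes, MRQC(0) is
 * reprogrammed accordingly.
 */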
static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
				  struct ethtool_rxnfc *nfc)
{
	int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				    interface->flags);
	int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				    interface->flags);

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			clear_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				  interface->flags);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			set_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				interface->flags);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			clear_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				  interface->flags);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			set_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				interface->flags);
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* If something changed we need to update the MRQC register. Note that
	 * test_bit() is guaranteed to return strictly 0 or 1, so testing for
	 * equality is safe.
	 */
	if ((rss_ipv4_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				      interface->flags)) ||
	    (rss_ipv6_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				      interface->flags))) {
		struct fm10k_hw *hw = &interface->hw;
		bool warn = false;
		u32 mrqc;

		/* Perform hash on these packet types */
		mrqc = FM10K_MRQC_IPV4 |
		       FM10K_MRQC_TCP_IPV4 |
		       FM10K_MRQC_IPV6 |
		       FM10K_MRQC_TCP_IPV6;

		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
			     interface->flags)) {
			mrqc |= FM10K_MRQC_UDP_IPV4;
			warn = true;
		}
		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
			     interface->flags)) {
			mrqc |= FM10K_MRQC_UDP_IPV6;
			warn = true;
		}

		/* If we enable UDP RSS display a warning that this may cause
		 * fragmented UDP packets to arrive out of order.
		 */
		if (warn)
			netif_warn(interface, drv, interface->netdev,
				   "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
	}

	return 0;
}

static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = fm10k_set_rss_hash_opt(interface, cmd);
		break;
	default:
		break;
	}

	return ret;
}

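/* fm10k_mbx_test - exercise the VF mailbox as part of the ethtool self-test
 *
 * Sends a test message for each supported attribute type, polls up to one
 * second for the result, and records a nonzero value in @data on failure.
 */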
static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 attr_flag, test_msg[6];
	unsigned long timeout;
	int err = -EINVAL;

	/* For now this is a VF-only feature */
	if (hw->mac.type != fm10k_mac_vf)
		return 0;

	/* loop through both nested and unnested attribute types */
	for (attr_flag = BIT(FM10K_TEST_MSG_UNSET);
	     attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED);
	     attr_flag += attr_flag) {
		/* generate message to be tested */
		fm10k_tlv_msg_test_create(test_msg, attr_flag);

		fm10k_mbx_lock(interface);
		mbx->test_result = FM10K_NOT_IMPLEMENTED;
		err = mbx->ops.enqueue_tx(hw, mbx, test_msg);
		fm10k_mbx_unlock(interface);

		/* wait up to 1 second for response */
		timeout = jiffies + HZ;
		do {
			if (err < 0)
				goto err_out;

			usleep_range(500, 1000);

			fm10k_mbx_lock(interface);
			mbx->ops.process(hw, mbx);
			fm10k_mbx_unlock(interface);

			err = mbx->test_result;
			if (!err)
				break;
		} while (time_is_after_jiffies(timeout));

		/* reporting errors */
		if (err)
			goto err_out;
	}

err_out:
	*data = err < 0 ? (attr_flag) : (err > 0);
	return err;
}

static void fm10k_self_test(struct net_device *dev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;

	memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);

	if (FM10K_REMOVED(hw->hw_addr)) {
		netif_err(interface, drv, dev,
			  "Interface removed - test blocked\n");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if (fm10k_mbx_test(interface, &data[FM10K_TEST_MBX]))
		eth_test->flags |= ETH_TEST_FL_FAILED;
}

static u32 fm10k_get_priv_flags(struct net_device *netdev)
{
	return 0;
}

static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN))
		return -EINVAL;

	return 0;
}

static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
	return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
}

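/* fm10k_write_reta - program the RSS redirection table
 *
 * Packs four 8-bit queue indices per RETA register. If @indir is NULL a
 * default table is generated from ethtool_rxfh_indir_default(); entries
 * that already match the cached copy are not rewritten.
 */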
void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
{
	u16 rss_i = interface->ring_feature[RING_F_RSS].indices;
	struct fm10k_hw *hw = &interface->hw;
	u32 table[4];
	int i, j;

	/* record entries to reta table */
	for (i = 0; i < FM10K_RETA_SIZE; i++) {
		u32 reta, n;

		/* generate a new table if we weren't given one */
		for (j = 0; j < 4; j++) {
			if (indir)
				n = indir[4 * i + j];
			else
				n = ethtool_rxfh_indir_default(4 * i + j,
							       rss_i);

			table[j] = n;
		}

		reta = table[0] |
		       (table[1] << 8) |
		       (table[2] << 16) |
		       (table[3] << 24);

		if (interface->reta[i] == reta)
			continue;

		interface->reta[i] = reta;
		fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
	}
}

static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;

	if (!indir)
		return 0;

	for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
		u32 reta = interface->reta[i];

		indir[0] = (reta << 24) >> 24;
		indir[1] = (reta << 16) >> 24;
		indir[2] = (reta << 8) >> 24;
		indir[3] = (reta) >> 24;
	}

	return 0;
}

static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;
	u16 rss_i;

	if (!indir)
		return 0;

	/* Verify user input. */
	rss_i = interface->ring_feature[RING_F_RSS].indices;
	for (i = fm10k_get_reta_size(netdev); i--;) {
		if (indir[i] < rss_i)
			continue;
		return -EINVAL;
	}

	fm10k_write_reta(interface, indir);

	return 0;
}

static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev)
{
	return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
}

static int fm10k_get_rssh(struct net_device *netdev,
			  struct ethtool_rxfh_param *rxfh)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	u8 *key = rxfh->key;
	int i, err;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	err = fm10k_get_reta(netdev, rxfh->indir);
	if (err || !key)
		return err;

	for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4)
		*(__le32 *)key = cpu_to_le32(interface->rssrk[i]);

	return 0;
}

static int fm10k_set_rssh(struct net_device *netdev,
			  struct ethtool_rxfh_param *rxfh,
			  struct netlink_ext_ack *extack)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	int i, err;

	/* We do not allow change in unsupported parameters */
	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	err = fm10k_set_reta(netdev, rxfh->indir);
	if (err || !rxfh->key)
		return err;

	for (i = 0; i < FM10K_RSSRK_SIZE; i++, rxfh->key += 4) {
		u32 rssrk = le32_to_cpu(*(__le32 *)rxfh->key);

		if (interface->rssrk[i] == rssrk)
			continue;

		interface->rssrk[i] = rssrk;
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), rssrk);
	}

	return 0;
}

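/* fm10k_max_channels - maximum number of combined channels we can expose
 *
 * Normally the hardware queue limit; with multiple traffic classes this is
 * reduced to the largest power-of-two number of queues per class.
 */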
static unsigned int fm10k_max_channels(struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int max_combined = interface->hw.mac.max_queues;
	u8 tcs = netdev_get_num_tc(dev);

	/* For QoS report channels per traffic class */
	if (tcs > 1)
		max_combined = BIT((fls(max_combined / tcs) - 1));

	return max_combined;
}

static void fm10k_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = fm10k_max_channels(dev);

	/* report info for other vector */
	ch->max_other = NON_Q_VECTORS;
	ch->other_count = ch->max_other;

	/* record RSS queues */
	ch->combined_count = interface->ring_feature[RING_F_RSS].indices;
}

static int fm10k_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int count = ch->combined_count;

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > fm10k_max_channels(dev))
		return -EINVAL;

	interface->ring_feature[RING_F_RSS].limit = count;

	/* use setup TC to update any traffic class queue mapping */
	return fm10k_setup_tc(dev, netdev_get_num_tc(dev));
}

static const struct ethtool_ops fm10k_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_strings = fm10k_get_strings,
	.get_sset_count = fm10k_get_sset_count,
	.get_ethtool_stats = fm10k_get_ethtool_stats,
	.get_drvinfo = fm10k_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = fm10k_get_pauseparam,
	.set_pauseparam = fm10k_set_pauseparam,
	.get_msglevel = fm10k_get_msglevel,
	.set_msglevel = fm10k_set_msglevel,
	.get_ringparam = fm10k_get_ringparam,
	.set_ringparam = fm10k_set_ringparam,
	.get_coalesce = fm10k_get_coalesce,
	.set_coalesce = fm10k_set_coalesce,
	.get_rxnfc = fm10k_get_rxnfc,
	.set_rxnfc = fm10k_set_rxnfc,
	.get_regs = fm10k_get_regs,
	.get_regs_len = fm10k_get_regs_len,
	.self_test = fm10k_self_test,
	.get_priv_flags = fm10k_get_priv_flags,
	.set_priv_flags = fm10k_set_priv_flags,
	.get_rxfh_indir_size = fm10k_get_reta_size,
	.get_rxfh_key_size = fm10k_get_rssrk_size,
	.get_rxfh = fm10k_get_rssh,
	.set_rxfh = fm10k_set_rssh,
	.get_channels = fm10k_get_channels,
	.set_channels = fm10k_set_channels,
	.get_ts_info = ethtool_op_get_ts_info,
};

void fm10k_set_ethtool_ops(struct net_device *dev)
{
	dev->ethtool_ops = &fm10k_ethtool_ops;
}