1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4 #include <linux/bitfield.h>
5 #include <linux/uaccess.h>
6
7 #include <net/netdev_lock.h>
8
9 /* ethtool support for iavf */
10 #include "iavf.h"
11
12 /* ethtool statistics helpers */
13
14 /**
15 * struct iavf_stats - definition for an ethtool statistic
16 * @stat_string: statistic name to display in ethtool -S output
17 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
18 * @stat_offset: offsetof() the stat from a base pointer
19 *
20 * This structure defines a statistic to be added to the ethtool stats buffer.
21 * It defines a statistic as offset from a common base pointer. Stats should
22 * be defined in constant arrays using the IAVF_STAT macro, with every element
23 * of the array using the same _type for calculating the sizeof_stat and
24 * stat_offset.
25 *
26 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
27 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
28 * the iavf_add_ethtool_stat() helper function.
29 *
30 * The @stat_string is interpreted as a format string, allowing formatted
31 * values to be inserted while looping over multiple structures for a given
32 * statistics array. Thus, every statistic string in an array should have the
33 * same type and number of format specifiers, to be formatted by variadic
34 * arguments to the iavf_add_stat_string() helper function.
35 **/
36 struct iavf_stats {
37 char stat_string[ETH_GSTRING_LEN];
38 int sizeof_stat;
39 int stat_offset;
40 };
41
42 /* Helper macro to define an iavf_stat structure with proper size and type.
43 * Use this when defining constant statistics arrays. Note that @_type expects
44 * only a type name and is used multiple times.
45 */
46 #define IAVF_STAT(_type, _name, _stat) { \
47 .stat_string = _name, \
48 .sizeof_stat = sizeof_field(_type, _stat), \
49 .stat_offset = offsetof(_type, _stat) \
50 }
51
52 /* Helper macro for defining some statistics related to queues */
53 #define IAVF_QUEUE_STAT(_name, _stat) \
54 IAVF_STAT(struct iavf_ring, _name, _stat)
55
56 /* Stats associated with a Tx or Rx ring */
57 static const struct iavf_stats iavf_gstrings_queue_stats[] = {
58 IAVF_QUEUE_STAT("%s-%u.packets", stats.packets),
59 IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes),
60 };
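/* For illustration only (not part of the driver): iavf_get_stat_strings()
 * expands the "%s-%u" format strings above with a "tx"/"rx" prefix and the
 * queue index, so ethtool -S output for queue 0 looks roughly like:
 *
 *	tx-0.packets: 1024
 *	tx-0.bytes:   65536
 *	rx-0.packets: 2048
 *	rx-0.bytes:   131072
 *
 * The counter values are made up; only the naming scheme comes from this
 * file.
 */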
61
62 /**
63 * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer
64 * @data: location to store the stat value
65 * @pointer: basis for where to copy from
66 * @stat: the stat definition
67 *
68 * Copies the stat data defined by the pointer and stat structure pair into
69 * the memory supplied as data. Used to implement iavf_add_ethtool_stats and
70 * iavf_add_queue_stats. If the pointer is null, data will be zero'd.
71 */
72 static void
73 iavf_add_one_ethtool_stat(u64 *data, void *pointer,
74 const struct iavf_stats *stat)
75 {
76 char *p;
77
78 if (!pointer) {
79 /* ensure that the ethtool data buffer is zero'd for any stats
80 * which don't have a valid pointer.
81 */
82 *data = 0;
83 return;
84 }
85
86 p = (char *)pointer + stat->stat_offset;
87 switch (stat->sizeof_stat) {
88 case sizeof(u64):
89 *data = *((u64 *)p);
90 break;
91 case sizeof(u32):
92 *data = *((u32 *)p);
93 break;
94 case sizeof(u16):
95 *data = *((u16 *)p);
96 break;
97 case sizeof(u8):
98 *data = *((u8 *)p);
99 break;
100 default:
101 WARN_ONCE(1, "unexpected stat size for %s",
102 stat->stat_string);
103 *data = 0;
104 }
105 }
106
107 /**
108 * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer
109 * @data: ethtool stats buffer
110 * @pointer: location to copy stats from
111 * @stats: array of stats to copy
112 * @size: the size of the stats definition
113 *
114 * Copy the stats defined by the stats array using the pointer as a base into
115 * the data buffer supplied by ethtool. Updates the data pointer to point to
116 * the next empty location for successive calls to __iavf_add_ethtool_stats.
117 * If pointer is null, set the data values to zero and update the pointer to
118 * skip these stats.
119 **/
120 static void
121 __iavf_add_ethtool_stats(u64 **data, void *pointer,
122 const struct iavf_stats stats[],
123 const unsigned int size)
124 {
125 unsigned int i;
126
127 for (i = 0; i < size; i++)
128 iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
129 }
130
131 /**
132 * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer
133 * @data: ethtool stats buffer
134 * @pointer: location where stats are stored
135 * @stats: static const array of stat definitions
136 *
137 * Macro to ease the use of __iavf_add_ethtool_stats by taking a static
138 * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
139 * ensuring that we pass the size associated with the given stats array.
140 *
141 * The parameter @stats is evaluated twice, so parameters with side effects
142 * should be avoided.
143 **/
144 #define iavf_add_ethtool_stats(data, pointer, stats) \
145 __iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
146
147 /**
148 * iavf_add_queue_stats - copy queue statistics into supplied buffer
149 * @data: ethtool stats buffer
150 * @ring: the ring to copy
151 *
152 * Queue statistics must be copied while protected by
153 * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats.
154 * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the
155 * ring pointer is null, zero out the queue stat values and update the data
156 * pointer. Otherwise safely copy the stats from the ring into the supplied
157 * buffer and update the data pointer when finished.
158 *
159 * This function expects to be called while under rcu_read_lock().
160 **/
161 static void
162 iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
163 {
164 const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats);
165 const struct iavf_stats *stats = iavf_gstrings_queue_stats;
166 unsigned int start;
167 unsigned int i;
168
169 /* To avoid invalid statistics values, ensure that we keep retrying
170 * the copy until we get a consistent value according to
171 * u64_stats_fetch_retry. But first, make sure our ring is
172 * non-null before attempting to access its syncp.
173 */
174 do {
175 start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
176 for (i = 0; i < size; i++)
177 iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
178 } while (ring && u64_stats_fetch_retry(&ring->syncp, start));
179
180 /* Once we successfully copy the stats in, update the data pointer */
181 *data += size;
182 }
183
184 /**
185 * __iavf_add_stat_strings - copy stat strings into ethtool buffer
186 * @p: ethtool supplied buffer
187 * @stats: stat definitions array
188 * @size: size of the stats array
189 *
190 * Format and copy the strings described by stats into the buffer pointed at
191 * by p.
192 **/
193 static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[],
194 const unsigned int size, ...)
195 {
196 unsigned int i;
197
198 for (i = 0; i < size; i++) {
199 va_list args;
200
201 va_start(args, size);
202 vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
203 *p += ETH_GSTRING_LEN;
204 va_end(args);
205 }
206 }
207
208 /**
209 * iavf_add_stat_strings - copy stat strings into ethtool buffer
210 * @p: ethtool supplied buffer
211 * @stats: stat definitions array
212 *
213 * Format and copy the strings described by the const static stats value into
214 * the buffer pointed at by p.
215 *
216 * The parameter @stats is evaluated twice, so parameters with side effects
217 * should be avoided. Additionally, stats must be an array such that
218 * ARRAY_SIZE can be called on it.
219 **/
220 #define iavf_add_stat_strings(p, stats, ...) \
221 __iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
222
223 #define VF_STAT(_name, _stat) \
224 IAVF_STAT(struct iavf_adapter, _name, _stat)
225
226 static const struct iavf_stats iavf_gstrings_stats[] = {
227 VF_STAT("rx_bytes", current_stats.rx_bytes),
228 VF_STAT("rx_unicast", current_stats.rx_unicast),
229 VF_STAT("rx_multicast", current_stats.rx_multicast),
230 VF_STAT("rx_broadcast", current_stats.rx_broadcast),
231 VF_STAT("rx_discards", current_stats.rx_discards),
232 VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
233 VF_STAT("tx_bytes", current_stats.tx_bytes),
234 VF_STAT("tx_unicast", current_stats.tx_unicast),
235 VF_STAT("tx_multicast", current_stats.tx_multicast),
236 VF_STAT("tx_broadcast", current_stats.tx_broadcast),
237 VF_STAT("tx_discards", current_stats.tx_discards),
238 VF_STAT("tx_errors", current_stats.tx_errors),
239 };
240
241 #define IAVF_STATS_LEN ARRAY_SIZE(iavf_gstrings_stats)
242
243 #define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats)
244
245 /**
246 * iavf_get_link_ksettings - Get Link Speed and Duplex settings
247 * @netdev: network interface device structure
248 * @cmd: ethtool command
249 *
250 * Reports speed/duplex settings. Because this is a VF, we don't know what
251 * kind of link we really have, so we fake it.
252 **/
253 static int iavf_get_link_ksettings(struct net_device *netdev,
254 struct ethtool_link_ksettings *cmd)
255 {
256 struct iavf_adapter *adapter = netdev_priv(netdev);
257
258 ethtool_link_ksettings_zero_link_mode(cmd, supported);
259 cmd->base.autoneg = AUTONEG_DISABLE;
260 cmd->base.port = PORT_NONE;
261 cmd->base.duplex = DUPLEX_FULL;
262
263 if (ADV_LINK_SUPPORT(adapter)) {
264 if (adapter->link_speed_mbps &&
265 adapter->link_speed_mbps < U32_MAX)
266 cmd->base.speed = adapter->link_speed_mbps;
267 else
268 cmd->base.speed = SPEED_UNKNOWN;
269
270 return 0;
271 }
272
273 switch (adapter->link_speed) {
274 case VIRTCHNL_LINK_SPEED_40GB:
275 cmd->base.speed = SPEED_40000;
276 break;
277 case VIRTCHNL_LINK_SPEED_25GB:
278 cmd->base.speed = SPEED_25000;
279 break;
280 case VIRTCHNL_LINK_SPEED_20GB:
281 cmd->base.speed = SPEED_20000;
282 break;
283 case VIRTCHNL_LINK_SPEED_10GB:
284 cmd->base.speed = SPEED_10000;
285 break;
286 case VIRTCHNL_LINK_SPEED_5GB:
287 cmd->base.speed = SPEED_5000;
288 break;
289 case VIRTCHNL_LINK_SPEED_2_5GB:
290 cmd->base.speed = SPEED_2500;
291 break;
292 case VIRTCHNL_LINK_SPEED_1GB:
293 cmd->base.speed = SPEED_1000;
294 break;
295 case VIRTCHNL_LINK_SPEED_100MB:
296 cmd->base.speed = SPEED_100;
297 break;
298 default:
299 break;
300 }
301
302 return 0;
303 }
304
305 /**
306 * iavf_get_sset_count - Get length of string set
307 * @netdev: network interface device structure
308 * @sset: id of string set
309 *
310 * Reports size of various string tables.
311 **/
312 static int iavf_get_sset_count(struct net_device *netdev, int sset)
313 {
314 /* Report the maximum number of queues, even if not every queue is
315 * currently configured. Since allocation of queues is in pairs,
316 * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set
317 * at device creation and never changes.
318 */
319
320 if (sset == ETH_SS_STATS)
321 return IAVF_STATS_LEN +
322 (IAVF_QUEUE_STATS_LEN * 2 *
323 netdev->real_num_tx_queues);
324 else
325 return -EINVAL;
326 }
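/* Worked example: iavf_gstrings_stats below has 12 entries and
 * iavf_gstrings_queue_stats has 2, so a VF with real_num_tx_queues == 4
 * reports 12 + (2 * 2 * 4) = 28 statistics for ETH_SS_STATS.
 */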
327
328 /**
329 * iavf_get_ethtool_stats - report device statistics
330 * @netdev: network interface device structure
331 * @stats: ethtool statistics structure
332 * @data: pointer to data buffer
333 *
334 * All statistics are added to the data buffer as an array of u64.
335 **/
336 static void iavf_get_ethtool_stats(struct net_device *netdev,
337 struct ethtool_stats *stats, u64 *data)
338 {
339 struct iavf_adapter *adapter = netdev_priv(netdev);
340 unsigned int i;
341
342 /* Explicitly request stats refresh */
343 iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS);
344
345 iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
346
347 rcu_read_lock();
348 /* As num_active_queues describes both Tx and Rx queues, we can use
349 * it to iterate over the rings' stats.
350 */
351 for (i = 0; i < adapter->num_active_queues; i++) {
352 struct iavf_ring *ring;
353
354 /* Tx rings stats */
355 ring = &adapter->tx_rings[i];
356 iavf_add_queue_stats(&data, ring);
357
358 /* Rx rings stats */
359 ring = &adapter->rx_rings[i];
360 iavf_add_queue_stats(&data, ring);
361 }
362 rcu_read_unlock();
363 }
364
365 /**
366 * iavf_get_stat_strings - Get stat strings
367 * @netdev: network interface device structure
368 * @data: buffer for string data
369 *
370 * Builds the statistics string table
371 **/
372 static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
373 {
374 unsigned int i;
375
376 iavf_add_stat_strings(&data, iavf_gstrings_stats);
377
378 /* Queues are always allocated in pairs, so we just use
379 * real_num_tx_queues for both Tx and Rx queues.
380 */
381 for (i = 0; i < netdev->real_num_tx_queues; i++) {
382 iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
383 "tx", i);
384 iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
385 "rx", i);
386 }
387 }
388
389 /**
390 * iavf_get_strings - Get string set
391 * @netdev: network interface device structure
392 * @sset: id of string set
393 * @data: buffer for string data
394 *
395 * Builds string tables for various string sets
396 **/
397 static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
398 {
399 switch (sset) {
400 case ETH_SS_STATS:
401 iavf_get_stat_strings(netdev, data);
402 break;
403 default:
404 break;
405 }
406 }
407
408 /**
409 * iavf_get_msglevel - Get debug message level
410 * @netdev: network interface device structure
411 *
412 * Returns current debug message level.
413 **/
414 static u32 iavf_get_msglevel(struct net_device *netdev)
415 {
416 struct iavf_adapter *adapter = netdev_priv(netdev);
417
418 return adapter->msg_enable;
419 }
420
421 /**
422 * iavf_set_msglevel - Set debug message level
423 * @netdev: network interface device structure
424 * @data: message level
425 *
426 * Set current debug message level. Higher values cause the driver to
427 * be noisier.
428 **/
429 static void iavf_set_msglevel(struct net_device *netdev, u32 data)
430 {
431 struct iavf_adapter *adapter = netdev_priv(netdev);
432
433 if (IAVF_DEBUG_USER & data)
434 adapter->hw.debug_mask = data;
435 adapter->msg_enable = data;
436 }
437
438 /**
439 * iavf_get_drvinfo - Get driver info
440 * @netdev: network interface device structure
441 * @drvinfo: ethtool driver info structure
442 *
443 * Returns information about the driver and device for display to the user.
444 **/
445 static void iavf_get_drvinfo(struct net_device *netdev,
446 struct ethtool_drvinfo *drvinfo)
447 {
448 struct iavf_adapter *adapter = netdev_priv(netdev);
449
450 strscpy(drvinfo->driver, iavf_driver_name, 32);
451 strscpy(drvinfo->fw_version, "N/A", 4);
452 strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
453 }
454
455 /**
456 * iavf_get_ringparam - Get ring parameters
457 * @netdev: network interface device structure
458 * @ring: ethtool ringparam structure
459 * @kernel_ring: ethtool external ringparam structure
460 * @extack: netlink extended ACK report struct
461 *
462 * Returns current ring parameters. TX and RX rings are reported separately,
463 * but the number of rings is not reported.
464 **/
465 static void iavf_get_ringparam(struct net_device *netdev,
466 struct ethtool_ringparam *ring,
467 struct kernel_ethtool_ringparam *kernel_ring,
468 struct netlink_ext_ack *extack)
469 {
470 struct iavf_adapter *adapter = netdev_priv(netdev);
471
472 ring->rx_max_pending = IAVF_MAX_RXD;
473 ring->tx_max_pending = IAVF_MAX_TXD;
474 ring->rx_pending = adapter->rx_desc_count;
475 ring->tx_pending = adapter->tx_desc_count;
476 }
477
478 /**
479 * iavf_set_ringparam - Set ring parameters
480 * @netdev: network interface device structure
481 * @ring: ethtool ringparam structure
482 * @kernel_ring: ethtool external ringparam structure
483 * @extack: netlink extended ACK report struct
484 *
485 * Sets ring parameters. TX and RX rings are controlled separately, but the
486 * number of rings is not specified, so all rings get the same settings.
487 **/
488 static int iavf_set_ringparam(struct net_device *netdev,
489 struct ethtool_ringparam *ring,
490 struct kernel_ethtool_ringparam *kernel_ring,
491 struct netlink_ext_ack *extack)
492 {
493 struct iavf_adapter *adapter = netdev_priv(netdev);
494 u32 new_rx_count, new_tx_count;
495 int ret = 0;
496
497 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
498 return -EINVAL;
499
500 if (ring->tx_pending > IAVF_MAX_TXD ||
501 ring->tx_pending < IAVF_MIN_TXD ||
502 ring->rx_pending > IAVF_MAX_RXD ||
503 ring->rx_pending < IAVF_MIN_RXD) {
504 netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
505 ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
506 IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
507 return -EINVAL;
508 }
509
510 new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
511 if (new_tx_count != ring->tx_pending)
512 netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
513 new_tx_count);
514
515 new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
516 if (new_rx_count != ring->rx_pending)
517 netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
518 new_rx_count);
519
520 /* if nothing to do return success */
521 if ((new_tx_count == adapter->tx_desc_count) &&
522 (new_rx_count == adapter->rx_desc_count)) {
523 netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
524 return 0;
525 }
526
527 if (new_tx_count != adapter->tx_desc_count) {
528 netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
529 adapter->tx_desc_count, new_tx_count);
530 adapter->tx_desc_count = new_tx_count;
531 }
532
533 if (new_rx_count != adapter->rx_desc_count) {
534 netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
535 adapter->rx_desc_count, new_rx_count);
536 adapter->rx_desc_count = new_rx_count;
537 }
538
539 if (netif_running(netdev)) {
540 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
541 ret = iavf_wait_for_reset(adapter);
542 if (ret)
543 netdev_warn(netdev, "Changing ring parameters timeout or interrupted waiting for reset");
544 }
545
546 return ret;
547 }
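/* Example of the rounding above, assuming the usual descriptor multiple of
 * 32 (see IAVF_REQ_DESCRIPTOR_MULTIPLE for the real value): a request such
 * as "ethtool -G <iface> tx 500 rx 500" is aligned up by ALIGN(), i.e.
 * 500 -> 512, the rounded-up count is logged, and a reset is scheduled to
 * apply the new ring sizes.
 */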
548
549 /**
550 * __iavf_get_coalesce - get per-queue coalesce settings
551 * @netdev: the netdev to check
552 * @ec: ethtool coalesce data structure
553 * @queue: which queue to pick
554 *
555 * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
556 * are per queue. If queue is <0 then we default to queue 0 as the
557 * representative value.
558 **/
559 static int __iavf_get_coalesce(struct net_device *netdev,
560 struct ethtool_coalesce *ec, int queue)
561 {
562 struct iavf_adapter *adapter = netdev_priv(netdev);
563 struct iavf_ring *rx_ring, *tx_ring;
564
565 /* Rx and Tx usecs are per-queue values. If the user doesn't specify a
566 * queue, return queue 0's value as the representative.
567 */
568 if (queue < 0)
569 queue = 0;
570 else if (queue >= adapter->num_active_queues)
571 return -EINVAL;
572
573 rx_ring = &adapter->rx_rings[queue];
574 tx_ring = &adapter->tx_rings[queue];
575
576 if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
577 ec->use_adaptive_rx_coalesce = 1;
578
579 if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
580 ec->use_adaptive_tx_coalesce = 1;
581
582 ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
583 ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
584
585 return 0;
586 }
587
588 /**
589 * iavf_get_coalesce - Get interrupt coalescing settings
590 * @netdev: network interface device structure
591 * @ec: ethtool coalesce structure
592 * @kernel_coal: ethtool CQE mode setting structure
593 * @extack: extack for reporting error messages
594 *
595 * Returns current coalescing settings. This is referred to elsewhere in the
596 * driver as Interrupt Throttle Rate, as this is how the hardware describes
597 * this functionality. Note that if per-queue settings have been modified this
598 * only represents the settings of queue 0.
599 **/
600 static int iavf_get_coalesce(struct net_device *netdev,
601 struct ethtool_coalesce *ec,
602 struct kernel_ethtool_coalesce *kernel_coal,
603 struct netlink_ext_ack *extack)
604 {
605 return __iavf_get_coalesce(netdev, ec, -1);
606 }
607
608 /**
609 * iavf_get_per_queue_coalesce - get coalesce values for specific queue
610 * @netdev: netdev to read
611 * @ec: coalesce settings from ethtool
612 * @queue: the queue to read
613 *
614 * Read specific queue's coalesce settings.
615 **/
616 static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
617 struct ethtool_coalesce *ec)
618 {
619 return __iavf_get_coalesce(netdev, ec, queue);
620 }
621
622 /**
623 * iavf_set_itr_per_queue - set ITR values for specific queue
624 * @adapter: the VF adapter struct to set values for
625 * @ec: coalesce settings from ethtool
626 * @queue: the queue to modify
627 *
628 * Change the ITR settings for a specific queue.
629 **/
630 static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
631 struct ethtool_coalesce *ec, int queue)
632 {
633 struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
634 struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
635 struct iavf_q_vector *q_vector;
636 u16 itr_setting;
637
638 itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
639
640 if (ec->rx_coalesce_usecs != itr_setting &&
641 ec->use_adaptive_rx_coalesce) {
642 netif_info(adapter, drv, adapter->netdev,
643 "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
644 return -EINVAL;
645 }
646
647 itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
648
649 if (ec->tx_coalesce_usecs != itr_setting &&
650 ec->use_adaptive_tx_coalesce) {
651 netif_info(adapter, drv, adapter->netdev,
652 "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
653 return -EINVAL;
654 }
655
656 rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
657 tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
658
659 rx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
660 if (!ec->use_adaptive_rx_coalesce)
661 rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
662
663 tx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
664 if (!ec->use_adaptive_tx_coalesce)
665 tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
666
667 q_vector = rx_ring->q_vector;
668 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
669
670 q_vector = tx_ring->q_vector;
671 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
672
673 /* The interrupt handler itself will take care of programming
674 * the Tx and Rx ITR values based on the values we have entered
675 * into the q_vector, no need to write the values now.
676 */
677 return 0;
678 }
679
680 /**
681 * __iavf_set_coalesce - set coalesce settings for particular queue
682 * @netdev: the netdev to change
683 * @ec: ethtool coalesce settings
684 * @queue: the queue to change
685 *
686 * Sets the coalesce settings for a particular queue.
687 **/
688 static int __iavf_set_coalesce(struct net_device *netdev,
689 struct ethtool_coalesce *ec, int queue)
690 {
691 struct iavf_adapter *adapter = netdev_priv(netdev);
692 int i;
693
694 if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) {
695 netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
696 return -EINVAL;
697 } else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) {
698 netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
699 return -EINVAL;
700 }
701
702 /* Rx and Tx usecs have per-queue values. If the user doesn't specify a
703 * queue, apply to all queues.
704 */
705 if (queue < 0) {
706 for (i = 0; i < adapter->num_active_queues; i++)
707 if (iavf_set_itr_per_queue(adapter, ec, i))
708 return -EINVAL;
709 } else if (queue < adapter->num_active_queues) {
710 if (iavf_set_itr_per_queue(adapter, ec, queue))
711 return -EINVAL;
712 } else {
713 netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
714 adapter->num_active_queues - 1);
715 return -EINVAL;
716 }
717
718 return 0;
719 }
720
721 /**
722 * iavf_set_coalesce - Set interrupt coalescing settings
723 * @netdev: network interface device structure
724 * @ec: ethtool coalesce structure
725 * @kernel_coal: ethtool CQE mode setting structure
726 * @extack: extack for reporting error messages
727 *
728 * Change current coalescing settings for every queue.
729 **/
730 static int iavf_set_coalesce(struct net_device *netdev,
731 struct ethtool_coalesce *ec,
732 struct kernel_ethtool_coalesce *kernel_coal,
733 struct netlink_ext_ack *extack)
734 {
735 return __iavf_set_coalesce(netdev, ec, -1);
736 }
737
738 /**
739 * iavf_set_per_queue_coalesce - set specific queue's coalesce settings
740 * @netdev: the netdev to change
741 * @ec: ethtool's coalesce settings
742 * @queue: the queue to modify
743 *
744 * Modifies a specific queue's coalesce settings.
745 */
746 static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
747 struct ethtool_coalesce *ec)
748 {
749 return __iavf_set_coalesce(netdev, ec, queue);
750 }
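/* For reference only (assumed ethtool usage, not taken from this file):
 * the device-wide path above is reached by e.g.
 * "ethtool -C <iface> rx-usecs 50 adaptive-rx off", while a per-queue
 * request such as
 * "ethtool --per-queue <iface> queue_mask 0x1 --coalesce rx-usecs 50"
 * ends up in __iavf_set_coalesce() with queue == 0.
 */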
751
752 /**
753 * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool
754 * flow type values
755 * @flow: filter type to be converted
756 *
757 * Returns the corresponding ethtool flow type.
758 */
759 static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow)
760 {
761 switch (flow) {
762 case IAVF_FDIR_FLOW_IPV4_TCP:
763 return TCP_V4_FLOW;
764 case IAVF_FDIR_FLOW_IPV4_UDP:
765 return UDP_V4_FLOW;
766 case IAVF_FDIR_FLOW_IPV4_SCTP:
767 return SCTP_V4_FLOW;
768 case IAVF_FDIR_FLOW_IPV4_AH:
769 return AH_V4_FLOW;
770 case IAVF_FDIR_FLOW_IPV4_ESP:
771 return ESP_V4_FLOW;
772 case IAVF_FDIR_FLOW_IPV4_OTHER:
773 return IPV4_USER_FLOW;
774 case IAVF_FDIR_FLOW_IPV6_TCP:
775 return TCP_V6_FLOW;
776 case IAVF_FDIR_FLOW_IPV6_UDP:
777 return UDP_V6_FLOW;
778 case IAVF_FDIR_FLOW_IPV6_SCTP:
779 return SCTP_V6_FLOW;
780 case IAVF_FDIR_FLOW_IPV6_AH:
781 return AH_V6_FLOW;
782 case IAVF_FDIR_FLOW_IPV6_ESP:
783 return ESP_V6_FLOW;
784 case IAVF_FDIR_FLOW_IPV6_OTHER:
785 return IPV6_USER_FLOW;
786 case IAVF_FDIR_FLOW_NON_IP_L2:
787 return ETHER_FLOW;
788 default:
789 /* 0 is undefined ethtool flow */
790 return 0;
791 }
792 }
793
794 /**
795 * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
796 * @eth: Ethtool flow type to be converted
797 *
798 * Returns flow enum
799 */
800 static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth)
801 {
802 switch (eth) {
803 case TCP_V4_FLOW:
804 return IAVF_FDIR_FLOW_IPV4_TCP;
805 case UDP_V4_FLOW:
806 return IAVF_FDIR_FLOW_IPV4_UDP;
807 case SCTP_V4_FLOW:
808 return IAVF_FDIR_FLOW_IPV4_SCTP;
809 case AH_V4_FLOW:
810 return IAVF_FDIR_FLOW_IPV4_AH;
811 case ESP_V4_FLOW:
812 return IAVF_FDIR_FLOW_IPV4_ESP;
813 case IPV4_USER_FLOW:
814 return IAVF_FDIR_FLOW_IPV4_OTHER;
815 case TCP_V6_FLOW:
816 return IAVF_FDIR_FLOW_IPV6_TCP;
817 case UDP_V6_FLOW:
818 return IAVF_FDIR_FLOW_IPV6_UDP;
819 case SCTP_V6_FLOW:
820 return IAVF_FDIR_FLOW_IPV6_SCTP;
821 case AH_V6_FLOW:
822 return IAVF_FDIR_FLOW_IPV6_AH;
823 case ESP_V6_FLOW:
824 return IAVF_FDIR_FLOW_IPV6_ESP;
825 case IPV6_USER_FLOW:
826 return IAVF_FDIR_FLOW_IPV6_OTHER;
827 case ETHER_FLOW:
828 return IAVF_FDIR_FLOW_NON_IP_L2;
829 default:
830 return IAVF_FDIR_FLOW_NONE;
831 }
832 }
833
834 /**
835 * iavf_is_mask_valid - check mask field set
836 * @mask: full mask to check
837 * @field: field for which mask should be valid
838 *
839 * If the mask is fully set return true. If it is not valid for field return
840 * false.
841 */
842 static bool iavf_is_mask_valid(u64 mask, u64 field)
843 {
844 return (mask & field) == field;
845 }
846
847 /**
848 * iavf_parse_rx_flow_user_data - deconstruct user-defined data
849 * @fsp: pointer to ethtool Rx flow specification
850 * @fltr: pointer to Flow Director filter for userdef data storage
851 *
852 * Returns 0 on success, negative error value on failure
853 */
854 static int
855 iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
856 struct iavf_fdir_fltr *fltr)
857 {
858 struct iavf_flex_word *flex;
859 int i, cnt = 0;
860
861 if (!(fsp->flow_type & FLOW_EXT))
862 return 0;
863
864 for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) {
865 #define IAVF_USERDEF_FLEX_WORD_M GENMASK(15, 0)
866 #define IAVF_USERDEF_FLEX_OFFS_S 16
867 #define IAVF_USERDEF_FLEX_OFFS_M GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S)
868 #define IAVF_USERDEF_FLEX_FLTR_M GENMASK(31, 0)
869 u32 value = be32_to_cpu(fsp->h_ext.data[i]);
870 u32 mask = be32_to_cpu(fsp->m_ext.data[i]);
871
872 if (!value || !mask)
873 continue;
874
875 if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M))
876 return -EINVAL;
877
878 /* 504 is the maximum value for offsets, and offset is measured
879 * from the start of the MAC address.
880 */
881 #define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
882 flex = &fltr->flex_words[cnt++];
883 flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
884 flex->offset = FIELD_GET(IAVF_USERDEF_FLEX_OFFS_M, value);
885 if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
886 return -EINVAL;
887 }
888
889 fltr->flex_cnt = cnt;
890
891 return 0;
892 }
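/* Illustration of the user-def layout parsed above: each 32-bit word in
 * fsp->h_ext.data packs a byte offset in bits 31:16 and a 16-bit match word
 * in bits 15:0, with the offset counted from the start of the MAC header and
 * capped at 504. A hypothetical value of 0x001200ff therefore requests a
 * match of flex word 0x00ff at offset 0x12; the corresponding mask word must
 * be fully set or the filter is rejected above.
 */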
893
894 /**
895 * iavf_fill_rx_flow_ext_data - fill the additional data
896 * @fsp: pointer to ethtool Rx flow specification
897 * @fltr: pointer to Flow Director filter to get additional data
898 */
899 static void
900 iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp,
901 struct iavf_fdir_fltr *fltr)
902 {
903 if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1])
904 return;
905
906 fsp->flow_type |= FLOW_EXT;
907
908 memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data));
909 memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data));
910 }
911
912 /**
913 * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data
914 * @adapter: the VF adapter structure that contains filter list
915 * @cmd: ethtool command data structure to receive the filter data
916 *
917 * Returns 0 as expected for success by ethtool
918 */
919 static int
920 iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
921 struct ethtool_rxnfc *cmd)
922 {
923 struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
924 struct iavf_fdir_fltr *rule = NULL;
925 int ret = 0;
926
927 if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
928 return -EOPNOTSUPP;
929
930 spin_lock_bh(&adapter->fdir_fltr_lock);
931
932 rule = iavf_find_fdir_fltr(adapter, false, fsp->location);
933 if (!rule) {
934 ret = -EINVAL;
935 goto release_lock;
936 }
937
938 fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type);
939
940 memset(&fsp->m_u, 0, sizeof(fsp->m_u));
941 memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
942
943 switch (fsp->flow_type) {
944 case TCP_V4_FLOW:
945 case UDP_V4_FLOW:
946 case SCTP_V4_FLOW:
947 fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
948 fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
949 fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
950 fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
951 fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
952 fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
953 fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
954 fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
955 fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
956 fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
957 break;
958 case AH_V4_FLOW:
959 case ESP_V4_FLOW:
960 fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
961 fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
962 fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
963 fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
964 fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
965 fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
966 fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
967 fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
968 break;
969 case IPV4_USER_FLOW:
970 fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
971 fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
972 fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
973 fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
974 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
975 fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
976 fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
977 fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
978 fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
979 fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
980 fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
981 fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
982 break;
983 case TCP_V6_FLOW:
984 case UDP_V6_FLOW:
985 case SCTP_V6_FLOW:
986 memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
987 sizeof(struct in6_addr));
988 memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
989 sizeof(struct in6_addr));
990 fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
991 fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
992 fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
993 memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
994 sizeof(struct in6_addr));
995 memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
996 sizeof(struct in6_addr));
997 fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
998 fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
999 fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
1000 break;
1001 case AH_V6_FLOW:
1002 case ESP_V6_FLOW:
1003 memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1004 sizeof(struct in6_addr));
1005 memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1006 sizeof(struct in6_addr));
1007 fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
1008 fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
1009 memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1010 sizeof(struct in6_addr));
1011 memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1012 sizeof(struct in6_addr));
1013 fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
1014 fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
1015 break;
1016 case IPV6_USER_FLOW:
1017 memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1018 sizeof(struct in6_addr));
1019 memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1020 sizeof(struct in6_addr));
1021 fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
1022 fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
1023 fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
1024 memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1025 sizeof(struct in6_addr));
1026 memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1027 sizeof(struct in6_addr));
1028 fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
1029 fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
1030 fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
1031 break;
1032 case ETHER_FLOW:
1033 fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
1034 fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
1035 break;
1036 default:
1037 ret = -EINVAL;
1038 break;
1039 }
1040
1041 iavf_fill_rx_flow_ext_data(fsp, rule);
1042
1043 if (rule->action == VIRTCHNL_ACTION_DROP)
1044 fsp->ring_cookie = RX_CLS_FLOW_DISC;
1045 else
1046 fsp->ring_cookie = rule->q_index;
1047
1048 release_lock:
1049 spin_unlock_bh(&adapter->fdir_fltr_lock);
1050 return ret;
1051 }
1052
1053 /**
1054 * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
1055 * @adapter: the VF adapter structure containing the filter list
1056 * @cmd: ethtool command data structure
1057 * @rule_locs: ethtool array passed in from OS to receive filter IDs
1058 *
1059 * Returns 0 as expected for success by ethtool
1060 */
1061 static int
1062 iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
1063 u32 *rule_locs)
1064 {
1065 struct iavf_fdir_fltr *fltr;
1066 unsigned int cnt = 0;
1067 int val = 0;
1068
1069 if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1070 return -EOPNOTSUPP;
1071
1072 cmd->data = IAVF_MAX_FDIR_FILTERS;
1073
1074 spin_lock_bh(&adapter->fdir_fltr_lock);
1075
1076 list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
1077 if (iavf_is_raw_fdir(fltr))
1078 continue;
1079
1080 if (cnt == cmd->rule_cnt) {
1081 val = -EMSGSIZE;
1082 goto release_lock;
1083 }
1084 rule_locs[cnt] = fltr->loc;
1085 cnt++;
1086 }
1087
1088 release_lock:
1089 spin_unlock_bh(&adapter->fdir_fltr_lock);
1090 if (!val)
1091 cmd->rule_cnt = cnt;
1092
1093 return val;
1094 }
1095
1096 /**
1097 * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
1098 * @adapter: pointer to the VF adapter structure
1099 * @fsp: pointer to ethtool Rx flow specification
1100 * @fltr: filter structure
1101 */
1102 static int
1103 iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
1104 struct iavf_fdir_fltr *fltr)
1105 {
1106 u32 flow_type, q_index = 0;
1107 enum virtchnl_action act;
1108 int err;
1109
1110 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
1111 act = VIRTCHNL_ACTION_DROP;
1112 } else {
1113 q_index = fsp->ring_cookie;
1114 if (q_index >= adapter->num_active_queues)
1115 return -EINVAL;
1116
1117 act = VIRTCHNL_ACTION_QUEUE;
1118 }
1119
1120 fltr->action = act;
1121 fltr->loc = fsp->location;
1122 fltr->q_index = q_index;
1123
1124 if (fsp->flow_type & FLOW_EXT) {
1125 memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
1126 sizeof(fltr->ext_data.usr_def));
1127 memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
1128 sizeof(fltr->ext_mask.usr_def));
1129 }
1130
1131 flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
1132 fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);
1133
1134 switch (flow_type) {
1135 case TCP_V4_FLOW:
1136 case UDP_V4_FLOW:
1137 case SCTP_V4_FLOW:
1138 fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
1139 fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
1140 fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
1141 fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1142 fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
1143 fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
1144 fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
1145 fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
1146 fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
1147 fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
1148 fltr->ip_ver = 4;
1149 break;
1150 case AH_V4_FLOW:
1151 case ESP_V4_FLOW:
1152 fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
1153 fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
1154 fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
1155 fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
1156 fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
1157 fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
1158 fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
1159 fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
1160 fltr->ip_ver = 4;
1161 break;
1162 case IPV4_USER_FLOW:
1163 fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
1164 fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
1165 fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
1166 fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
1167 fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
1168 fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
1169 fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
1170 fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
1171 fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
1172 fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
1173 fltr->ip_ver = 4;
1174 break;
1175 case TCP_V6_FLOW:
1176 case UDP_V6_FLOW:
1177 case SCTP_V6_FLOW:
1178 memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1179 sizeof(struct in6_addr));
1180 memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1181 sizeof(struct in6_addr));
1182 fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
1183 fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
1184 fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
1185 memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1186 sizeof(struct in6_addr));
1187 memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1188 sizeof(struct in6_addr));
1189 fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
1190 fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
1191 fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
1192 fltr->ip_ver = 6;
1193 break;
1194 case AH_V6_FLOW:
1195 case ESP_V6_FLOW:
1196 memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src,
1197 sizeof(struct in6_addr));
1198 memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
1199 sizeof(struct in6_addr));
1200 fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
1201 fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
1202 memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
1203 sizeof(struct in6_addr));
1204 memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
1205 sizeof(struct in6_addr));
1206 fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
1207 fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
1208 fltr->ip_ver = 6;
1209 break;
1210 case IPV6_USER_FLOW:
1211 memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1212 sizeof(struct in6_addr));
1213 memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1214 sizeof(struct in6_addr));
1215 fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
1216 fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
1217 fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
1218 memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1219 sizeof(struct in6_addr));
1220 memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1221 sizeof(struct in6_addr));
1222 fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
1223 fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
1224 fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
1225 fltr->ip_ver = 6;
1226 break;
1227 case ETHER_FLOW:
1228 fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
1229 fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
1230 break;
1231 default:
1232 /* not doing un-parsed flow types */
1233 return -EINVAL;
1234 }
1235
1236 err = iavf_validate_fdir_fltr_masks(adapter, fltr);
1237 if (err)
1238 return err;
1239
1240 if (iavf_fdir_is_dup_fltr(adapter, fltr))
1241 return -EEXIST;
1242
1243 err = iavf_parse_rx_flow_user_data(fsp, fltr);
1244 if (err)
1245 return err;
1246
1247 return iavf_fill_fdir_add_msg(adapter, fltr);
1248 }
1249
1250 /**
1251 * iavf_add_fdir_ethtool - add Flow Director filter
1252 * @adapter: pointer to the VF adapter structure
1253 * @cmd: command to add Flow Director filter
1254 *
1255 * Returns 0 on success and negative values for failure
1256 */
1257 static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
1258 {
1259 struct ethtool_rx_flow_spec *fsp = &cmd->fs;
1260 struct iavf_fdir_fltr *fltr;
1261 int err;
1262
1263 netdev_assert_locked(adapter->netdev);
1264
1265 if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1266 return -EOPNOTSUPP;
1267
1268 if (fsp->flow_type & FLOW_MAC_EXT)
1269 return -EINVAL;
1270
1271 spin_lock_bh(&adapter->fdir_fltr_lock);
1272 if (iavf_find_fdir_fltr(adapter, false, fsp->location)) {
1273 dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
1274 spin_unlock_bh(&adapter->fdir_fltr_lock);
1275 return -EEXIST;
1276 }
1277 spin_unlock_bh(&adapter->fdir_fltr_lock);
1278
1279 fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
1280 if (!fltr)
1281 return -ENOMEM;
1282
1283 err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
1284 if (!err)
1285 err = iavf_fdir_add_fltr(adapter, fltr);
1286
1287 if (err)
1288 kfree(fltr);
1289
1290 return err;
1291 }
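/* For reference only (assumed ethtool usage, not taken from this file): a
 * filter handled by the path above could be installed with something like
 *
 *	ethtool -N <iface> flow-type tcp4 src-ip 192.168.0.1 dst-port 80 \
 *		action 2 loc 5
 *
 * which arrives here as ETHTOOL_SRXCLSRLINS with fsp->location == 5 and
 * fsp->ring_cookie == 2 (queue 2); "action -1" would map to
 * RX_CLS_FLOW_DISC and thus VIRTCHNL_ACTION_DROP.
 */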
1292
1293 /**
1294 * iavf_del_fdir_ethtool - delete Flow Director filter
1295 * @adapter: pointer to the VF adapter structure
1296 * @cmd: command to delete Flow Director filter
1297 *
1298 * Returns 0 on success and negative values for failure
1299 */
1300 static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
1301 {
1302 struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1303
1304 if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1305 return -EOPNOTSUPP;
1306
1307 return iavf_fdir_del_fltr(adapter, false, fsp->location);
1308 }
1309
1310 /**
1311 * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input
1312 * @cmd: ethtool rxnfc command
1313 *
1314 * This function parses the rxnfc command and returns intended
1315 * header types for RSS configuration
1316 */
1317 static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
1318 {
1319 u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
1320
1321 switch (cmd->flow_type) {
1322 case TCP_V4_FLOW:
1323 hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
1324 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1325 break;
1326 case UDP_V4_FLOW:
1327 hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
1328 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1329 break;
1330 case SCTP_V4_FLOW:
1331 hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
1332 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1333 break;
1334 case TCP_V6_FLOW:
1335 hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
1336 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1337 break;
1338 case UDP_V6_FLOW:
1339 hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
1340 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1341 break;
1342 case SCTP_V6_FLOW:
1343 hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
1344 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1345 break;
1346 default:
1347 break;
1348 }
1349
1350 return hdrs;
1351 }
1352
1353 /**
1354 * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
1355 * @cmd: ethtool rxnfc command
1356 * @symm: true if Symmetric Toeplitz is set
1357 *
1358 * This function parses the rxnfc command and returns intended hash fields for
1359 * RSS configuration
1360 */
1361 static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
1362 {
1363 u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
1364
1365 if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) {
1366 switch (cmd->flow_type) {
1367 case TCP_V4_FLOW:
1368 case UDP_V4_FLOW:
1369 case SCTP_V4_FLOW:
1370 if (cmd->data & RXH_IP_SRC)
1371 hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
1372 if (cmd->data & RXH_IP_DST)
1373 hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
1374 break;
1375 case TCP_V6_FLOW:
1376 case UDP_V6_FLOW:
1377 case SCTP_V6_FLOW:
1378 if (cmd->data & RXH_IP_SRC)
1379 hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
1380 if (cmd->data & RXH_IP_DST)
1381 hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA;
1382 break;
1383 default:
1384 break;
1385 }
1386 }
1387
1388 if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) {
1389 switch (cmd->flow_type) {
1390 case TCP_V4_FLOW:
1391 case TCP_V6_FLOW:
1392 if (cmd->data & RXH_L4_B_0_1)
1393 hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
1394 if (cmd->data & RXH_L4_B_2_3)
1395 hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
1396 break;
1397 case UDP_V4_FLOW:
1398 case UDP_V6_FLOW:
1399 if (cmd->data & RXH_L4_B_0_1)
1400 hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
1401 if (cmd->data & RXH_L4_B_2_3)
1402 hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT;
1403 break;
1404 case SCTP_V4_FLOW:
1405 case SCTP_V6_FLOW:
1406 if (cmd->data & RXH_L4_B_0_1)
1407 hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT;
1408 if (cmd->data & RXH_L4_B_2_3)
1409 hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT;
1410 break;
1411 default:
1412 break;
1413 }
1414 }
1415
1416 return hfld;
1417 }
1418
1419 /**
1420 * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash
1421 * @adapter: pointer to the VF adapter structure
1422 * @cmd: ethtool rxnfc command
1423 *
1424 * Returns Success if the flow input set is supported.
1425 */
1426 static int
1427 iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
1428 struct ethtool_rxnfc *cmd)
1429 {
1430 struct iavf_adv_rss *rss_old, *rss_new;
1431 bool rss_new_add = false;
1432 bool symm = false;
1433 u64 hash_flds;
1434 int err = 0;
1435 u32 hdrs;
1436
1437 netdev_assert_locked(adapter->netdev);
1438
1439 if (!ADV_RSS_SUPPORT(adapter))
1440 return -EOPNOTSUPP;
1441
1442 symm = !!(adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC);
1443
1444 hdrs = iavf_adv_rss_parse_hdrs(cmd);
1445 if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
1446 return -EINVAL;
1447
1448 hash_flds = iavf_adv_rss_parse_hash_flds(cmd, symm);
1449 if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
1450 return -EINVAL;
1451
1452 rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL);
1453 if (!rss_new)
1454 return -ENOMEM;
1455
1456 if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds,
1457 symm)) {
1458 kfree(rss_new);
1459 return -EINVAL;
1460 }
1461
1462 spin_lock_bh(&adapter->adv_rss_lock);
1463 rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
1464 if (rss_old) {
1465 if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
1466 err = -EBUSY;
1467 } else if (rss_old->hash_flds != hash_flds ||
1468 rss_old->symm != symm) {
1469 rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
1470 rss_old->hash_flds = hash_flds;
1471 rss_old->symm = symm;
1472 memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
1473 sizeof(rss_new->cfg_msg));
1474 } else {
1475 err = -EEXIST;
1476 }
1477 } else {
1478 rss_new_add = true;
1479 rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
1480 rss_new->packet_hdrs = hdrs;
1481 rss_new->hash_flds = hash_flds;
1482 rss_new->symm = symm;
1483 list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
1484 }
1485 spin_unlock_bh(&adapter->adv_rss_lock);
1486
1487 if (!err)
1488 iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
1489
1490 if (!rss_new_add)
1491 kfree(rss_new);
1492
1493 return err;
1494 }
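/* For reference only (assumed ethtool usage): the hash-field path above is
 * typically driven by a command such as
 *
 *	ethtool -N <iface> rx-flow-hash tcp4 sdfn
 *
 * where "sdfn" selects RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3, i.e. hashing on source/destination IPv4 addresses and TCP
 * ports for that flow type.
 */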
1495
1496 /**
1497 * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type
1498 * @adapter: pointer to the VF adapter structure
1499 * @cmd: ethtool rxnfc command
1500 *
1501 * Returns Success if the flow input set is supported.
1502 */
1503 static int
1504 iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter,
1505 struct ethtool_rxnfc *cmd)
1506 {
1507 struct iavf_adv_rss *rss;
1508 u64 hash_flds;
1509 u32 hdrs;
1510
1511 if (!ADV_RSS_SUPPORT(adapter))
1512 return -EOPNOTSUPP;
1513
1514 cmd->data = 0;
1515
1516 hdrs = iavf_adv_rss_parse_hdrs(cmd);
1517 if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
1518 return -EINVAL;
1519
1520 spin_lock_bh(&adapter->adv_rss_lock);
1521 rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
1522 if (rss)
1523 hash_flds = rss->hash_flds;
1524 else
1525 hash_flds = IAVF_ADV_RSS_HASH_INVALID;
1526 spin_unlock_bh(&adapter->adv_rss_lock);
1527
1528 if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
1529 return -EINVAL;
1530
1531 if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
1532 IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
1533 cmd->data |= (u64)RXH_IP_SRC;
1534
1535 if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
1536 IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
1537 cmd->data |= (u64)RXH_IP_DST;
1538
1539 if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
1540 IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
1541 IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
1542 cmd->data |= (u64)RXH_L4_B_0_1;
1543
1544 if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
1545 IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
1546 IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
1547 cmd->data |= (u64)RXH_L4_B_2_3;
1548
1549 return 0;
1550 }
1551
1552 /**
1553 * iavf_set_rxnfc - command to set Rx flow rules.
1554 * @netdev: network interface device structure
1555 * @cmd: ethtool rxnfc command
1556 *
1557 * Returns 0 for success and negative values for errors
1558 */
1559 static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1560 {
1561 struct iavf_adapter *adapter = netdev_priv(netdev);
1562 int ret = -EOPNOTSUPP;
1563
1564 switch (cmd->cmd) {
1565 case ETHTOOL_SRXCLSRLINS:
1566 ret = iavf_add_fdir_ethtool(adapter, cmd);
1567 break;
1568 case ETHTOOL_SRXCLSRLDEL:
1569 ret = iavf_del_fdir_ethtool(adapter, cmd);
1570 break;
1571 case ETHTOOL_SRXFH:
1572 ret = iavf_set_adv_rss_hash_opt(adapter, cmd);
1573 break;
1574 default:
1575 break;
1576 }
1577
1578 return ret;
1579 }
1580
1581 /**
1582 * iavf_get_rxnfc - command to get RX flow classification rules
1583 * @netdev: network interface device structure
1584 * @cmd: ethtool rxnfc command
1585 * @rule_locs: pointer to store rule locations
1586 *
1587 * Returns Success if the command is supported.
1588 **/
1589 static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1590 u32 *rule_locs)
1591 {
1592 struct iavf_adapter *adapter = netdev_priv(netdev);
1593 int ret = -EOPNOTSUPP;
1594
1595 switch (cmd->cmd) {
1596 case ETHTOOL_GRXRINGS:
1597 cmd->data = adapter->num_active_queues;
1598 ret = 0;
1599 break;
1600 case ETHTOOL_GRXCLSRLCNT:
1601 if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1602 break;
1603 spin_lock_bh(&adapter->fdir_fltr_lock);
1604 cmd->rule_cnt = adapter->fdir_active_fltr;
1605 spin_unlock_bh(&adapter->fdir_fltr_lock);
1606 cmd->data = IAVF_MAX_FDIR_FILTERS;
1607 ret = 0;
1608 break;
1609 case ETHTOOL_GRXCLSRULE:
1610 ret = iavf_get_ethtool_fdir_entry(adapter, cmd);
1611 break;
1612 case ETHTOOL_GRXCLSRLALL:
1613 ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
1614 break;
1615 case ETHTOOL_GRXFH:
1616 ret = iavf_get_adv_rss_hash_opt(adapter, cmd);
1617 break;
1618 default:
1619 break;
1620 }
1621
1622 return ret;
1623 }
1624 /**
1625 * iavf_get_channels: get the number of channels supported by the device
1626 * @netdev: network interface device structure
1627 * @ch: channel information structure
1628 *
1629 * For the purposes of our device, we only use combined channels, i.e. a tx/rx
1630 * queue pair. Report one extra channel to match our "other" MSI-X vector.
1631 **/
1632 static void iavf_get_channels(struct net_device *netdev,
1633 struct ethtool_channels *ch)
1634 {
1635 struct iavf_adapter *adapter = netdev_priv(netdev);
1636
1637 /* Report maximum channels */
1638 ch->max_combined = adapter->vsi_res->num_queue_pairs;
1639
1640 ch->max_other = NONQ_VECS;
1641 ch->other_count = NONQ_VECS;
1642
1643 ch->combined_count = adapter->num_active_queues;
1644 }
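
/* Example query (hypothetical device name):
 *
 *	ethtool -l eth0
 *
 * reports the maximum and current combined/other counts filled in above.
 */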
1645
1646 /**
1647  * iavf_set_channels - set the new channel count
1648 * @netdev: network interface device structure
1649 * @ch: channel information structure
1650 *
1651 * Negotiate a new number of channels with the PF then do a reset. During
1652 * reset we'll realloc queues and fix the RSS table. Returns 0 on success,
1653 * negative on failure.
1654 **/
1655 static int iavf_set_channels(struct net_device *netdev,
1656 struct ethtool_channels *ch)
1657 {
1658 struct iavf_adapter *adapter = netdev_priv(netdev);
1659 u32 num_req = ch->combined_count;
1660 int ret = 0;
1661
1662 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1663 adapter->num_tc) {
1664 dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
1665 return -EINVAL;
1666 }
1667
1668 /* All of these should have already been checked by ethtool before this
1669 * even gets to us, but just to be sure.
1670 */
1671 if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs)
1672 return -EINVAL;
1673
1674 if (num_req == adapter->num_active_queues)
1675 return 0;
1676
1677 if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
1678 return -EINVAL;
1679
1680 adapter->num_req_queues = num_req;
1681 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1682 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
1683
1684 ret = iavf_wait_for_reset(adapter);
1685 if (ret)
1686 		netdev_warn(netdev, "Changing channel count timed out or was interrupted while waiting for reset\n");
1687
1688 return ret;
1689 }
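
/* Example (device name and count are placeholders): requesting four queue
 * pairs with
 *
 *	ethtool -L eth0 combined 4
 *
 * stores num_req_queues and kicks off the reset described above; a request of
 * 0 or of anything above vsi_res->num_queue_pairs is rejected with -EINVAL.
 */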
1690
1691 /**
1692 * iavf_get_rxfh_key_size - get the RSS hash key size
1693 * @netdev: network interface device structure
1694 *
1695  * Returns the key size.
1696 **/
1697 static u32 iavf_get_rxfh_key_size(struct net_device *netdev)
1698 {
1699 struct iavf_adapter *adapter = netdev_priv(netdev);
1700
1701 return adapter->rss_key_size;
1702 }
1703
1704 /**
1705 * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size
1706 * @netdev: network interface device structure
1707 *
1708 * Returns the table size.
1709 **/
1710 static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
1711 {
1712 struct iavf_adapter *adapter = netdev_priv(netdev);
1713
1714 return adapter->rss_lut_size;
1715 }
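
/* The two sizes above let user space (e.g. `ethtool -x`, device name omitted
 * here) allocate correctly sized buffers before fetching the RSS key and
 * indirection table.
 */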
1716
1717 /**
1718 * iavf_get_rxfh - get the rx flow hash indirection table
1719 * @netdev: network interface device structure
1720 * @rxfh: pointer to param struct (indir, key, hfunc)
1721 *
1722  * Copies the cached RSS key and indirection table into @rxfh; always returns 0.
1723 **/
1724 static int iavf_get_rxfh(struct net_device *netdev,
1725 struct ethtool_rxfh_param *rxfh)
1726 {
1727 struct iavf_adapter *adapter = netdev_priv(netdev);
1728 u16 i;
1729
1730 rxfh->hfunc = ETH_RSS_HASH_TOP;
1731 if (adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
1732 rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
1733
1734 if (rxfh->key)
1735 memcpy(rxfh->key, adapter->rss_key, adapter->rss_key_size);
1736
1737 if (rxfh->indir)
1738 		/* Each 32-bit slot in 'indir' holds one LUT entry */
1739 for (i = 0; i < adapter->rss_lut_size; i++)
1740 rxfh->indir[i] = (u32)adapter->rss_lut[i];
1741
1742 return 0;
1743 }
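
/* Example (hypothetical device name):
 *
 *	ethtool -x eth0
 *
 * prints the cached RSS key and indirection table copied out above.
 */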
1744
1745 /**
1746 * iavf_set_rxfh - set the rx flow hash indirection table
1747 * @netdev: network interface device structure
1748 * @rxfh: pointer to param struct (indir, key, hfunc)
1749 * @extack: extended ACK from the Netlink message
1750 *
1751  * Returns -EOPNOTSUPP if an unsupported hash function is requested, otherwise
1752  * returns the result of programming the key and indirection table.
1753 **/
1754 static int iavf_set_rxfh(struct net_device *netdev,
1755 struct ethtool_rxfh_param *rxfh,
1756 struct netlink_ext_ack *extack)
1757 {
1758 struct iavf_adapter *adapter = netdev_priv(netdev);
1759 u16 i;
1760
1761 	/* Only the Toeplitz hash function is supported */
1762 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
1763 rxfh->hfunc != ETH_RSS_HASH_TOP)
1764 return -EOPNOTSUPP;
1765
1766 if ((rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
1767 adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) {
1768 if (!ADV_RSS_SUPPORT(adapter))
1769 return -EOPNOTSUPP;
1770 adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
1771 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
1772 } else if (!(rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
1773 adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC) {
1774 adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
1775 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
1776 }
1777
1778 if (!rxfh->key && !rxfh->indir)
1779 return 0;
1780
1781 if (rxfh->key)
1782 memcpy(adapter->rss_key, rxfh->key, adapter->rss_key_size);
1783
1784 if (rxfh->indir) {
1785 		/* Each 32-bit slot in 'indir' supplies one LUT entry */
1786 for (i = 0; i < adapter->rss_lut_size; i++)
1787 adapter->rss_lut[i] = (u8)(rxfh->indir[i]);
1788 }
1789
1790 return iavf_config_rss(adapter);
1791 }
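
/* Illustrative configuration commands (device name and key bytes are
 * placeholders):
 *
 *	ethtool -X eth0 equal 4
 *	ethtool -X eth0 hkey <colon-separated key bytes>
 *
 * With a recent ethtool that supports the xfrm option, something like
 * `ethtool -X eth0 xfrm symmetric-xor` selects the symmetric Toeplitz variant
 * handled via RXH_XFRM_SYM_XOR above.
 */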
1792
1793 static const struct ethtool_ops iavf_ethtool_ops = {
1794 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1795 ETHTOOL_COALESCE_USE_ADAPTIVE,
1796 .supported_input_xfrm = RXH_XFRM_SYM_XOR,
1797 .get_drvinfo = iavf_get_drvinfo,
1798 .get_link = ethtool_op_get_link,
1799 .get_ringparam = iavf_get_ringparam,
1800 .set_ringparam = iavf_set_ringparam,
1801 .get_strings = iavf_get_strings,
1802 .get_ethtool_stats = iavf_get_ethtool_stats,
1803 .get_sset_count = iavf_get_sset_count,
1804 .get_msglevel = iavf_get_msglevel,
1805 .set_msglevel = iavf_set_msglevel,
1806 .get_coalesce = iavf_get_coalesce,
1807 .set_coalesce = iavf_set_coalesce,
1808 .get_per_queue_coalesce = iavf_get_per_queue_coalesce,
1809 .set_per_queue_coalesce = iavf_set_per_queue_coalesce,
1810 .set_rxnfc = iavf_set_rxnfc,
1811 .get_rxnfc = iavf_get_rxnfc,
1812 .get_rxfh_indir_size = iavf_get_rxfh_indir_size,
1813 .get_rxfh = iavf_get_rxfh,
1814 .set_rxfh = iavf_set_rxfh,
1815 .get_channels = iavf_get_channels,
1816 .set_channels = iavf_set_channels,
1817 .get_rxfh_key_size = iavf_get_rxfh_key_size,
1818 .get_link_ksettings = iavf_get_link_ksettings,
1819 };
1820
1821 /**
1822 * iavf_set_ethtool_ops - Initialize ethtool ops struct
1823 * @netdev: network interface device structure
1824 *
1825 * Sets ethtool ops struct in our netdev so that ethtool can call
1826 * our functions.
1827 **/
1828 void iavf_set_ethtool_ops(struct net_device *netdev)
1829 {
1830 netdev->ethtool_ops = &iavf_ethtool_ops;
1831 }
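
/* Usage sketch (assumed call site, not shown in this file): the ops are
 * expected to be installed while the netdev is being set up, before
 * registration, e.g.:
 *
 *	iavf_set_ethtool_ops(netdev);
 *	err = register_netdev(netdev);
 */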
1832