1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2013 - 2018 Intel Corporation. */ 3 4 #include <linux/bitfield.h> 5 #include <linux/uaccess.h> 6 7 #include <net/netdev_lock.h> 8 9 /* ethtool support for iavf */ 10 #include "iavf.h" 11 12 /* ethtool statistics helpers */ 13 14 /** 15 * struct iavf_stats - definition for an ethtool statistic 16 * @stat_string: statistic name to display in ethtool -S output 17 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) 18 * @stat_offset: offsetof() the stat from a base pointer 19 * 20 * This structure defines a statistic to be added to the ethtool stats buffer. 21 * It defines a statistic as offset from a common base pointer. Stats should 22 * be defined in constant arrays using the IAVF_STAT macro, with every element 23 * of the array using the same _type for calculating the sizeof_stat and 24 * stat_offset. 25 * 26 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or 27 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from 28 * the iavf_add_ethtool_stat() helper function. 29 * 30 * The @stat_string is interpreted as a format string, allowing formatted 31 * values to be inserted while looping over multiple structures for a given 32 * statistics array. Thus, every statistic string in an array should have the 33 * same type and number of format specifiers, to be formatted by variadic 34 * arguments to the iavf_add_stat_string() helper function. 35 */ 36 struct iavf_stats { 37 char stat_string[ETH_GSTRING_LEN]; 38 int sizeof_stat; 39 int stat_offset; 40 }; 41 42 /* Helper macro to define an iavf_stat structure with proper size and type. 43 * Use this when defining constant statistics arrays. Note that @_type expects 44 * only a type name and is used multiple times. 
45 */ 46 #define IAVF_STAT(_type, _name, _stat) { \ 47 .stat_string = _name, \ 48 .sizeof_stat = sizeof_field(_type, _stat), \ 49 .stat_offset = offsetof(_type, _stat) \ 50 } 51 52 /* Helper macro for defining some statistics related to queues */ 53 #define IAVF_QUEUE_STAT(_name, _stat) \ 54 IAVF_STAT(struct iavf_ring, _name, _stat) 55 56 /* Stats associated with a Tx or Rx ring */ 57 static const struct iavf_stats iavf_gstrings_queue_stats[] = { 58 IAVF_QUEUE_STAT("%s-%u.packets", stats.packets), 59 IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes), 60 }; 61 62 /** 63 * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer 64 * @data: location to store the stat value 65 * @pointer: basis for where to copy from 66 * @stat: the stat definition 67 * 68 * Copies the stat data defined by the pointer and stat structure pair into 69 * the memory supplied as data. Used to implement iavf_add_ethtool_stats and 70 * iavf_add_queue_stats. If the pointer is null, data will be zero'd. 71 */ 72 static void 73 iavf_add_one_ethtool_stat(u64 *data, void *pointer, 74 const struct iavf_stats *stat) 75 { 76 char *p; 77 78 if (!pointer) { 79 /* ensure that the ethtool data buffer is zero'd for any stats 80 * which don't have a valid pointer. 
81 */ 82 *data = 0; 83 return; 84 } 85 86 p = (char *)pointer + stat->stat_offset; 87 switch (stat->sizeof_stat) { 88 case sizeof(u64): 89 *data = *((u64 *)p); 90 break; 91 case sizeof(u32): 92 *data = *((u32 *)p); 93 break; 94 case sizeof(u16): 95 *data = *((u16 *)p); 96 break; 97 case sizeof(u8): 98 *data = *((u8 *)p); 99 break; 100 default: 101 WARN_ONCE(1, "unexpected stat size for %s", 102 stat->stat_string); 103 *data = 0; 104 } 105 } 106 107 /** 108 * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer 109 * @data: ethtool stats buffer 110 * @pointer: location to copy stats from 111 * @stats: array of stats to copy 112 * @size: the size of the stats definition 113 * 114 * Copy the stats defined by the stats array using the pointer as a base into 115 * the data buffer supplied by ethtool. Updates the data pointer to point to 116 * the next empty location for successive calls to __iavf_add_ethtool_stats. 117 * If pointer is null, set the data values to zero and update the pointer to 118 * skip these stats. 119 */ 120 static void 121 __iavf_add_ethtool_stats(u64 **data, void *pointer, 122 const struct iavf_stats stats[], 123 const unsigned int size) 124 { 125 unsigned int i; 126 127 for (i = 0; i < size; i++) 128 iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]); 129 } 130 131 /** 132 * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer 133 * @data: ethtool stats buffer 134 * @pointer: location where stats are stored 135 * @stats: static const array of stat definitions 136 * 137 * Macro to ease the use of __iavf_add_ethtool_stats by taking a static 138 * constant stats array and passing the ARRAY_SIZE(). This avoids typos by 139 * ensuring that we pass the size associated with the given stats array. 140 * 141 * The parameter @stats is evaluated twice, so parameters with side effects 142 * should be avoided. 
143 */ 144 #define iavf_add_ethtool_stats(data, pointer, stats) \ 145 __iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) 146 147 /** 148 * iavf_add_queue_stats - copy queue statistics into supplied buffer 149 * @data: ethtool stats buffer 150 * @ring: the ring to copy 151 * 152 * Queue statistics must be copied while protected by 153 * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats. 154 * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the 155 * ring pointer is null, zero out the queue stat values and update the data 156 * pointer. Otherwise safely copy the stats from the ring into the supplied 157 * buffer and update the data pointer when finished. 158 * 159 * This function expects to be called while under rcu_read_lock(). 160 */ 161 static void 162 iavf_add_queue_stats(u64 **data, struct iavf_ring *ring) 163 { 164 const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats); 165 const struct iavf_stats *stats = iavf_gstrings_queue_stats; 166 unsigned int start; 167 unsigned int i; 168 169 /* To avoid invalid statistics values, ensure that we keep retrying 170 * the copy until we get a consistent value according to 171 * u64_stats_fetch_retry. But first, make sure our ring is 172 * non-null before attempting to access its syncp. 173 */ 174 do { 175 start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp); 176 for (i = 0; i < size; i++) 177 iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]); 178 } while (ring && u64_stats_fetch_retry(&ring->syncp, start)); 179 180 /* Once we successfully copy the stats in, update the data pointer */ 181 *data += size; 182 } 183 184 /** 185 * __iavf_add_stat_strings - copy stat strings into ethtool buffer 186 * @p: ethtool supplied buffer 187 * @stats: stat definitions array 188 * @size: size of the stats array 189 * 190 * Format and copy the strings described by stats into the buffer pointed at 191 * by p. 
192 */ 193 static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[], 194 const unsigned int size, ...) 195 { 196 unsigned int i; 197 198 for (i = 0; i < size; i++) { 199 va_list args; 200 201 va_start(args, size); 202 vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); 203 *p += ETH_GSTRING_LEN; 204 va_end(args); 205 } 206 } 207 208 /** 209 * iavf_add_stat_strings - copy stat strings into ethtool buffer 210 * @p: ethtool supplied buffer 211 * @stats: stat definitions array 212 * 213 * Format and copy the strings described by the const static stats value into 214 * the buffer pointed at by p. 215 * 216 * The parameter @stats is evaluated twice, so parameters with side effects 217 * should be avoided. Additionally, stats must be an array such that 218 * ARRAY_SIZE can be called on it. 219 */ 220 #define iavf_add_stat_strings(p, stats, ...) \ 221 __iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) 222 223 #define VF_STAT(_name, _stat) \ 224 IAVF_STAT(struct iavf_adapter, _name, _stat) 225 226 static const struct iavf_stats iavf_gstrings_stats[] = { 227 VF_STAT("rx_bytes", current_stats.rx_bytes), 228 VF_STAT("rx_unicast", current_stats.rx_unicast), 229 VF_STAT("rx_multicast", current_stats.rx_multicast), 230 VF_STAT("rx_broadcast", current_stats.rx_broadcast), 231 VF_STAT("rx_discards", current_stats.rx_discards), 232 VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), 233 VF_STAT("tx_bytes", current_stats.tx_bytes), 234 VF_STAT("tx_unicast", current_stats.tx_unicast), 235 VF_STAT("tx_multicast", current_stats.tx_multicast), 236 VF_STAT("tx_broadcast", current_stats.tx_broadcast), 237 VF_STAT("tx_discards", current_stats.tx_discards), 238 VF_STAT("tx_errors", current_stats.tx_errors), 239 }; 240 241 #define IAVF_STATS_LEN ARRAY_SIZE(iavf_gstrings_stats) 242 243 #define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats) 244 245 /** 246 * iavf_get_link_ksettings - Get Link Speed and Duplex settings 247 * 
@netdev: network interface device structure 248 * @cmd: ethtool command 249 * 250 * Reports speed/duplex settings. Because this is a VF, we don't know what 251 * kind of link we really have, so we fake it. 252 */ 253 static int iavf_get_link_ksettings(struct net_device *netdev, 254 struct ethtool_link_ksettings *cmd) 255 { 256 struct iavf_adapter *adapter = netdev_priv(netdev); 257 258 ethtool_link_ksettings_zero_link_mode(cmd, supported); 259 cmd->base.autoneg = AUTONEG_DISABLE; 260 cmd->base.port = PORT_NONE; 261 cmd->base.duplex = DUPLEX_FULL; 262 263 if (ADV_LINK_SUPPORT(adapter)) { 264 if (adapter->link_speed_mbps && 265 adapter->link_speed_mbps < U32_MAX) 266 cmd->base.speed = adapter->link_speed_mbps; 267 else 268 cmd->base.speed = SPEED_UNKNOWN; 269 270 return 0; 271 } 272 273 switch (adapter->link_speed) { 274 case VIRTCHNL_LINK_SPEED_40GB: 275 cmd->base.speed = SPEED_40000; 276 break; 277 case VIRTCHNL_LINK_SPEED_25GB: 278 cmd->base.speed = SPEED_25000; 279 break; 280 case VIRTCHNL_LINK_SPEED_20GB: 281 cmd->base.speed = SPEED_20000; 282 break; 283 case VIRTCHNL_LINK_SPEED_10GB: 284 cmd->base.speed = SPEED_10000; 285 break; 286 case VIRTCHNL_LINK_SPEED_5GB: 287 cmd->base.speed = SPEED_5000; 288 break; 289 case VIRTCHNL_LINK_SPEED_2_5GB: 290 cmd->base.speed = SPEED_2500; 291 break; 292 case VIRTCHNL_LINK_SPEED_1GB: 293 cmd->base.speed = SPEED_1000; 294 break; 295 case VIRTCHNL_LINK_SPEED_100MB: 296 cmd->base.speed = SPEED_100; 297 break; 298 default: 299 break; 300 } 301 302 return 0; 303 } 304 305 /** 306 * iavf_get_sset_count - Get length of string set 307 * @netdev: network interface device structure 308 * @sset: id of string set 309 * 310 * Reports size of various string tables. 311 */ 312 static int iavf_get_sset_count(struct net_device *netdev, int sset) 313 { 314 /* Report the maximum number queues, even if not every queue is 315 * currently configured. Since allocation of queues is in pairs, 316 * use netdev->num_tx_queues * 2. 
The num_tx_queues is set at 317 * device creation and never changes. 318 */ 319 320 if (sset == ETH_SS_STATS) 321 return IAVF_STATS_LEN + 322 (IAVF_QUEUE_STATS_LEN * 2 * netdev->num_tx_queues); 323 else 324 return -EINVAL; 325 } 326 327 /** 328 * iavf_get_ethtool_stats - report device statistics 329 * @netdev: network interface device structure 330 * @stats: ethtool statistics structure 331 * @data: pointer to data buffer 332 * 333 * All statistics are added to the data buffer as an array of u64. 334 */ 335 static void iavf_get_ethtool_stats(struct net_device *netdev, 336 struct ethtool_stats *stats, u64 *data) 337 { 338 struct iavf_adapter *adapter = netdev_priv(netdev); 339 unsigned int i; 340 341 /* Explicitly request stats refresh */ 342 iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS); 343 344 iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats); 345 346 rcu_read_lock(); 347 /* Use num_tx_queues to report stats for the maximum number of queues. 348 * Queues beyond num_active_queues will report zero. 349 */ 350 for (i = 0; i < netdev->num_tx_queues; i++) { 351 struct iavf_ring *tx_ring = NULL, *rx_ring = NULL; 352 353 if (i < adapter->num_active_queues) { 354 tx_ring = &adapter->tx_rings[i]; 355 rx_ring = &adapter->rx_rings[i]; 356 } 357 358 iavf_add_queue_stats(&data, tx_ring); 359 iavf_add_queue_stats(&data, rx_ring); 360 } 361 rcu_read_unlock(); 362 } 363 364 /** 365 * iavf_get_stat_strings - Get stat strings 366 * @netdev: network interface device structure 367 * @data: buffer for string data 368 * 369 * Builds the statistics string table 370 */ 371 static void iavf_get_stat_strings(struct net_device *netdev, u8 *data) 372 { 373 unsigned int i; 374 375 iavf_add_stat_strings(&data, iavf_gstrings_stats); 376 377 /* Queues are always allocated in pairs, so we just use 378 * num_tx_queues for both Tx and Rx queues. 
379 */ 380 for (i = 0; i < netdev->num_tx_queues; i++) { 381 iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, 382 "tx", i); 383 iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, 384 "rx", i); 385 } 386 } 387 388 /** 389 * iavf_get_strings - Get string set 390 * @netdev: network interface device structure 391 * @sset: id of string set 392 * @data: buffer for string data 393 * 394 * Builds string tables for various string sets 395 */ 396 static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data) 397 { 398 switch (sset) { 399 case ETH_SS_STATS: 400 iavf_get_stat_strings(netdev, data); 401 break; 402 default: 403 break; 404 } 405 } 406 407 /** 408 * iavf_get_msglevel - Get debug message level 409 * @netdev: network interface device structure 410 * 411 * Return: current debug message level. 412 */ 413 static u32 iavf_get_msglevel(struct net_device *netdev) 414 { 415 struct iavf_adapter *adapter = netdev_priv(netdev); 416 417 return adapter->msg_enable; 418 } 419 420 /** 421 * iavf_set_msglevel - Set debug message level 422 * @netdev: network interface device structure 423 * @data: message level 424 * 425 * Set current debug message level. Higher values cause the driver to 426 * be noisier. 427 */ 428 static void iavf_set_msglevel(struct net_device *netdev, u32 data) 429 { 430 struct iavf_adapter *adapter = netdev_priv(netdev); 431 432 if (IAVF_DEBUG_USER & data) 433 adapter->hw.debug_mask = data; 434 adapter->msg_enable = data; 435 } 436 437 /** 438 * iavf_get_drvinfo - Get driver info 439 * @netdev: network interface device structure 440 * @drvinfo: ethool driver info structure 441 * 442 * Fills @drvinfo with information about the driver and device. 
443 */ 444 static void iavf_get_drvinfo(struct net_device *netdev, 445 struct ethtool_drvinfo *drvinfo) 446 { 447 struct iavf_adapter *adapter = netdev_priv(netdev); 448 449 strscpy(drvinfo->driver, iavf_driver_name, 32); 450 strscpy(drvinfo->fw_version, "N/A", 4); 451 strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 452 } 453 454 /** 455 * iavf_get_ringparam - Get ring parameters 456 * @netdev: network interface device structure 457 * @ring: ethtool ringparam structure 458 * @kernel_ring: ethtool extenal ringparam structure 459 * @extack: netlink extended ACK report struct 460 * 461 * Fills @ring with current ring parameters. TX and RX rings are reported 462 * separately, but the number of rings is not reported. 463 */ 464 static void iavf_get_ringparam(struct net_device *netdev, 465 struct ethtool_ringparam *ring, 466 struct kernel_ethtool_ringparam *kernel_ring, 467 struct netlink_ext_ack *extack) 468 { 469 struct iavf_adapter *adapter = netdev_priv(netdev); 470 471 ring->rx_max_pending = IAVF_MAX_RXD; 472 ring->tx_max_pending = IAVF_MAX_TXD; 473 ring->rx_pending = adapter->rx_desc_count; 474 ring->tx_pending = adapter->tx_desc_count; 475 } 476 477 /** 478 * iavf_set_ringparam - Set ring parameters 479 * @netdev: network interface device structure 480 * @ring: ethtool ringparam structure 481 * @kernel_ring: ethtool external ringparam structure 482 * @extack: netlink extended ACK report struct 483 * 484 * Sets ring parameters. TX and RX rings are controlled separately, but the 485 * number of rings is not specified, so all rings get the same settings. 
486 */ 487 static int iavf_set_ringparam(struct net_device *netdev, 488 struct ethtool_ringparam *ring, 489 struct kernel_ethtool_ringparam *kernel_ring, 490 struct netlink_ext_ack *extack) 491 { 492 struct iavf_adapter *adapter = netdev_priv(netdev); 493 u32 new_rx_count, new_tx_count; 494 495 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 496 return -EINVAL; 497 498 if (ring->tx_pending > IAVF_MAX_TXD || 499 ring->tx_pending < IAVF_MIN_TXD || 500 ring->rx_pending > IAVF_MAX_RXD || 501 ring->rx_pending < IAVF_MIN_RXD) { 502 netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n", 503 ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD, 504 IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE); 505 return -EINVAL; 506 } 507 508 new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); 509 if (new_tx_count != ring->tx_pending) 510 netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n", 511 new_tx_count); 512 513 new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE); 514 if (new_rx_count != ring->rx_pending) 515 netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n", 516 new_rx_count); 517 518 /* if nothing to do return success */ 519 if ((new_tx_count == adapter->tx_desc_count) && 520 (new_rx_count == adapter->rx_desc_count)) { 521 netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n"); 522 return 0; 523 } 524 525 if (new_tx_count != adapter->tx_desc_count) { 526 netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n", 527 adapter->tx_desc_count, new_tx_count); 528 adapter->tx_desc_count = new_tx_count; 529 } 530 531 if (new_rx_count != adapter->rx_desc_count) { 532 netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n", 533 adapter->rx_desc_count, new_rx_count); 534 adapter->rx_desc_count = new_rx_count; 535 } 536 537 if (netif_running(netdev)) { 538 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 539 iavf_reset_step(adapter); 
540 } 541 542 return 0; 543 } 544 545 /** 546 * __iavf_get_coalesce - get per-queue coalesce settings 547 * @netdev: the netdev to check 548 * @ec: ethtool coalesce data structure 549 * @queue: which queue to pick 550 * 551 * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs 552 * are per queue. If queue is <0 then we default to queue 0 as the 553 * representative value. 554 */ 555 static int __iavf_get_coalesce(struct net_device *netdev, 556 struct ethtool_coalesce *ec, int queue) 557 { 558 struct iavf_adapter *adapter = netdev_priv(netdev); 559 struct iavf_ring *rx_ring, *tx_ring; 560 561 /* Rx and Tx usecs per queue value. If user doesn't specify the 562 * queue, return queue 0's value to represent. 563 */ 564 if (queue < 0) 565 queue = 0; 566 else if (queue >= adapter->num_active_queues) 567 return -EINVAL; 568 569 rx_ring = &adapter->rx_rings[queue]; 570 tx_ring = &adapter->tx_rings[queue]; 571 572 if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) 573 ec->use_adaptive_rx_coalesce = 1; 574 575 if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) 576 ec->use_adaptive_tx_coalesce = 1; 577 578 ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; 579 ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; 580 581 return 0; 582 } 583 584 /** 585 * iavf_get_coalesce - Get interrupt coalescing settings 586 * @netdev: network interface device structure 587 * @ec: ethtool coalesce structure 588 * @kernel_coal: ethtool CQE mode setting structure 589 * @extack: extack for reporting error messages 590 * 591 * Fills @ec with current coalescing settings. This is referred to elsewhere 592 * in the driver as Interrupt Throttle Rate, as this is how the hardware 593 * describes this functionality. Note that if per-queue settings have been 594 * modified this only represents the settings of queue 0. 
595 */ 596 static int iavf_get_coalesce(struct net_device *netdev, 597 struct ethtool_coalesce *ec, 598 struct kernel_ethtool_coalesce *kernel_coal, 599 struct netlink_ext_ack *extack) 600 { 601 return __iavf_get_coalesce(netdev, ec, -1); 602 } 603 604 /** 605 * iavf_get_per_queue_coalesce - get coalesce values for specific queue 606 * @netdev: netdev to read 607 * @ec: coalesce settings from ethtool 608 * @queue: the queue to read 609 * 610 * Read specific queue's coalesce settings. 611 */ 612 static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue, 613 struct ethtool_coalesce *ec) 614 { 615 return __iavf_get_coalesce(netdev, ec, queue); 616 } 617 618 /** 619 * iavf_set_itr_per_queue - set ITR values for specific queue 620 * @adapter: the VF adapter struct to set values for 621 * @ec: coalesce settings from ethtool 622 * @queue: the queue to modify 623 * 624 * Change the ITR settings for a specific queue. 625 */ 626 static int iavf_set_itr_per_queue(struct iavf_adapter *adapter, 627 struct ethtool_coalesce *ec, int queue) 628 { 629 struct iavf_ring *rx_ring = &adapter->rx_rings[queue]; 630 struct iavf_ring *tx_ring = &adapter->tx_rings[queue]; 631 struct iavf_q_vector *q_vector; 632 u16 itr_setting; 633 634 itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; 635 636 if (ec->rx_coalesce_usecs != itr_setting && 637 ec->use_adaptive_rx_coalesce) { 638 netif_info(adapter, drv, adapter->netdev, 639 "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); 640 return -EINVAL; 641 } 642 643 itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; 644 645 if (ec->tx_coalesce_usecs != itr_setting && 646 ec->use_adaptive_tx_coalesce) { 647 netif_info(adapter, drv, adapter->netdev, 648 "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); 649 return -EINVAL; 650 } 651 652 rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); 653 tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); 654 655 
rx_ring->itr_setting |= IAVF_ITR_DYNAMIC; 656 if (!ec->use_adaptive_rx_coalesce) 657 rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC; 658 659 tx_ring->itr_setting |= IAVF_ITR_DYNAMIC; 660 if (!ec->use_adaptive_tx_coalesce) 661 tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC; 662 663 q_vector = rx_ring->q_vector; 664 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); 665 666 q_vector = tx_ring->q_vector; 667 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); 668 669 /* The interrupt handler itself will take care of programming 670 * the Tx and Rx ITR values based on the values we have entered 671 * into the q_vector, no need to write the values now. 672 */ 673 return 0; 674 } 675 676 /** 677 * __iavf_set_coalesce - set coalesce settings for particular queue 678 * @netdev: the netdev to change 679 * @ec: ethtool coalesce settings 680 * @queue: the queue to change 681 * 682 * Sets the coalesce settings for a particular queue. 683 */ 684 static int __iavf_set_coalesce(struct net_device *netdev, 685 struct ethtool_coalesce *ec, int queue) 686 { 687 struct iavf_adapter *adapter = netdev_priv(netdev); 688 int i; 689 690 if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) { 691 netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); 692 return -EINVAL; 693 } else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) { 694 netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); 695 return -EINVAL; 696 } 697 698 /* Rx and Tx usecs has per queue value. If user doesn't specify the 699 * queue, apply to all queues. 
700 */ 701 if (queue < 0) { 702 for (i = 0; i < adapter->num_active_queues; i++) 703 if (iavf_set_itr_per_queue(adapter, ec, i)) 704 return -EINVAL; 705 } else if (queue < adapter->num_active_queues) { 706 if (iavf_set_itr_per_queue(adapter, ec, queue)) 707 return -EINVAL; 708 } else { 709 netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", 710 adapter->num_active_queues - 1); 711 return -EINVAL; 712 } 713 714 return 0; 715 } 716 717 /** 718 * iavf_set_coalesce - Set interrupt coalescing settings 719 * @netdev: network interface device structure 720 * @ec: ethtool coalesce structure 721 * @kernel_coal: ethtool CQE mode setting structure 722 * @extack: extack for reporting error messages 723 * 724 * Change current coalescing settings for every queue. 725 */ 726 static int iavf_set_coalesce(struct net_device *netdev, 727 struct ethtool_coalesce *ec, 728 struct kernel_ethtool_coalesce *kernel_coal, 729 struct netlink_ext_ack *extack) 730 { 731 return __iavf_set_coalesce(netdev, ec, -1); 732 } 733 734 /** 735 * iavf_set_per_queue_coalesce - set specific queue's coalesce settings 736 * @netdev: the netdev to change 737 * @ec: ethtool's coalesce settings 738 * @queue: the queue to modify 739 * 740 * Modifies a specific queue's coalesce settings. 741 */ 742 static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue, 743 struct ethtool_coalesce *ec) 744 { 745 return __iavf_set_coalesce(netdev, ec, queue); 746 } 747 748 /** 749 * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool 750 * flow type values 751 * @flow: filter type to be converted 752 * 753 * Returns the corresponding ethtool flow type. 
754 */ 755 static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow) 756 { 757 switch (flow) { 758 case IAVF_FDIR_FLOW_IPV4_TCP: 759 return TCP_V4_FLOW; 760 case IAVF_FDIR_FLOW_IPV4_UDP: 761 return UDP_V4_FLOW; 762 case IAVF_FDIR_FLOW_IPV4_SCTP: 763 return SCTP_V4_FLOW; 764 case IAVF_FDIR_FLOW_IPV4_AH: 765 return AH_V4_FLOW; 766 case IAVF_FDIR_FLOW_IPV4_ESP: 767 return ESP_V4_FLOW; 768 case IAVF_FDIR_FLOW_IPV4_OTHER: 769 return IPV4_USER_FLOW; 770 case IAVF_FDIR_FLOW_IPV6_TCP: 771 return TCP_V6_FLOW; 772 case IAVF_FDIR_FLOW_IPV6_UDP: 773 return UDP_V6_FLOW; 774 case IAVF_FDIR_FLOW_IPV6_SCTP: 775 return SCTP_V6_FLOW; 776 case IAVF_FDIR_FLOW_IPV6_AH: 777 return AH_V6_FLOW; 778 case IAVF_FDIR_FLOW_IPV6_ESP: 779 return ESP_V6_FLOW; 780 case IAVF_FDIR_FLOW_IPV6_OTHER: 781 return IPV6_USER_FLOW; 782 case IAVF_FDIR_FLOW_NON_IP_L2: 783 return ETHER_FLOW; 784 default: 785 /* 0 is undefined ethtool flow */ 786 return 0; 787 } 788 } 789 790 /** 791 * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum 792 * @eth: Ethtool flow type to be converted 793 * 794 * Returns flow enum 795 */ 796 static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth) 797 { 798 switch (eth) { 799 case TCP_V4_FLOW: 800 return IAVF_FDIR_FLOW_IPV4_TCP; 801 case UDP_V4_FLOW: 802 return IAVF_FDIR_FLOW_IPV4_UDP; 803 case SCTP_V4_FLOW: 804 return IAVF_FDIR_FLOW_IPV4_SCTP; 805 case AH_V4_FLOW: 806 return IAVF_FDIR_FLOW_IPV4_AH; 807 case ESP_V4_FLOW: 808 return IAVF_FDIR_FLOW_IPV4_ESP; 809 case IPV4_USER_FLOW: 810 return IAVF_FDIR_FLOW_IPV4_OTHER; 811 case TCP_V6_FLOW: 812 return IAVF_FDIR_FLOW_IPV6_TCP; 813 case UDP_V6_FLOW: 814 return IAVF_FDIR_FLOW_IPV6_UDP; 815 case SCTP_V6_FLOW: 816 return IAVF_FDIR_FLOW_IPV6_SCTP; 817 case AH_V6_FLOW: 818 return IAVF_FDIR_FLOW_IPV6_AH; 819 case ESP_V6_FLOW: 820 return IAVF_FDIR_FLOW_IPV6_ESP; 821 case IPV6_USER_FLOW: 822 return IAVF_FDIR_FLOW_IPV6_OTHER; 823 case ETHER_FLOW: 824 return IAVF_FDIR_FLOW_NON_IP_L2; 825 default: 826 return 
IAVF_FDIR_FLOW_NONE; 827 } 828 } 829 830 /** 831 * iavf_is_mask_valid - check mask field set 832 * @mask: full mask to check 833 * @field: field for which mask should be valid 834 * 835 * If the mask is fully set return true. If it is not valid for field return 836 * false. 837 */ 838 static bool iavf_is_mask_valid(u64 mask, u64 field) 839 { 840 return (mask & field) == field; 841 } 842 843 /** 844 * iavf_parse_rx_flow_user_data - deconstruct user-defined data 845 * @fsp: pointer to ethtool Rx flow specification 846 * @fltr: pointer to Flow Director filter for userdef data storage 847 * 848 * Returns 0 on success, negative error value on failure 849 */ 850 static int 851 iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, 852 struct iavf_fdir_fltr *fltr) 853 { 854 struct iavf_flex_word *flex; 855 int i, cnt = 0; 856 857 if (!(fsp->flow_type & FLOW_EXT)) 858 return 0; 859 860 for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) { 861 #define IAVF_USERDEF_FLEX_WORD_M GENMASK(15, 0) 862 #define IAVF_USERDEF_FLEX_OFFS_S 16 863 #define IAVF_USERDEF_FLEX_OFFS_M GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S) 864 #define IAVF_USERDEF_FLEX_FLTR_M GENMASK(31, 0) 865 u32 value = be32_to_cpu(fsp->h_ext.data[i]); 866 u32 mask = be32_to_cpu(fsp->m_ext.data[i]); 867 868 if (!value || !mask) 869 continue; 870 871 if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M)) 872 return -EINVAL; 873 874 /* 504 is the maximum value for offsets, and offset is measured 875 * from the start of the MAC address. 
876 */ 877 #define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504 878 flex = &fltr->flex_words[cnt++]; 879 flex->word = value & IAVF_USERDEF_FLEX_WORD_M; 880 flex->offset = FIELD_GET(IAVF_USERDEF_FLEX_OFFS_M, value); 881 if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL) 882 return -EINVAL; 883 } 884 885 fltr->flex_cnt = cnt; 886 887 return 0; 888 } 889 890 /** 891 * iavf_fill_rx_flow_ext_data - fill the additional data 892 * @fsp: pointer to ethtool Rx flow specification 893 * @fltr: pointer to Flow Director filter to get additional data 894 */ 895 static void 896 iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp, 897 struct iavf_fdir_fltr *fltr) 898 { 899 if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1]) 900 return; 901 902 fsp->flow_type |= FLOW_EXT; 903 904 memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data)); 905 memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data)); 906 } 907 908 /** 909 * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data 910 * @adapter: the VF adapter structure that contains filter list 911 * @cmd: ethtool command data structure to receive the filter data 912 * 913 * Returns 0 as expected for success by ethtool 914 */ 915 static int 916 iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter, 917 struct ethtool_rxnfc *cmd) 918 { 919 struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; 920 struct iavf_fdir_fltr *rule = NULL; 921 int ret = 0; 922 923 if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) 924 return -EOPNOTSUPP; 925 926 spin_lock_bh(&adapter->fdir_fltr_lock); 927 928 rule = iavf_find_fdir_fltr(adapter, false, fsp->location); 929 if (!rule) { 930 ret = -EINVAL; 931 goto release_lock; 932 } 933 934 fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type); 935 936 memset(&fsp->m_u, 0, sizeof(fsp->m_u)); 937 memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); 938 939 switch (fsp->flow_type) { 940 case TCP_V4_FLOW: 941 case UDP_V4_FLOW: 
	case SCTP_V4_FLOW:
		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
		fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
		fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
		fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
		fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
		fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
		fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
		fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
		fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
		fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
		fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
		fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
		fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
		fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
		fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
		fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
		break;
	case IPV4_USER_FLOW:
		fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
		fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
		fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
		fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
		fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
		/* ip_ver is always matched exactly */
		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
		fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		/* usr_ip6_spec and tcp_ip6_spec alias through the h_u/m_u
		 * union; their leading ip6src/ip6dst layout matches.
		 */
		memcpy(fsp->h_u.usr_ip6_spec.ip6src,
		       &rule->ip_data.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
		fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
		fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
		fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
		fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
		fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
		memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
		fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
		break;
	case IPV6_USER_FLOW:
		memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
		fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.usr_ip6_spec.ip6dst,
		       &rule->ip_mask.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
		fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
		fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
		break;
	case ETHER_FLOW:
		fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
		fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	iavf_fill_rx_flow_ext_data(fsp, rule);

	if (rule->action == VIRTCHNL_ACTION_DROP)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->q_index;

release_lock:
	spin_unlock_bh(&adapter->fdir_fltr_lock);
	return ret;
}

/**
 * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
 * @adapter: the VF adapter structure containing the filter list
 * @cmd: ethtool command data structure
 * @rule_locs: ethtool array passed in from OS to receive filter IDs
 *
 * Walks the filter list under fdir_fltr_lock, skipping raw (TC-installed)
 * filters which are not visible through ethtool. Stops with -EMSGSIZE if
 * the caller-provided array is too small.
 *
 * Returns 0 as expected for success by ethtool
 */
static int
iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
		       u32 *rule_locs)
{
	struct iavf_fdir_fltr *fltr;
	unsigned int cnt = 0;
	int val = 0;

	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
		return -EOPNOTSUPP;

	cmd->data = IAVF_MAX_FDIR_FILTERS;

	spin_lock_bh(&adapter->fdir_fltr_lock);

	list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
		/* raw filters are managed outside ethtool; don't report them */
		if (iavf_is_raw_fdir(fltr))
			continue;

		if (cnt == cmd->rule_cnt) {
			val = -EMSGSIZE;
			goto release_lock;
		}
		rule_locs[cnt] = fltr->loc;
		cnt++;
	}

release_lock:
	spin_unlock_bh(&adapter->fdir_fltr_lock);
	if (!val)
		cmd->rule_cnt = cnt;

	return val;
}

/**
 * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
 * @adapter: pointer to the VF adapter structure
 * @fsp: pointer to ethtool Rx flow specification
 * @fltr: filter structure
 *
 * Translates the ethtool flow spec into the driver's filter representation,
 * validates the masks, rejects duplicates, and builds the virtchnl add
 * message. Return: 0 on success or a negative errno.
 */
static int
iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
			struct iavf_fdir_fltr *fltr)
{
	u32 flow_type, q_index = 0;
	enum virtchnl_action act;
	int err;

	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		act = VIRTCHNL_ACTION_DROP;
	} else {
		q_index = fsp->ring_cookie;
		/* target queue must exist on this VF */
		if (q_index >= adapter->num_active_queues)
			return -EINVAL;

		act = VIRTCHNL_ACTION_QUEUE;
	}

	fltr->action = act;
	fltr->loc = fsp->location;
	fltr->q_index = q_index;

	if (fsp->flow_type & FLOW_EXT) {
		memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
		       sizeof(fltr->ext_data.usr_def));
		memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
		       sizeof(fltr->ext_mask.usr_def));
	}

	/* strip the modifier bits before mapping the base flow type */
	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);

	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
		fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
		fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
		fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
		fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
		fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
		fltr->ip_ver = 4;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
		fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
		fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
		fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
		fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
		fltr->ip_ver = 4;
		break;
	case IPV4_USER_FLOW:
		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
		fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
		fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
		fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
		fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
		fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
		fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
		fltr->ip_ver = 4;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		/* usr_ip6_spec/tcp_ip6_spec alias in the h_u/m_u union */
		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
		fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
		fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
		fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
		fltr->ip_ver = 6;
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		memcpy(&fltr->ip_data.v6_addrs.src_ip,
		       fsp->h_u.ah_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
		fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
		fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
		fltr->ip_ver = 6;
		break;
	case IPV6_USER_FLOW:
		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
		fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
		fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
		fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
		fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
		fltr->ip_ver = 6;
		break;
	case ETHER_FLOW:
		fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
		fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
		break;
	default:
		/* not doing un-parsed flow types */
		return -EINVAL;
	}

	err = iavf_validate_fdir_fltr_masks(adapter, fltr);
	if (err)
		return err;

	if (iavf_fdir_is_dup_fltr(adapter, fltr))
		return -EEXIST;

	err = iavf_parse_rx_flow_user_data(fsp, fltr);
	if (err)
		return err;

	return iavf_fill_fdir_add_msg(adapter, fltr);
}

/**
 * iavf_add_fdir_ethtool - add Flow Director filter
 * @adapter: pointer to the VF adapter structure
 * @cmd: command to add Flow Director filter
 *
 * Returns 0 on success and negative values for failure
 */
static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct iavf_fdir_fltr *fltr;
	int err;

	netdev_assert_locked(adapter->netdev);

	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
		return -EOPNOTSUPP;

	/* MAC extension matching is not supported by this device */
	if (fsp->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	spin_lock_bh(&adapter->fdir_fltr_lock);
	if (iavf_find_fdir_fltr(adapter, false, fsp->location)) {
		dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		return -EEXIST;
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	fltr = kzalloc_obj(*fltr);
	if (!fltr)
		return -ENOMEM;

	err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
	if (!err)
		err = iavf_fdir_add_fltr(adapter, fltr);

	/* on any failure the filter was not queued; free it here */
	if (err)
		kfree(fltr);

	return err;
}

/**
 * iavf_del_fdir_ethtool - delete Flow Director filter
 * @adapter: pointer to the VF adapter structure
 * @cmd: command to delete Flow Director filter
 *
 * Returns 0 on success and negative values for failure
 */
static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
		return -EOPNOTSUPP;

	return iavf_fdir_del_fltr(adapter, false, fsp->location);
}

/* Map an ethtool flow type to the set of advanced-RSS protocol header
 * segment flags. Returns IAVF_ADV_RSS_FLOW_SEG_HDR_NONE for unsupported
 * flow types.
 */
static u32 iavf_adv_rss_parse_hdrs(const struct ethtool_rxfh_fields *cmd)
{
	u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
		break;
	case UDP_V4_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
		break;
	case SCTP_V4_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
		break;
	case TCP_V6_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
		break;
	case UDP_V6_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
		break;
	case SCTP_V6_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
		break;
	case GTPU_V4_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
		break;
	case GTPC_V4_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC |
			IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
		break;
	case GTPC_TEID_V4_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID |
			IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
		break;
	case GTPU_EH_V4_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
		break;
	case GTPU_UL_V4_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
		break;
	case GTPU_DL_V4_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
		break;
	case GTPU_V6_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_IP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
		break;
	case GTPC_V6_FLOW:
		/* NOTE(review): unlike the V4 case, no UDP segment flag is
		 * set here — confirm this asymmetry is intended.
		 */
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
		break;
	case GTPC_TEID_V6_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPC_TEID |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
		break;
	case GTPU_EH_V6_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_EH |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
		break;
	case GTPU_UL_V6_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_UP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
		break;
	case GTPU_DL_V6_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_GTPU_DWN |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
		break;
	default:
		break;
	}

	return hdrs;
}

/* Translate the RXH_* bits in @cmd->data into the driver's hash-field
 * flags for @cmd->flow_type. Returns IAVF_ADV_RSS_HASH_INVALID when no
 * requested bit applies to the flow type.
 * NOTE(review): @symm is currently unused here — confirm whether symmetric
 * hashing should affect field selection.
 */
static u64
iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm)
{
	u64 hfld = IAVF_ADV_RSS_HASH_INVALID;

	if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) {
		switch (cmd->flow_type) {
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case GTPU_V4_FLOW:
		case GTPC_V4_FLOW:
		case GTPC_TEID_V4_FLOW:
		case GTPU_EH_V4_FLOW:
		case GTPU_UL_V4_FLOW:
		case GTPU_DL_V4_FLOW:
			if (cmd->data & RXH_IP_SRC)
				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
			if (cmd->data & RXH_IP_DST)
				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
			break;
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case GTPU_V6_FLOW:
		case GTPC_V6_FLOW:
		case GTPC_TEID_V6_FLOW:
		case GTPU_EH_V6_FLOW:
		case GTPU_UL_V6_FLOW:
		case GTPU_DL_V6_FLOW:
			if (cmd->data & RXH_IP_SRC)
				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
			if (cmd->data & RXH_IP_DST)
				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA;
			break;
		default:
			break;
		}
	}

	if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) {
		switch (cmd->flow_type) {
		case TCP_V4_FLOW:
		case TCP_V6_FLOW:
			if (cmd->data & RXH_L4_B_0_1)
				hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
			if (cmd->data & RXH_L4_B_2_3)
				hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
			break;
		case UDP_V4_FLOW:
		case UDP_V6_FLOW:
		case GTPC_V4_FLOW:
			if (cmd->data & RXH_L4_B_0_1)
				hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
			if (cmd->data & RXH_L4_B_2_3)
				hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT;
			break;
		case SCTP_V4_FLOW:
		case SCTP_V6_FLOW:
			if (cmd->data & RXH_L4_B_0_1)
				hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT;
			if (cmd->data & RXH_L4_B_2_3)
				hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT;
			break;
		default:
			break;
		}
	}
	if (cmd->data & RXH_GTP_TEID) {
		switch (cmd->flow_type) {
		case GTPC_TEID_V4_FLOW:
		case GTPC_TEID_V6_FLOW:
			hfld |= IAVF_ADV_RSS_HASH_FLD_GTPC_TEID;
			break;
		case GTPU_V4_FLOW:
		case GTPU_V6_FLOW:
			hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_IP_TEID;
			break;
		case GTPU_EH_V4_FLOW:
		case GTPU_EH_V6_FLOW:
			hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_EH_TEID;
			break;
		case GTPU_UL_V4_FLOW:
		case GTPU_UL_V6_FLOW:
			hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_UP_TEID;
			break;
		case GTPU_DL_V4_FLOW:
		case GTPU_DL_V6_FLOW:
			hfld |= IAVF_ADV_RSS_HASH_FLD_GTPU_DWN_TEID;
			break;
		default:
			break;
		}
	}

	return hfld;
}

/* Configure the RSS input set (hashed fields) for one flow type. Either
 * updates an existing advanced-RSS config for the same header set or
 * queues a new one, then kicks the AQ task to push it to the PF.
 */
static int
iavf_set_rxfh_fields(struct net_device *netdev,
		     const struct ethtool_rxfh_fields *cmd,
		     struct netlink_ext_ack *extack)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_adv_rss *rss_old, *rss_new;
	bool rss_new_add = false;
	bool symm = false;
	u64 hash_flds;
	int err = 0;
	u32 hdrs;

	netdev_assert_locked(adapter->netdev);

	if (!ADV_RSS_SUPPORT(adapter))
		return -EOPNOTSUPP;

	symm = !!(adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC);

	hdrs = iavf_adv_rss_parse_hdrs(cmd);
	if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
		return -EINVAL;

	hash_flds = iavf_adv_rss_parse_hash_flds(cmd, symm);
	if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
		return -EINVAL;

	/* Build the new config outside the lock; discard it later if an
	 * existing entry is updated in place instead.
	 */
	rss_new = kzalloc_obj(*rss_new);
	if (!rss_new)
		return -ENOMEM;

	if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds,
				      symm)) {
		kfree(rss_new);
		return -EINVAL;
	}

	spin_lock_bh(&adapter->adv_rss_lock);
	rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
	if (rss_old) {
		if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
			/* previous request still in flight */
			err = -EBUSY;
		} else if (rss_old->hash_flds != hash_flds ||
			   rss_old->symm != symm) {
			rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
			rss_old->hash_flds = hash_flds;
			rss_old->symm = symm;
			memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
			       sizeof(rss_new->cfg_msg));
		} else {
			/* identical config already applied */
			err = -EEXIST;
		}
	} else {
		rss_new_add = true;
		rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
		rss_new->packet_hdrs = hdrs;
		rss_new->hash_flds = hash_flds;
		rss_new->symm = symm;
		list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (!err)
		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);

	/* only the list owns rss_new when it was added above */
	if (!rss_new_add)
		kfree(rss_new);

	return err;
}

/* Report the currently hashed fields for @cmd->flow_type by translating
 * the stored advanced-RSS config back into RXH_* bits.
 */
static int
iavf_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *cmd)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_adv_rss *rss;
	u64 hash_flds;
	u32 hdrs;

	if (!ADV_RSS_SUPPORT(adapter))
		return -EOPNOTSUPP;

	cmd->data = 0;

	hdrs = iavf_adv_rss_parse_hdrs(cmd);
	if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
		return -EINVAL;

	spin_lock_bh(&adapter->adv_rss_lock);
	rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
	if (rss)
		hash_flds = rss->hash_flds;
	else
		hash_flds = IAVF_ADV_RSS_HASH_INVALID;
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
		return -EINVAL;

	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
			 IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
		cmd->data |= (u64)RXH_IP_SRC;

	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
			 IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
		cmd->data |= (u64)RXH_IP_DST;

	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
			 IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
			 IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
		cmd->data |= (u64)RXH_L4_B_0_1;

	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
			 IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
			 IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
		cmd->data |= (u64)RXH_L4_B_2_3;

	return 0;
}

/**
 * iavf_set_rxnfc - command to set Rx flow rules.
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 *
 * Returns 0 for success and negative values for errors
 */
static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = iavf_add_fdir_ethtool(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = iavf_del_fdir_ethtool(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}

/**
 * iavf_get_rx_ring_count - get RX ring count
 * @netdev: network interface device structure
 *
 * Return: number of RX rings.
 */
static u32 iavf_get_rx_ring_count(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	return adapter->num_active_queues;
}

/**
 * iavf_get_rxnfc - command to get RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: pointer to store rule locations
 *
 * Return: 0 on success, -EOPNOTSUPP if the command is not supported.
1657 */ 1658 static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 1659 u32 *rule_locs) 1660 { 1661 struct iavf_adapter *adapter = netdev_priv(netdev); 1662 int ret = -EOPNOTSUPP; 1663 1664 switch (cmd->cmd) { 1665 case ETHTOOL_GRXCLSRLCNT: 1666 if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) 1667 break; 1668 spin_lock_bh(&adapter->fdir_fltr_lock); 1669 cmd->rule_cnt = adapter->fdir_active_fltr; 1670 spin_unlock_bh(&adapter->fdir_fltr_lock); 1671 cmd->data = IAVF_MAX_FDIR_FILTERS; 1672 ret = 0; 1673 break; 1674 case ETHTOOL_GRXCLSRULE: 1675 ret = iavf_get_ethtool_fdir_entry(adapter, cmd); 1676 break; 1677 case ETHTOOL_GRXCLSRLALL: 1678 ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs); 1679 break; 1680 default: 1681 break; 1682 } 1683 1684 return ret; 1685 } 1686 /** 1687 * iavf_get_channels - get the number of channels supported by the device 1688 * @netdev: network interface device structure 1689 * @ch: channel information structure 1690 * 1691 * For the purposes of our device, we only use combined channels, i.e. a tx/rx 1692 * queue pair. Report one extra channel to match our "other" MSI-X vector. 1693 */ 1694 static void iavf_get_channels(struct net_device *netdev, 1695 struct ethtool_channels *ch) 1696 { 1697 struct iavf_adapter *adapter = netdev_priv(netdev); 1698 1699 /* Report maximum channels */ 1700 ch->max_combined = adapter->vsi_res->num_queue_pairs; 1701 1702 ch->max_other = NONQ_VECS; 1703 ch->other_count = NONQ_VECS; 1704 1705 ch->combined_count = adapter->num_active_queues; 1706 } 1707 1708 /** 1709 * iavf_set_channels - set the new channel count 1710 * @netdev: network interface device structure 1711 * @ch: channel information structure 1712 * 1713 * Negotiate a new number of channels with the PF then do a reset. During 1714 * reset we'll realloc queues and fix the RSS table. 1715 * 1716 * Return: 0 on success, negative on failure. 
1717 */ 1718 static int iavf_set_channels(struct net_device *netdev, 1719 struct ethtool_channels *ch) 1720 { 1721 struct iavf_adapter *adapter = netdev_priv(netdev); 1722 u32 num_req = ch->combined_count; 1723 1724 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1725 adapter->num_tc) { 1726 dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n"); 1727 return -EINVAL; 1728 } 1729 1730 /* All of these should have already been checked by ethtool before this 1731 * even gets to us, but just to be sure. 1732 */ 1733 if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs) 1734 return -EINVAL; 1735 1736 if (num_req == adapter->num_active_queues) 1737 return 0; 1738 1739 if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS) 1740 return -EINVAL; 1741 1742 adapter->num_req_queues = num_req; 1743 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 1744 adapter->flags |= IAVF_FLAG_RESET_NEEDED; 1745 iavf_reset_step(adapter); 1746 1747 return 0; 1748 } 1749 1750 /** 1751 * iavf_get_rxfh_key_size - get the RSS hash key size 1752 * @netdev: network interface device structure 1753 * 1754 * Return: the RSS hash key size. 1755 */ 1756 static u32 iavf_get_rxfh_key_size(struct net_device *netdev) 1757 { 1758 struct iavf_adapter *adapter = netdev_priv(netdev); 1759 1760 return adapter->rss_key_size; 1761 } 1762 1763 /** 1764 * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size 1765 * @netdev: network interface device structure 1766 * 1767 * Return: the indirection table size. 1768 */ 1769 static u32 iavf_get_rxfh_indir_size(struct net_device *netdev) 1770 { 1771 struct iavf_adapter *adapter = netdev_priv(netdev); 1772 1773 return adapter->rss_lut_size; 1774 } 1775 1776 /** 1777 * iavf_get_rxfh - get the rx flow hash indirection table 1778 * @netdev: network interface device structure 1779 * @rxfh: pointer to param struct (indir, key, hfunc) 1780 * 1781 * Reads the indirection table directly from the hardware. 
1782 * 1783 * Return: 0 always. 1784 */ 1785 static int iavf_get_rxfh(struct net_device *netdev, 1786 struct ethtool_rxfh_param *rxfh) 1787 { 1788 struct iavf_adapter *adapter = netdev_priv(netdev); 1789 u16 i; 1790 1791 rxfh->hfunc = ETH_RSS_HASH_TOP; 1792 if (adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) 1793 rxfh->input_xfrm |= RXH_XFRM_SYM_XOR; 1794 1795 if (rxfh->key) 1796 memcpy(rxfh->key, adapter->rss_key, adapter->rss_key_size); 1797 1798 if (rxfh->indir) 1799 /* Each 32 bits pointed by 'indir' is stored with a lut entry */ 1800 for (i = 0; i < adapter->rss_lut_size; i++) 1801 rxfh->indir[i] = (u32)adapter->rss_lut[i]; 1802 1803 return 0; 1804 } 1805 1806 /** 1807 * iavf_set_rxfh - set the rx flow hash indirection table 1808 * @netdev: network interface device structure 1809 * @rxfh: pointer to param struct (indir, key, hfunc) 1810 * @extack: extended ACK from the Netlink message 1811 * 1812 * Return: 0 on success, -EOPNOTSUPP if the hash function is not supported, 1813 * -EINVAL if the table specifies an invalid queue id. 
1814 */ 1815 static int iavf_set_rxfh(struct net_device *netdev, 1816 struct ethtool_rxfh_param *rxfh, 1817 struct netlink_ext_ack *extack) 1818 { 1819 struct iavf_adapter *adapter = netdev_priv(netdev); 1820 u16 i; 1821 1822 /* Only support toeplitz hash function */ 1823 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && 1824 rxfh->hfunc != ETH_RSS_HASH_TOP) 1825 return -EOPNOTSUPP; 1826 1827 if ((rxfh->input_xfrm & RXH_XFRM_SYM_XOR) && 1828 adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) { 1829 if (!ADV_RSS_SUPPORT(adapter)) 1830 return -EOPNOTSUPP; 1831 adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC; 1832 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC; 1833 } else if (!(rxfh->input_xfrm & RXH_XFRM_SYM_XOR) && 1834 adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC) { 1835 adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC; 1836 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC; 1837 } 1838 1839 if (!rxfh->key && !rxfh->indir) 1840 return 0; 1841 1842 if (rxfh->key) 1843 memcpy(adapter->rss_key, rxfh->key, adapter->rss_key_size); 1844 1845 if (rxfh->indir) { 1846 /* Each 32 bits pointed by 'indir' is stored with a lut entry */ 1847 for (i = 0; i < adapter->rss_lut_size; i++) 1848 adapter->rss_lut[i] = (u8)(rxfh->indir[i]); 1849 } 1850 1851 return iavf_config_rss(adapter); 1852 } 1853 1854 static const struct ethtool_ops iavf_ethtool_ops = { 1855 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 1856 ETHTOOL_COALESCE_USE_ADAPTIVE, 1857 .supported_input_xfrm = RXH_XFRM_SYM_XOR, 1858 .get_drvinfo = iavf_get_drvinfo, 1859 .get_link = ethtool_op_get_link, 1860 .get_ringparam = iavf_get_ringparam, 1861 .set_ringparam = iavf_set_ringparam, 1862 .get_strings = iavf_get_strings, 1863 .get_ethtool_stats = iavf_get_ethtool_stats, 1864 .get_sset_count = iavf_get_sset_count, 1865 .get_msglevel = iavf_get_msglevel, 1866 .set_msglevel = iavf_set_msglevel, 1867 .get_coalesce = iavf_get_coalesce, 1868 .set_coalesce = iavf_set_coalesce, 1869 
.get_per_queue_coalesce = iavf_get_per_queue_coalesce, 1870 .set_per_queue_coalesce = iavf_set_per_queue_coalesce, 1871 .set_rxnfc = iavf_set_rxnfc, 1872 .get_rxnfc = iavf_get_rxnfc, 1873 .get_rx_ring_count = iavf_get_rx_ring_count, 1874 .get_rxfh_indir_size = iavf_get_rxfh_indir_size, 1875 .get_rxfh = iavf_get_rxfh, 1876 .set_rxfh = iavf_set_rxfh, 1877 .get_rxfh_fields = iavf_get_rxfh_fields, 1878 .set_rxfh_fields = iavf_set_rxfh_fields, 1879 .get_channels = iavf_get_channels, 1880 .set_channels = iavf_set_channels, 1881 .get_rxfh_key_size = iavf_get_rxfh_key_size, 1882 .get_link_ksettings = iavf_get_link_ksettings, 1883 }; 1884 1885 /** 1886 * iavf_set_ethtool_ops - Initialize ethtool ops struct 1887 * @netdev: network interface device structure 1888 * 1889 * Sets ethtool ops struct in our netdev so that ethtool can call 1890 * our functions. 1891 */ 1892 void iavf_set_ethtool_ops(struct net_device *netdev) 1893 { 1894 netdev->ethtool_ops = &iavf_ethtool_ops; 1895 } 1896