// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/bitfield.h>
#include <linux/uaccess.h>

#include <net/netdev_lock.h>

/* ethtool support for iavf */
#include "iavf.h"

/* ethtool statistics helpers */

/**
 * struct iavf_stats - definition for an ethtool statistic
 * @stat_string: statistic name to display in ethtool -S output
 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
 * @stat_offset: offsetof() the stat from a base pointer
 *
 * This structure defines a statistic to be added to the ethtool stats buffer.
 * It defines a statistic as offset from a common base pointer. Stats should
 * be defined in constant arrays using the IAVF_STAT macro, with every element
 * of the array using the same _type for calculating the sizeof_stat and
 * stat_offset.
 *
 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
 * the iavf_add_ethtool_stat() helper function.
 *
 * The @stat_string is interpreted as a format string, allowing formatted
 * values to be inserted while looping over multiple structures for a given
 * statistics array. Thus, every statistic string in an array should have the
 * same type and number of format specifiers, to be formatted by variadic
 * arguments to the iavf_add_stat_string() helper function.
 **/
struct iavf_stats {
	char stat_string[ETH_GSTRING_LEN];	/* may contain printf specifiers */
	int sizeof_stat;			/* width of the field in bytes */
	int stat_offset;			/* byte offset from the base pointer */
};

/* Helper macro to define an iavf_stat structure with proper size and type.
 * Use this when defining constant statistics arrays. Note that @_type expects
 * only a type name and is used multiple times.
 */
#define IAVF_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

/* Helper macro for defining some statistics related to queues */
#define IAVF_QUEUE_STAT(_name, _stat) \
	IAVF_STAT(struct iavf_ring, _name, _stat)

/* Stats associated with a Tx or Rx ring */
static const struct iavf_stats iavf_gstrings_queue_stats[] = {
	IAVF_QUEUE_STAT("%s-%u.packets", stats.packets),
	IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes),
};

/**
 * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer
 * @data: location to store the stat value
 * @pointer: basis for where to copy from
 * @stat: the stat definition
 *
 * Copies the stat data defined by the pointer and stat structure pair into
 * the memory supplied as data. Used to implement iavf_add_ethtool_stats and
 * iavf_add_queue_stats. If the pointer is null, data will be zero'd.
 */
static void
iavf_add_one_ethtool_stat(u64 *data, void *pointer,
			  const struct iavf_stats *stat)
{
	char *p;

	if (!pointer) {
		/* ensure that the ethtool data buffer is zero'd for any stats
		 * which don't have a valid pointer.
		 */
		*data = 0;
		return;
	}

	/* Locate the field via its byte offset, then widen it to u64 based
	 * on the recorded field size.
	 */
	p = (char *)pointer + stat->stat_offset;
	switch (stat->sizeof_stat) {
	case sizeof(u64):
		*data = *((u64 *)p);
		break;
	case sizeof(u32):
		*data = *((u32 *)p);
		break;
	case sizeof(u16):
		*data = *((u16 *)p);
		break;
	case sizeof(u8):
		*data = *((u8 *)p);
		break;
	default:
		/* Unexpected field size in the stats table definition */
		WARN_ONCE(1, "unexpected stat size for %s",
			  stat->stat_string);
		*data = 0;
	}
}

/**
 * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer
 * @data: ethtool stats buffer
 * @pointer: location to copy stats from
 * @stats: array of stats to copy
 * @size: the size of the stats definition
 *
 * Copy the stats defined by the stats array using the pointer as a base into
 * the data buffer supplied by ethtool. Updates the data pointer to point to
 * the next empty location for successive calls to __iavf_add_ethtool_stats.
 * If pointer is null, set the data values to zero and update the pointer to
 * skip these stats.
 **/
static void
__iavf_add_ethtool_stats(u64 **data, void *pointer,
			 const struct iavf_stats stats[],
			 const unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
}

/**
 * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer
 * @data: ethtool stats buffer
 * @pointer: location where stats are stored
 * @stats: static const array of stat definitions
 *
 * Macro to ease the use of __iavf_add_ethtool_stats by taking a static
 * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
 * ensuring that we pass the size associated with the given stats array.
 *
 * The parameter @stats is evaluated twice, so parameters with side effects
 * should be avoided.
 **/
#define iavf_add_ethtool_stats(data, pointer, stats) \
	__iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))

/**
 * iavf_add_queue_stats - copy queue statistics into supplied buffer
 * @data: ethtool stats buffer
 * @ring: the ring to copy
 *
 * Queue statistics must be copied while protected by
 * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats.
 * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the
 * ring pointer is null, zero out the queue stat values and update the data
 * pointer. Otherwise safely copy the stats from the ring into the supplied
 * buffer and update the data pointer when finished.
 *
 * This function expects to be called while under rcu_read_lock().
 **/
static void
iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
{
	const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats);
	const struct iavf_stats *stats = iavf_gstrings_queue_stats;
	unsigned int start;
	unsigned int i;

	/* To avoid invalid statistics values, ensure that we keep retrying
	 * the copy until we get a consistent value according to
	 * u64_stats_fetch_retry. But first, make sure our ring is
	 * non-null before attempting to access its syncp.
	 */
	do {
		start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
		for (i = 0; i < size; i++)
			iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
	} while (ring && u64_stats_fetch_retry(&ring->syncp, start));

	/* Once we successfully copy the stats in, update the data pointer */
	*data += size;
}

/**
 * __iavf_add_stat_strings - copy stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @size: size of the stats array
 *
 * Format and copy the strings described by stats into the buffer pointed at
 * by p.
 **/
static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[],
				    const unsigned int size, ...)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		va_list args;

		/* Re-read the variadic arguments for every string so each
		 * stat_string sees the same format values.
		 */
		va_start(args, size);
		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
		*p += ETH_GSTRING_LEN;
		va_end(args);
	}
}

/**
 * iavf_add_stat_strings - copy stat strings into ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 *
 * Format and copy the strings described by the const static stats value into
 * the buffer pointed at by p.
 *
 * The parameter @stats is evaluated twice, so parameters with side effects
 * should be avoided. Additionally, stats must be an array such that
 * ARRAY_SIZE can be called on it.
 **/
#define iavf_add_stat_strings(p, stats, ...) \
	__iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)

#define VF_STAT(_name, _stat) \
	IAVF_STAT(struct iavf_adapter, _name, _stat)

/* Per-VF statistics reported from the adapter's current_stats */
static const struct iavf_stats iavf_gstrings_stats[] = {
	VF_STAT("rx_bytes", current_stats.rx_bytes),
	VF_STAT("rx_unicast", current_stats.rx_unicast),
	VF_STAT("rx_multicast", current_stats.rx_multicast),
	VF_STAT("rx_broadcast", current_stats.rx_broadcast),
	VF_STAT("rx_discards", current_stats.rx_discards),
	VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
	VF_STAT("tx_bytes", current_stats.tx_bytes),
	VF_STAT("tx_unicast", current_stats.tx_unicast),
	VF_STAT("tx_multicast", current_stats.tx_multicast),
	VF_STAT("tx_broadcast", current_stats.tx_broadcast),
	VF_STAT("tx_discards", current_stats.tx_discards),
	VF_STAT("tx_errors", current_stats.tx_errors),
};

#define IAVF_STATS_LEN	ARRAY_SIZE(iavf_gstrings_stats)

#define IAVF_QUEUE_STATS_LEN	ARRAY_SIZE(iavf_gstrings_queue_stats)

/**
 * iavf_get_link_ksettings - Get Link Speed and Duplex settings
* @netdev: network interface device structure 248 * @cmd: ethtool command 249 * 250 * Reports speed/duplex settings. Because this is a VF, we don't know what 251 * kind of link we really have, so we fake it. 252 **/ 253 static int iavf_get_link_ksettings(struct net_device *netdev, 254 struct ethtool_link_ksettings *cmd) 255 { 256 struct iavf_adapter *adapter = netdev_priv(netdev); 257 258 ethtool_link_ksettings_zero_link_mode(cmd, supported); 259 cmd->base.autoneg = AUTONEG_DISABLE; 260 cmd->base.port = PORT_NONE; 261 cmd->base.duplex = DUPLEX_FULL; 262 263 if (ADV_LINK_SUPPORT(adapter)) { 264 if (adapter->link_speed_mbps && 265 adapter->link_speed_mbps < U32_MAX) 266 cmd->base.speed = adapter->link_speed_mbps; 267 else 268 cmd->base.speed = SPEED_UNKNOWN; 269 270 return 0; 271 } 272 273 switch (adapter->link_speed) { 274 case VIRTCHNL_LINK_SPEED_40GB: 275 cmd->base.speed = SPEED_40000; 276 break; 277 case VIRTCHNL_LINK_SPEED_25GB: 278 cmd->base.speed = SPEED_25000; 279 break; 280 case VIRTCHNL_LINK_SPEED_20GB: 281 cmd->base.speed = SPEED_20000; 282 break; 283 case VIRTCHNL_LINK_SPEED_10GB: 284 cmd->base.speed = SPEED_10000; 285 break; 286 case VIRTCHNL_LINK_SPEED_5GB: 287 cmd->base.speed = SPEED_5000; 288 break; 289 case VIRTCHNL_LINK_SPEED_2_5GB: 290 cmd->base.speed = SPEED_2500; 291 break; 292 case VIRTCHNL_LINK_SPEED_1GB: 293 cmd->base.speed = SPEED_1000; 294 break; 295 case VIRTCHNL_LINK_SPEED_100MB: 296 cmd->base.speed = SPEED_100; 297 break; 298 default: 299 break; 300 } 301 302 return 0; 303 } 304 305 /** 306 * iavf_get_sset_count - Get length of string set 307 * @netdev: network interface device structure 308 * @sset: id of string set 309 * 310 * Reports size of various string tables. 311 **/ 312 static int iavf_get_sset_count(struct net_device *netdev, int sset) 313 { 314 /* Report the maximum number queues, even if not every queue is 315 * currently configured. Since allocation of queues is in pairs, 316 * use netdev->real_num_tx_queues * 2. 
The real_num_tx_queues is set 317 * at device creation and never changes. 318 */ 319 320 if (sset == ETH_SS_STATS) 321 return IAVF_STATS_LEN + 322 (IAVF_QUEUE_STATS_LEN * 2 * 323 netdev->real_num_tx_queues); 324 else 325 return -EINVAL; 326 } 327 328 /** 329 * iavf_get_ethtool_stats - report device statistics 330 * @netdev: network interface device structure 331 * @stats: ethtool statistics structure 332 * @data: pointer to data buffer 333 * 334 * All statistics are added to the data buffer as an array of u64. 335 **/ 336 static void iavf_get_ethtool_stats(struct net_device *netdev, 337 struct ethtool_stats *stats, u64 *data) 338 { 339 struct iavf_adapter *adapter = netdev_priv(netdev); 340 unsigned int i; 341 342 /* Explicitly request stats refresh */ 343 iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS); 344 345 iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats); 346 347 rcu_read_lock(); 348 /* As num_active_queues describe both tx and rx queues, we can use 349 * it to iterate over rings' stats. 350 */ 351 for (i = 0; i < adapter->num_active_queues; i++) { 352 struct iavf_ring *ring; 353 354 /* Tx rings stats */ 355 ring = &adapter->tx_rings[i]; 356 iavf_add_queue_stats(&data, ring); 357 358 /* Rx rings stats */ 359 ring = &adapter->rx_rings[i]; 360 iavf_add_queue_stats(&data, ring); 361 } 362 rcu_read_unlock(); 363 } 364 365 /** 366 * iavf_get_stat_strings - Get stat strings 367 * @netdev: network interface device structure 368 * @data: buffer for string data 369 * 370 * Builds the statistics string table 371 **/ 372 static void iavf_get_stat_strings(struct net_device *netdev, u8 *data) 373 { 374 unsigned int i; 375 376 iavf_add_stat_strings(&data, iavf_gstrings_stats); 377 378 /* Queues are always allocated in pairs, so we just use 379 * real_num_tx_queues for both Tx and Rx queues. 
380 */ 381 for (i = 0; i < netdev->real_num_tx_queues; i++) { 382 iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, 383 "tx", i); 384 iavf_add_stat_strings(&data, iavf_gstrings_queue_stats, 385 "rx", i); 386 } 387 } 388 389 /** 390 * iavf_get_strings - Get string set 391 * @netdev: network interface device structure 392 * @sset: id of string set 393 * @data: buffer for string data 394 * 395 * Builds string tables for various string sets 396 **/ 397 static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data) 398 { 399 switch (sset) { 400 case ETH_SS_STATS: 401 iavf_get_stat_strings(netdev, data); 402 break; 403 default: 404 break; 405 } 406 } 407 408 /** 409 * iavf_get_msglevel - Get debug message level 410 * @netdev: network interface device structure 411 * 412 * Returns current debug message level. 413 **/ 414 static u32 iavf_get_msglevel(struct net_device *netdev) 415 { 416 struct iavf_adapter *adapter = netdev_priv(netdev); 417 418 return adapter->msg_enable; 419 } 420 421 /** 422 * iavf_set_msglevel - Set debug message level 423 * @netdev: network interface device structure 424 * @data: message level 425 * 426 * Set current debug message level. Higher values cause the driver to 427 * be noisier. 428 **/ 429 static void iavf_set_msglevel(struct net_device *netdev, u32 data) 430 { 431 struct iavf_adapter *adapter = netdev_priv(netdev); 432 433 if (IAVF_DEBUG_USER & data) 434 adapter->hw.debug_mask = data; 435 adapter->msg_enable = data; 436 } 437 438 /** 439 * iavf_get_drvinfo - Get driver info 440 * @netdev: network interface device structure 441 * @drvinfo: ethool driver info structure 442 * 443 * Returns information about the driver and device for display to the user. 
444 **/ 445 static void iavf_get_drvinfo(struct net_device *netdev, 446 struct ethtool_drvinfo *drvinfo) 447 { 448 struct iavf_adapter *adapter = netdev_priv(netdev); 449 450 strscpy(drvinfo->driver, iavf_driver_name, 32); 451 strscpy(drvinfo->fw_version, "N/A", 4); 452 strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 453 } 454 455 /** 456 * iavf_get_ringparam - Get ring parameters 457 * @netdev: network interface device structure 458 * @ring: ethtool ringparam structure 459 * @kernel_ring: ethtool extenal ringparam structure 460 * @extack: netlink extended ACK report struct 461 * 462 * Returns current ring parameters. TX and RX rings are reported separately, 463 * but the number of rings is not reported. 464 **/ 465 static void iavf_get_ringparam(struct net_device *netdev, 466 struct ethtool_ringparam *ring, 467 struct kernel_ethtool_ringparam *kernel_ring, 468 struct netlink_ext_ack *extack) 469 { 470 struct iavf_adapter *adapter = netdev_priv(netdev); 471 472 ring->rx_max_pending = IAVF_MAX_RXD; 473 ring->tx_max_pending = IAVF_MAX_TXD; 474 ring->rx_pending = adapter->rx_desc_count; 475 ring->tx_pending = adapter->tx_desc_count; 476 } 477 478 /** 479 * iavf_set_ringparam - Set ring parameters 480 * @netdev: network interface device structure 481 * @ring: ethtool ringparam structure 482 * @kernel_ring: ethtool external ringparam structure 483 * @extack: netlink extended ACK report struct 484 * 485 * Sets ring parameters. TX and RX rings are controlled separately, but the 486 * number of rings is not specified, so all rings get the same settings. 
 **/
static int iavf_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u32 new_rx_count, new_tx_count;
	int ret = 0;

	/* Mini and jumbo rings are not supported */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (ring->tx_pending > IAVF_MAX_TXD ||
	    ring->tx_pending < IAVF_MIN_TXD ||
	    ring->rx_pending > IAVF_MAX_RXD ||
	    ring->rx_pending < IAVF_MIN_RXD) {
		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
			   ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
			   IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
		return -EINVAL;
	}

	/* Round requested counts up to the required descriptor multiple */
	new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
	if (new_tx_count != ring->tx_pending)
		netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
			    new_tx_count);

	new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
	if (new_rx_count != ring->rx_pending)
		netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
			    new_rx_count);

	/* if nothing to do return success */
	if ((new_tx_count == adapter->tx_desc_count) &&
	    (new_rx_count == adapter->rx_desc_count)) {
		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
		return 0;
	}

	if (new_tx_count != adapter->tx_desc_count) {
		netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
			   adapter->tx_desc_count, new_tx_count);
		adapter->tx_desc_count = new_tx_count;
	}

	if (new_rx_count != adapter->rx_desc_count) {
		netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
			   adapter->rx_desc_count, new_rx_count);
		adapter->rx_desc_count = new_rx_count;
	}

	if (netif_running(netdev)) {
		/* The new descriptor counts only take effect after a reset;
		 * schedule one and wait so the change is complete on return.
		 */
		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
		ret = iavf_wait_for_reset(adapter);
		if (ret)
			netdev_warn(netdev, "Changing ring parameters timeout or interrupted waiting for reset");
	}

	return ret;
}

/**
 * __iavf_get_coalesce - get per-queue coalesce settings
 * @netdev: the netdev to check
 * @ec: ethtool coalesce data structure
 * @queue: which queue to pick
 *
 * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
 * are per queue. If queue is <0 then we default to queue 0 as the
 * representative value.
 **/
static int __iavf_get_coalesce(struct net_device *netdev,
			       struct ethtool_coalesce *ec, int queue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_ring *rx_ring, *tx_ring;

	/* Rx and Tx usecs per queue value. If user doesn't specify the
	 * queue, return queue 0's value to represent.
	 */
	if (queue < 0)
		queue = 0;
	else if (queue >= adapter->num_active_queues)
		return -EINVAL;

	rx_ring = &adapter->rx_rings[queue];
	tx_ring = &adapter->tx_rings[queue];

	if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
		ec->use_adaptive_rx_coalesce = 1;

	if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
		ec->use_adaptive_tx_coalesce = 1;

	/* Mask off the dynamic flag to report the plain usecs value */
	ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
	ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;

	return 0;
}

/**
 * iavf_get_coalesce - Get interrupt coalescing settings
 * @netdev: network interface device structure
 * @ec: ethtool coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack: extack for reporting error messages
 *
 * Returns current coalescing settings. This is referred to elsewhere in the
 * driver as Interrupt Throttle Rate, as this is how the hardware describes
 * this functionality. Note that if per-queue settings have been modified this
 * only represents the settings of queue 0.
599 **/ 600 static int iavf_get_coalesce(struct net_device *netdev, 601 struct ethtool_coalesce *ec, 602 struct kernel_ethtool_coalesce *kernel_coal, 603 struct netlink_ext_ack *extack) 604 { 605 return __iavf_get_coalesce(netdev, ec, -1); 606 } 607 608 /** 609 * iavf_get_per_queue_coalesce - get coalesce values for specific queue 610 * @netdev: netdev to read 611 * @ec: coalesce settings from ethtool 612 * @queue: the queue to read 613 * 614 * Read specific queue's coalesce settings. 615 **/ 616 static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue, 617 struct ethtool_coalesce *ec) 618 { 619 return __iavf_get_coalesce(netdev, ec, queue); 620 } 621 622 /** 623 * iavf_set_itr_per_queue - set ITR values for specific queue 624 * @adapter: the VF adapter struct to set values for 625 * @ec: coalesce settings from ethtool 626 * @queue: the queue to modify 627 * 628 * Change the ITR settings for a specific queue. 629 **/ 630 static int iavf_set_itr_per_queue(struct iavf_adapter *adapter, 631 struct ethtool_coalesce *ec, int queue) 632 { 633 struct iavf_ring *rx_ring = &adapter->rx_rings[queue]; 634 struct iavf_ring *tx_ring = &adapter->tx_rings[queue]; 635 struct iavf_q_vector *q_vector; 636 u16 itr_setting; 637 638 itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; 639 640 if (ec->rx_coalesce_usecs != itr_setting && 641 ec->use_adaptive_rx_coalesce) { 642 netif_info(adapter, drv, adapter->netdev, 643 "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); 644 return -EINVAL; 645 } 646 647 itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC; 648 649 if (ec->tx_coalesce_usecs != itr_setting && 650 ec->use_adaptive_tx_coalesce) { 651 netif_info(adapter, drv, adapter->netdev, 652 "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); 653 return -EINVAL; 654 } 655 656 rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); 657 tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); 658 659 
rx_ring->itr_setting |= IAVF_ITR_DYNAMIC; 660 if (!ec->use_adaptive_rx_coalesce) 661 rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC; 662 663 tx_ring->itr_setting |= IAVF_ITR_DYNAMIC; 664 if (!ec->use_adaptive_tx_coalesce) 665 tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC; 666 667 q_vector = rx_ring->q_vector; 668 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); 669 670 q_vector = tx_ring->q_vector; 671 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); 672 673 /* The interrupt handler itself will take care of programming 674 * the Tx and Rx ITR values based on the values we have entered 675 * into the q_vector, no need to write the values now. 676 */ 677 return 0; 678 } 679 680 /** 681 * __iavf_set_coalesce - set coalesce settings for particular queue 682 * @netdev: the netdev to change 683 * @ec: ethtool coalesce settings 684 * @queue: the queue to change 685 * 686 * Sets the coalesce settings for a particular queue. 687 **/ 688 static int __iavf_set_coalesce(struct net_device *netdev, 689 struct ethtool_coalesce *ec, int queue) 690 { 691 struct iavf_adapter *adapter = netdev_priv(netdev); 692 int i; 693 694 if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) { 695 netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); 696 return -EINVAL; 697 } else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) { 698 netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); 699 return -EINVAL; 700 } 701 702 /* Rx and Tx usecs has per queue value. If user doesn't specify the 703 * queue, apply to all queues. 
704 */ 705 if (queue < 0) { 706 for (i = 0; i < adapter->num_active_queues; i++) 707 if (iavf_set_itr_per_queue(adapter, ec, i)) 708 return -EINVAL; 709 } else if (queue < adapter->num_active_queues) { 710 if (iavf_set_itr_per_queue(adapter, ec, queue)) 711 return -EINVAL; 712 } else { 713 netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", 714 adapter->num_active_queues - 1); 715 return -EINVAL; 716 } 717 718 return 0; 719 } 720 721 /** 722 * iavf_set_coalesce - Set interrupt coalescing settings 723 * @netdev: network interface device structure 724 * @ec: ethtool coalesce structure 725 * @kernel_coal: ethtool CQE mode setting structure 726 * @extack: extack for reporting error messages 727 * 728 * Change current coalescing settings for every queue. 729 **/ 730 static int iavf_set_coalesce(struct net_device *netdev, 731 struct ethtool_coalesce *ec, 732 struct kernel_ethtool_coalesce *kernel_coal, 733 struct netlink_ext_ack *extack) 734 { 735 return __iavf_set_coalesce(netdev, ec, -1); 736 } 737 738 /** 739 * iavf_set_per_queue_coalesce - set specific queue's coalesce settings 740 * @netdev: the netdev to change 741 * @ec: ethtool's coalesce settings 742 * @queue: the queue to modify 743 * 744 * Modifies a specific queue's coalesce settings. 745 */ 746 static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue, 747 struct ethtool_coalesce *ec) 748 { 749 return __iavf_set_coalesce(netdev, ec, queue); 750 } 751 752 /** 753 * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool 754 * flow type values 755 * @flow: filter type to be converted 756 * 757 * Returns the corresponding ethtool flow type. 
 */
static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow)
{
	switch (flow) {
	case IAVF_FDIR_FLOW_IPV4_TCP:
		return TCP_V4_FLOW;
	case IAVF_FDIR_FLOW_IPV4_UDP:
		return UDP_V4_FLOW;
	case IAVF_FDIR_FLOW_IPV4_SCTP:
		return SCTP_V4_FLOW;
	case IAVF_FDIR_FLOW_IPV4_AH:
		return AH_V4_FLOW;
	case IAVF_FDIR_FLOW_IPV4_ESP:
		return ESP_V4_FLOW;
	case IAVF_FDIR_FLOW_IPV4_OTHER:
		return IPV4_USER_FLOW;
	case IAVF_FDIR_FLOW_IPV6_TCP:
		return TCP_V6_FLOW;
	case IAVF_FDIR_FLOW_IPV6_UDP:
		return UDP_V6_FLOW;
	case IAVF_FDIR_FLOW_IPV6_SCTP:
		return SCTP_V6_FLOW;
	case IAVF_FDIR_FLOW_IPV6_AH:
		return AH_V6_FLOW;
	case IAVF_FDIR_FLOW_IPV6_ESP:
		return ESP_V6_FLOW;
	case IAVF_FDIR_FLOW_IPV6_OTHER:
		return IPV6_USER_FLOW;
	case IAVF_FDIR_FLOW_NON_IP_L2:
		return ETHER_FLOW;
	default:
		/* 0 is undefined ethtool flow */
		return 0;
	}
}

/**
 * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
 * @eth: Ethtool flow type to be converted
 *
 * Returns flow enum
 */
static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth)
{
	switch (eth) {
	case TCP_V4_FLOW:
		return IAVF_FDIR_FLOW_IPV4_TCP;
	case UDP_V4_FLOW:
		return IAVF_FDIR_FLOW_IPV4_UDP;
	case SCTP_V4_FLOW:
		return IAVF_FDIR_FLOW_IPV4_SCTP;
	case AH_V4_FLOW:
		return IAVF_FDIR_FLOW_IPV4_AH;
	case ESP_V4_FLOW:
		return IAVF_FDIR_FLOW_IPV4_ESP;
	case IPV4_USER_FLOW:
		return IAVF_FDIR_FLOW_IPV4_OTHER;
	case TCP_V6_FLOW:
		return IAVF_FDIR_FLOW_IPV6_TCP;
	case UDP_V6_FLOW:
		return IAVF_FDIR_FLOW_IPV6_UDP;
	case SCTP_V6_FLOW:
		return IAVF_FDIR_FLOW_IPV6_SCTP;
	case AH_V6_FLOW:
		return IAVF_FDIR_FLOW_IPV6_AH;
	case ESP_V6_FLOW:
		return IAVF_FDIR_FLOW_IPV6_ESP;
	case IPV6_USER_FLOW:
		return IAVF_FDIR_FLOW_IPV6_OTHER;
	case ETHER_FLOW:
		return IAVF_FDIR_FLOW_NON_IP_L2;
	default:
		return IAVF_FDIR_FLOW_NONE;
	}
}

/**
 * iavf_is_mask_valid - check mask field set
 * @mask: full mask to check
 * @field: field for which mask should be valid
 *
 * If the mask is fully set return true. If it is not valid for field return
 * false.
 */
static bool iavf_is_mask_valid(u64 mask, u64 field)
{
	return (mask & field) == field;
}

/**
 * iavf_parse_rx_flow_user_data - deconstruct user-defined data
 * @fsp: pointer to ethtool Rx flow specification
 * @fltr: pointer to Flow Director filter for userdef data storage
 *
 * Returns 0 on success, negative error value on failure
 */
static int
iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
			     struct iavf_fdir_fltr *fltr)
{
	struct iavf_flex_word *flex;
	int i, cnt = 0;

	if (!(fsp->flow_type & FLOW_EXT))
		return 0;

	for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) {
		/* Each 32-bit userdef word packs a 16-bit flex word value in
		 * the low half and its offset in the high half.
		 */
#define IAVF_USERDEF_FLEX_WORD_M	GENMASK(15, 0)
#define IAVF_USERDEF_FLEX_OFFS_S	16
#define IAVF_USERDEF_FLEX_OFFS_M	GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S)
#define IAVF_USERDEF_FLEX_FLTR_M	GENMASK(31, 0)
		u32 value = be32_to_cpu(fsp->h_ext.data[i]);
		u32 mask = be32_to_cpu(fsp->m_ext.data[i]);

		if (!value || !mask)
			continue;

		if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M))
			return -EINVAL;

		/* 504 is the maximum value for offsets, and offset is measured
		 * from the start of the MAC address.
		 */
#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
		flex = &fltr->flex_words[cnt++];
		flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
		flex->offset = FIELD_GET(IAVF_USERDEF_FLEX_OFFS_M, value);
		if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
			return -EINVAL;
	}

	fltr->flex_cnt = cnt;

	return 0;
}

/**
 * iavf_fill_rx_flow_ext_data - fill the additional data
 * @fsp: pointer to ethtool Rx flow specification
 * @fltr: pointer to Flow Director filter to get additional data
 */
static void
iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp,
			   struct iavf_fdir_fltr *fltr)
{
	/* Nothing to report unless at least one userdef word was stored */
	if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1])
		return;

	fsp->flow_type |= FLOW_EXT;

	memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data));
	memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data));
}

/**
 * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data
 * @adapter: the VF adapter structure that contains filter list
 * @cmd: ethtool command data structure to receive the filter data
 *
 * Returns 0 as expected for success by ethtool
 */
static int
iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
			    struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
	struct iavf_fdir_fltr *rule = NULL;
	int ret = 0;

	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
		return -EOPNOTSUPP;

	spin_lock_bh(&adapter->fdir_fltr_lock);

	rule = iavf_find_fdir_fltr(adapter, false, fsp->location);
	if (!rule) {
		ret = -EINVAL;
		goto release_lock;
	}

	fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type);

	/* Start from all-zero masks; only the fields used by the rule's flow
	 * type are filled in below.
	 */
	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));

	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
		fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
		fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
		fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
		fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
		fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
		fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
		fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
		fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
		fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
		fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
		fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
		fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
		fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
		fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
		fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
		break;
	case IPV4_USER_FLOW:
		fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
		fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
		fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
		fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
		fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
		fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		memcpy(fsp->h_u.usr_ip6_spec.ip6src,
		       &rule->ip_data.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
		fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
		fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
		fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
		fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
		fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
		memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
		fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
		break;
	case IPV6_USER_FLOW:
		memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
		fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.usr_ip6_spec.ip6dst,
		       &rule->ip_mask.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
		fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
		fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
		break;
	case ETHER_FLOW:
		fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
		fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	iavf_fill_rx_flow_ext_data(fsp, rule);

	/* Drop rules are reported via the special discard cookie */
	if (rule->action == VIRTCHNL_ACTION_DROP)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->q_index;

release_lock:
	spin_unlock_bh(&adapter->fdir_fltr_lock);
	return ret;
}

/**
 * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
 * @adapter: the VF adapter structure containing the filter list
 * @cmd: ethtool command data structure
 * @rule_locs: ethtool array passed in from OS to receive filter IDs
 *
 * Returns 0 as expected for success by ethtool
 */
static int
iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
		       u32 *rule_locs)
{
	struct iavf_fdir_fltr *fltr;
	unsigned int cnt = 0;
	int val = 0;

	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
		return -EOPNOTSUPP;

	cmd->data = IAVF_MAX_FDIR_FILTERS;

	spin_lock_bh(&adapter->fdir_fltr_lock);

	list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
		/* Raw (TC-added) filters are not exposed through ethtool */
		if (iavf_is_raw_fdir(fltr))
			continue;

		if (cnt == cmd->rule_cnt) {
			val = -EMSGSIZE;
			goto release_lock;
		}
		rule_locs[cnt] = fltr->loc;
		cnt++;
	}

release_lock:
	spin_unlock_bh(&adapter->fdir_fltr_lock);
	if (!val)
		cmd->rule_cnt = cnt;

	return val;
}

/**
 * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
 * @adapter: pointer to the
VF adapter structure
 * @fsp: pointer to ethtool Rx flow specification
 * @fltr: filter structure
 *
 * Translates the ethtool flow spec into the driver's filter representation
 * and builds the virtchnl add-filter message. Returns 0 on success or a
 * negative errno (-EINVAL for unsupported specs, -EEXIST for duplicates).
 */
static int
iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
			struct iavf_fdir_fltr *fltr)
{
	u32 flow_type, q_index = 0;
	enum virtchnl_action act;
	int err;

	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		act = VIRTCHNL_ACTION_DROP;
	} else {
		/* NOTE(review): ring_cookie is u64 and is truncated to u32
		 * here before the range check — confirm the ethtool core
		 * rejects cookies above U32_MAX before they reach us.
		 */
		q_index = fsp->ring_cookie;
		if (q_index >= adapter->num_active_queues)
			return -EINVAL;

		act = VIRTCHNL_ACTION_QUEUE;
	}

	fltr->action = act;
	fltr->loc = fsp->location;
	fltr->q_index = q_index;

	/* capture the raw user-defined data; parsed later by
	 * iavf_parse_rx_flow_user_data()
	 */
	if (fsp->flow_type & FLOW_EXT) {
		memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
		       sizeof(fltr->ext_data.usr_def));
		memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
		       sizeof(fltr->ext_mask.usr_def));
	}

	/* strip modifier flags before mapping the base flow type */
	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);

	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
		fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
		fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
		fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
		fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
		fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
		fltr->ip_ver = 4;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
		fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
		fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
		fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
		fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
		fltr->ip_ver = 4;
		break;
	case IPV4_USER_FLOW:
		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
		fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
		fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
		fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
		fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
		fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
		fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
		fltr->ip_ver = 4;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
		fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
		fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
		fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
		fltr->ip_ver = 6;
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
		fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
		fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
		fltr->ip_ver = 6;
		break;
	case IPV6_USER_FLOW:
		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
		fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
		fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
		fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
		fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
		fltr->ip_ver = 6;
		break;
	case ETHER_FLOW:
		fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
		fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
		break;
	default:
		/* not doing un-parsed flow types */
		return -EINVAL;
	}

	err = iavf_validate_fdir_fltr_masks(adapter, fltr);
	if (err)
		return err;

	if (iavf_fdir_is_dup_fltr(adapter, fltr))
		return -EEXIST;

	err = iavf_parse_rx_flow_user_data(fsp, fltr);
	if (err)
		return err;

	return iavf_fill_fdir_add_msg(adapter, fltr);
}

/**
 * iavf_add_fdir_ethtool - add Flow Director filter
 * @adapter: pointer to the VF adapter structure
 * @cmd: command to add Flow Director filter
 *
 * Returns 0 on success and negative values for failure
 */
static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
	struct iavf_fdir_fltr *fltr;
	int err;

	netdev_assert_locked(adapter->netdev);

	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
		return -EOPNOTSUPP;

	if (fsp->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	/* reject duplicate locations before allocating a new filter */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	if (iavf_find_fdir_fltr(adapter, false, fsp->location)) {
		dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		return -EEXIST;
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr)
		return -ENOMEM;

	err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
	if (!err)
		err = iavf_fdir_add_fltr(adapter, fltr);

	/* on success ownership of fltr moved to the adapter's filter list */
	if (err)
		kfree(fltr);

	return err;
}

/**
 * iavf_del_fdir_ethtool - delete Flow Director filter
 * @adapter: pointer to the VF adapter structure
 * @cmd: command to delete Flow Director filter
 *
 * Returns 0 on success and negative values for failure
 */
static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
		return -EOPNOTSUPP;

	return iavf_fdir_del_fltr(adapter, false, fsp->location);
}

/**
 * iavf_adv_rss_parse_hdrs - translate an ethtool flow type to RSS header flags
 * @cmd: ethtool RSS fields command
 *
 * Returns the IAVF_ADV_RSS_FLOW_SEG_HDR_* flags matching @cmd->flow_type,
 * or IAVF_ADV_RSS_FLOW_SEG_HDR_NONE for unsupported flow types.
 */
static u32 iavf_adv_rss_parse_hdrs(const struct ethtool_rxfh_fields *cmd)
{
	u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
		break;
	case UDP_V4_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
		break;
	case SCTP_V4_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
		break;
	case TCP_V6_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
		break;
	case UDP_V6_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
		break;
	case SCTP_V6_FLOW:
		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
		break;
	default:
		break;
	}

	return hdrs;
}

/**
 * iavf_adv_rss_parse_hash_flds - translate RXH_* bits to RSS hash fields
 * @cmd: ethtool RSS fields command
 * @symm: symmetric-hash flag (currently unused in this function)
 *
 * Returns the IAVF_ADV_RSS_HASH_FLD_* mask for @cmd->data and
 * @cmd->flow_type, or IAVF_ADV_RSS_HASH_INVALID when no supported
 * field bit is set.
 */
static u64
iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm)
{
	u64 hfld = IAVF_ADV_RSS_HASH_INVALID;

	if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) {
		switch (cmd->flow_type) {
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
			if (cmd->data & RXH_IP_SRC)
				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
			if (cmd->data & RXH_IP_DST)
				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
			break;
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
			if (cmd->data & RXH_IP_SRC)
				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
			if (cmd->data & RXH_IP_DST)
				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA;
			break;
		default:
			break;
		}
	}

	if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) {
		switch (cmd->flow_type) {
		case TCP_V4_FLOW:
		case TCP_V6_FLOW:
			if (cmd->data & RXH_L4_B_0_1)
				hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
			if (cmd->data & RXH_L4_B_2_3)
				hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
			break;
		case UDP_V4_FLOW:
		case UDP_V6_FLOW:
			if (cmd->data & RXH_L4_B_0_1)
				hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
			if (cmd->data & RXH_L4_B_2_3)
				hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT;
			break;
		case SCTP_V4_FLOW:
		case SCTP_V6_FLOW:
			if (cmd->data & RXH_L4_B_0_1)
				hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT;
			if (cmd->data & RXH_L4_B_2_3)
				hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT;
			break;
		default:
			break;
		}
	}

	return hfld;
}

/**
 * iavf_set_rxfh_fields - set per-flow-type RSS hash fields
 * @netdev: network interface device structure
 * @cmd: ethtool RSS fields command
 * @extack: extended ACK from the Netlink message
 *
 * Creates or updates an advanced RSS configuration for the flow type in
 * @cmd and schedules a virtchnl request to apply it. Returns 0 on success
 * or a negative errno (-EBUSY while a previous change is pending,
 * -EEXIST when the requested configuration is already active).
 */
static int
iavf_set_rxfh_fields(struct net_device *netdev,
		     const struct ethtool_rxfh_fields *cmd,
		     struct netlink_ext_ack *extack)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_adv_rss *rss_old, *rss_new;
	bool rss_new_add = false;
	bool symm = false;
	u64 hash_flds;
	int err = 0;
	u32 hdrs;

	netdev_assert_locked(adapter->netdev);

	if (!ADV_RSS_SUPPORT(adapter))
		return -EOPNOTSUPP;

	symm = !!(adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC);

	hdrs = iavf_adv_rss_parse_hdrs(cmd);
	if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
		return -EINVAL;

	hash_flds = iavf_adv_rss_parse_hash_flds(cmd, symm);
	if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
		return -EINVAL;

	/* build the new config outside the spinlock; it is either linked
	 * into the list below or freed at the end of the function
	 */
	rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL);
	if (!rss_new)
		return -ENOMEM;

	if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds,
				      symm)) {
		kfree(rss_new);
		return -EINVAL;
	}

	spin_lock_bh(&adapter->adv_rss_lock);
	rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
	if (rss_old) {
		if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
			/* a previous change for this flow type is in flight */
			err = -EBUSY;
		} else if (rss_old->hash_flds != hash_flds ||
			   rss_old->symm != symm) {
			rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
			rss_old->hash_flds = hash_flds;
			rss_old->symm = symm;
			memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
			       sizeof(rss_new->cfg_msg));
		} else {
			err = -EEXIST;
		}
	} else {
		rss_new_add = true;
		rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
		rss_new->packet_hdrs = hdrs;
		rss_new->hash_flds = hash_flds;
		rss_new->symm = symm;
		list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
	}
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (!err)
		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);

	/* rss_new was not linked into the list; release it */
	if (!rss_new_add)
		kfree(rss_new);

	return err;
}

/**
 * iavf_get_rxfh_fields - report per-flow-type RSS hash fields
 * @netdev: network interface device structure
 * @cmd: ethtool RSS fields command; RXH_* bits returned in @cmd->data
 *
 * Returns 0 on success, -EOPNOTSUPP when advanced RSS is not supported,
 * or -EINVAL when no configuration exists for the flow type.
 */
static int
iavf_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *cmd)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_adv_rss *rss;
	u64 hash_flds;
	u32 hdrs;

	if (!ADV_RSS_SUPPORT(adapter))
		return -EOPNOTSUPP;

	cmd->data = 0;

	hdrs = iavf_adv_rss_parse_hdrs(cmd);
	if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
		return -EINVAL;

	spin_lock_bh(&adapter->adv_rss_lock);
	rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
	if (rss)
		hash_flds = rss->hash_flds;
	else
		hash_flds = IAVF_ADV_RSS_HASH_INVALID;
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
		return -EINVAL;

	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
			 IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
		cmd->data |= (u64)RXH_IP_SRC;

	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
			 IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
		cmd->data |= (u64)RXH_IP_DST;

	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
			 IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
			 IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
		cmd->data |= (u64)RXH_L4_B_0_1;

	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
			 IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
			 IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
		cmd->data |= (u64)RXH_L4_B_2_3;

	return 0;
}

/**
 * iavf_set_rxnfc - command to set Rx flow rules.
1528 * @netdev: network interface device structure 1529 * @cmd: ethtool rxnfc command 1530 * 1531 * Returns 0 for success and negative values for errors 1532 */ 1533 static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) 1534 { 1535 struct iavf_adapter *adapter = netdev_priv(netdev); 1536 int ret = -EOPNOTSUPP; 1537 1538 switch (cmd->cmd) { 1539 case ETHTOOL_SRXCLSRLINS: 1540 ret = iavf_add_fdir_ethtool(adapter, cmd); 1541 break; 1542 case ETHTOOL_SRXCLSRLDEL: 1543 ret = iavf_del_fdir_ethtool(adapter, cmd); 1544 break; 1545 default: 1546 break; 1547 } 1548 1549 return ret; 1550 } 1551 1552 /** 1553 * iavf_get_rxnfc - command to get RX flow classification rules 1554 * @netdev: network interface device structure 1555 * @cmd: ethtool rxnfc command 1556 * @rule_locs: pointer to store rule locations 1557 * 1558 * Returns Success if the command is supported. 1559 **/ 1560 static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 1561 u32 *rule_locs) 1562 { 1563 struct iavf_adapter *adapter = netdev_priv(netdev); 1564 int ret = -EOPNOTSUPP; 1565 1566 switch (cmd->cmd) { 1567 case ETHTOOL_GRXRINGS: 1568 cmd->data = adapter->num_active_queues; 1569 ret = 0; 1570 break; 1571 case ETHTOOL_GRXCLSRLCNT: 1572 if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) 1573 break; 1574 spin_lock_bh(&adapter->fdir_fltr_lock); 1575 cmd->rule_cnt = adapter->fdir_active_fltr; 1576 spin_unlock_bh(&adapter->fdir_fltr_lock); 1577 cmd->data = IAVF_MAX_FDIR_FILTERS; 1578 ret = 0; 1579 break; 1580 case ETHTOOL_GRXCLSRULE: 1581 ret = iavf_get_ethtool_fdir_entry(adapter, cmd); 1582 break; 1583 case ETHTOOL_GRXCLSRLALL: 1584 ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs); 1585 break; 1586 default: 1587 break; 1588 } 1589 1590 return ret; 1591 } 1592 /** 1593 * iavf_get_channels: get the number of channels supported by the device 1594 * @netdev: network interface device structure 1595 * @ch: channel information structure 1596 * 1597 * For the 
purposes of our device, we only use combined channels, i.e. a tx/rx
 * queue pair. Report one extra channel to match our "other" MSI-X vector.
 **/
static void iavf_get_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* Report maximum channels */
	ch->max_combined = adapter->vsi_res->num_queue_pairs;

	ch->max_other = NONQ_VECS;
	ch->other_count = NONQ_VECS;

	ch->combined_count = adapter->num_active_queues;
}

/**
 * iavf_set_channels: set the new channel count
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Negotiate a new number of channels with the PF then do a reset.  During
 * reset we'll realloc queues and fix the RSS table.  Returns 0 on success,
 * negative on failure.
 **/
static int iavf_set_channels(struct net_device *netdev,
			     struct ethtool_channels *ch)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u32 num_req = ch->combined_count;
	int ret = 0;

	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
		return -EINVAL;
	}

	/* All of these should have already been checked by ethtool before this
	 * even gets to us, but just to be sure.
	 */
	if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs)
		return -EINVAL;

	/* nothing to do; requested count already active */
	if (num_req == adapter->num_active_queues)
		return 0;

	/* only combined channels are supported; rx/tx-only counts rejected */
	if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
		return -EINVAL;

	/* queue count is renegotiated with the PF during the reset below */
	adapter->num_req_queues = num_req;
	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
	iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);

	ret = iavf_wait_for_reset(adapter);
	if (ret)
		netdev_warn(netdev, "Changing channel count timeout or interrupted waiting for reset");

	return ret;
}

/**
 * iavf_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the RSS hash key size in bytes.
 **/
static u32 iavf_get_rxfh_key_size(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	return adapter->rss_key_size;
}

/**
 * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size.
 **/
static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	return adapter->rss_lut_size;
}

/**
 * iavf_get_rxfh - get the rx flow hash indirection table
 * @netdev: network interface device structure
 * @rxfh: pointer to param struct (indir, key, hfunc)
 *
 * Reads the indirection table directly from the hardware. Always returns 0.
1691 **/ 1692 static int iavf_get_rxfh(struct net_device *netdev, 1693 struct ethtool_rxfh_param *rxfh) 1694 { 1695 struct iavf_adapter *adapter = netdev_priv(netdev); 1696 u16 i; 1697 1698 rxfh->hfunc = ETH_RSS_HASH_TOP; 1699 if (adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) 1700 rxfh->input_xfrm |= RXH_XFRM_SYM_XOR; 1701 1702 if (rxfh->key) 1703 memcpy(rxfh->key, adapter->rss_key, adapter->rss_key_size); 1704 1705 if (rxfh->indir) 1706 /* Each 32 bits pointed by 'indir' is stored with a lut entry */ 1707 for (i = 0; i < adapter->rss_lut_size; i++) 1708 rxfh->indir[i] = (u32)adapter->rss_lut[i]; 1709 1710 return 0; 1711 } 1712 1713 /** 1714 * iavf_set_rxfh - set the rx flow hash indirection table 1715 * @netdev: network interface device structure 1716 * @rxfh: pointer to param struct (indir, key, hfunc) 1717 * @extack: extended ACK from the Netlink message 1718 * 1719 * Returns -EINVAL if the table specifies an invalid queue id, otherwise 1720 * returns 0 after programming the table. 
 **/
static int iavf_set_rxfh(struct net_device *netdev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u16 i;

	/* Only support toeplitz hash function */
	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	/* switch between symmetric and asymmetric Toeplitz when the
	 * RXH_XFRM_SYM_XOR request differs from the current setting
	 */
	if ((rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
	    adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) {
		if (!ADV_RSS_SUPPORT(adapter))
			return -EOPNOTSUPP;
		adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
	} else if (!(rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
		   adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC) {
		adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
	}

	/* nothing else to program; hfunc change (if any) is already queued */
	if (!rxfh->key && !rxfh->indir)
		return 0;

	if (rxfh->key)
		memcpy(adapter->rss_key, rxfh->key, adapter->rss_key_size);

	if (rxfh->indir) {
		/* Each 32 bits pointed by 'indir' is stored with a lut entry */
		for (i = 0; i < adapter->rss_lut_size; i++)
			adapter->rss_lut[i] = (u8)(rxfh->indir[i]);
	}

	return iavf_config_rss(adapter);
}

/* ethtool callback table for the iavf netdev */
static const struct ethtool_ops iavf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.supported_input_xfrm = RXH_XFRM_SYM_XOR,
	.get_drvinfo		= iavf_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= iavf_get_ringparam,
	.set_ringparam		= iavf_set_ringparam,
	.get_strings		= iavf_get_strings,
	.get_ethtool_stats	= iavf_get_ethtool_stats,
	.get_sset_count		= iavf_get_sset_count,
	.get_msglevel		= iavf_get_msglevel,
	.set_msglevel		= iavf_set_msglevel,
	.get_coalesce		= iavf_get_coalesce,
	.set_coalesce		= iavf_set_coalesce,
	.get_per_queue_coalesce = iavf_get_per_queue_coalesce,
	.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
	.set_rxnfc		= iavf_set_rxnfc,
	.get_rxnfc		= iavf_get_rxnfc,
	.get_rxfh_indir_size	= iavf_get_rxfh_indir_size,
	.get_rxfh		= iavf_get_rxfh,
	.set_rxfh		= iavf_set_rxfh,
	.get_rxfh_fields	= iavf_get_rxfh_fields,
	.set_rxfh_fields	= iavf_set_rxfh_fields,
	.get_channels		= iavf_get_channels,
	.set_channels		= iavf_set_channels,
	.get_rxfh_key_size	= iavf_get_rxfh_key_size,
	.get_link_ksettings	= iavf_get_link_ksettings,
};

/**
 * iavf_set_ethtool_ops - Initialize ethtool ops struct
 * @netdev: network interface device structure
 *
 * Sets ethtool ops struct in our netdev so that ethtool can call
 * our functions.
 **/
void iavf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &iavf_ethtool_ops;
}