// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */

#include <linux/pci.h>
#include <linux/phy.h>
#include <linux/ethtool.h>

#include "wx_type.h"
#include "wx_ethtool.h"
#include "wx_hw.h"
#include "wx_lib.h"

struct wx_stats {
	char stat_string[ETH_GSTRING_LEN];
	size_t sizeof_stat;
	off_t stat_offset;
};

#define WX_STAT(str, m) { \
		.stat_string = str, \
		.sizeof_stat = sizeof(((struct wx *)0)->m), \
		.stat_offset = offsetof(struct wx, m) }

static const struct wx_stats wx_gstrings_stats[] = {
	WX_STAT("rx_dma_pkts", stats.gprc),
	WX_STAT("tx_dma_pkts", stats.gptc),
	WX_STAT("rx_dma_bytes", stats.gorc),
	WX_STAT("tx_dma_bytes", stats.gotc),
	WX_STAT("rx_total_pkts", stats.tpr),
	WX_STAT("tx_total_pkts", stats.tpt),
	WX_STAT("rx_long_length_count", stats.roc),
	WX_STAT("rx_short_length_count", stats.ruc),
	WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
	WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
	WX_STAT("os2bmc_tx_by_host", stats.o2bspc),
	WX_STAT("os2bmc_rx_by_host", stats.b2ogprc),
	WX_STAT("rx_no_dma_resources", stats.rdmdrop),
	WX_STAT("tx_busy", tx_busy),
	WX_STAT("non_eop_descs", non_eop_descs),
	WX_STAT("tx_restart_queue", restart_queue),
	WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
	WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
	WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
	WX_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
	WX_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
	WX_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};

static const struct wx_stats wx_gstrings_fdir_stats[] = {
	WX_STAT("fdir_match", stats.fdirmatch),
	WX_STAT("fdir_miss", stats.fdirmiss),
};

static const struct wx_stats wx_gstrings_rsc_stats[] = {
	WX_STAT("rsc_aggregated", rsc_count),
	WX_STAT("rsc_flushed", rsc_flush),
};

/* The driver allocates num_tx_queues and num_rx_queues symmetrically, so
 * we let WX_NUM_RX_QUEUES evaluate to num_tx_queues. This is done because
 * we do not have a good way to get the max number of rx queues with
 * CONFIG_RPS disabled.
 */
#define WX_NUM_RX_QUEUES netdev->num_tx_queues
#define WX_NUM_TX_QUEUES netdev->num_tx_queues

#define WX_QUEUE_STATS_LEN ( \
		(WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \
		(sizeof(struct wx_queue_stats) / sizeof(u64)))
#define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats)
#define WX_FDIR_STATS_LEN ARRAY_SIZE(wx_gstrings_fdir_stats)
#define WX_RSC_STATS_LEN ARRAY_SIZE(wx_gstrings_rsc_stats)
#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN)
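
/* ethtool expects wx_get_sset_count(), wx_get_strings() and
 * wx_get_ethtool_stats() to agree on the number and order of entries, so
 * the FDIR/RSC capability checks must stay identical in all three
 * callbacks below.
 */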

int wx_get_sset_count(struct net_device *netdev, int sset)
{
	struct wx *wx = netdev_priv(netdev);
	int len = WX_STATS_LEN;

	switch (sset) {
	case ETH_SS_STATS:
		if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
			len += WX_FDIR_STATS_LEN;
		if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags))
			len += WX_RSC_STATS_LEN;
		return len;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(wx_get_sset_count);

void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct wx *wx = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
			ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
		if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
			for (i = 0; i < WX_FDIR_STATS_LEN; i++)
				ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
		}
		if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
			for (i = 0; i < WX_RSC_STATS_LEN; i++)
				ethtool_puts(&p, wx_gstrings_rsc_stats[i].stat_string);
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
		}
		for (i = 0; i < WX_NUM_RX_QUEUES; i++) {
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
		}
		break;
	}
}
EXPORT_SYMBOL(wx_get_strings);

void wx_get_ethtool_stats(struct net_device *netdev,
			  struct ethtool_stats *stats, u64 *data)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_ring *ring;
	unsigned int start;
	int i, j, k;
	char *p;

	wx_update_stats(wx);

	for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) {
		p = (char *)wx + wx_gstrings_stats[i].stat_offset;
		data[i] = (wx_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
		for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
			p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
			data[i++] = *(u64 *)p;
		}
	}

	if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
		for (k = 0; k < WX_RSC_STATS_LEN; k++) {
			p = (char *)wx + wx_gstrings_rsc_stats[k].stat_offset;
			data[i++] = *(u64 *)p;
		}
	}

	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = wx->tx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < WX_NUM_RX_QUEUES; j++) {
		ring = wx->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}
}
EXPORT_SYMBOL(wx_get_ethtool_stats);
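
/* The callbacks below feed ethtool's standard stats groups (MAC and
 * pause counters, e.g. "ethtool -S eth0 --groups eth-mac" with a recent
 * ethtool) from the hardware statistics snapshot refreshed by
 * wx_update_stats().
 */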

void wx_get_mac_stats(struct net_device *netdev,
		      struct ethtool_eth_mac_stats *mac_stats)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_hw_stats *hwstats;

	wx_update_stats(wx);

	hwstats = &wx->stats;
	mac_stats->MulticastFramesXmittedOK = hwstats->mptc;
	mac_stats->BroadcastFramesXmittedOK = hwstats->bptc;
	mac_stats->MulticastFramesReceivedOK = hwstats->mprc;
	mac_stats->BroadcastFramesReceivedOK = hwstats->bprc;
}
EXPORT_SYMBOL(wx_get_mac_stats);

void wx_get_pause_stats(struct net_device *netdev,
			struct ethtool_pause_stats *stats)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_hw_stats *hwstats;

	wx_update_stats(wx);

	hwstats = &wx->stats;
	stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
	stats->rx_pause_frames = hwstats->lxonoffrxc;
}
EXPORT_SYMBOL(wx_get_pause_stats);

void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
	unsigned int stats_len = WX_STATS_LEN;
	struct wx *wx = netdev_priv(netdev);

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
		stats_len += WX_FDIR_STATS_LEN;

	strscpy(info->driver, wx->driver_name, sizeof(info->driver));
	strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
	strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
	if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) {
		info->n_stats = stats_len -
				(WX_NUM_TX_QUEUES - wx->num_tx_queues) *
				(sizeof(struct wx_queue_stats) / sizeof(u64)) * 2;
	} else {
		info->n_stats = stats_len;
	}
}
EXPORT_SYMBOL(wx_get_drvinfo);

int wx_nway_reset(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	return phylink_ethtool_nway_reset(wx->phylink);
}
EXPORT_SYMBOL(wx_nway_reset);

int wx_get_link_ksettings(struct net_device *netdev,
			  struct ethtool_link_ksettings *cmd)
{
	struct wx *wx = netdev_priv(netdev);

	return phylink_ethtool_ksettings_get(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_get_link_ksettings);

int wx_set_link_ksettings(struct net_device *netdev,
			  const struct ethtool_link_ksettings *cmd)
{
	struct wx *wx = netdev_priv(netdev);

	return phylink_ethtool_ksettings_set(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_set_link_ksettings);
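
/* Pause-frame configuration is delegated to phylink, which owns
 * link-level flow-control negotiation with the link partner.
 */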

void wx_get_pauseparam(struct net_device *netdev,
		       struct ethtool_pauseparam *pause)
{
	struct wx *wx = netdev_priv(netdev);

	phylink_ethtool_get_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_get_pauseparam);

int wx_set_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct wx *wx = netdev_priv(netdev);

	return phylink_ethtool_set_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_set_pauseparam);

void wx_get_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *ring,
		      struct kernel_ethtool_ringparam *kernel_ring,
		      struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);

	ring->rx_max_pending = WX_MAX_RXD;
	ring->tx_max_pending = WX_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = wx->rx_ring_count;
	ring->tx_pending = wx->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
EXPORT_SYMBOL(wx_get_ringparam);

int wx_get_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);

	ec->tx_max_coalesced_frames_irq = wx->tx_work_limit;
	/* only valid if in constant ITR mode */
	if (wx->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = wx->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;

	if (wx->adaptive_itr) {
		ec->use_adaptive_rx_coalesce = 1;
		ec->use_adaptive_tx_coalesce = 1;
	}

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (wx->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = wx->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2;

	return 0;
}
EXPORT_SYMBOL(wx_get_coalesce);
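
/* RSC (hardware receive-side coalescing, exposed through the LRO feature
 * flag) only works when the rx interrupt rate is low enough, so it is
 * re-evaluated whenever rx-usecs changes and the device is reset if the
 * enable state flips. Note that ITR values are stored left-shifted by 2,
 * and a stored value of 1 selects adaptive moderation; e.g. from
 * userspace (hypothetical interface name):
 *   ethtool -C eth0 adaptive-rx off rx-usecs 50
 */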

static void wx_update_rsc(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	bool need_reset = false;

	/* nothing to do if LRO or RSC are not enabled */
	if (!test_bit(WX_FLAG_RSC_CAPABLE, wx->flags) ||
	    !(netdev->features & NETIF_F_LRO))
		return;

	/* check the feature flag value and enable RSC if necessary */
	if (wx->rx_itr_setting == 1 ||
	    wx->rx_itr_setting > WX_MIN_RSC_ITR) {
		if (!test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) {
			set_bit(WX_FLAG_RSC_ENABLED, wx->flags);
			dev_info(&wx->pdev->dev,
				 "rx-usecs value high enough to re-enable RSC\n");

			need_reset = true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) {
		clear_bit(WX_FLAG_RSC_ENABLED, wx->flags);
		dev_info(&wx->pdev->dev,
			 "rx-usecs set too low, disabling RSC\n");

		need_reset = true;
	}

	/* reset the device to apply the new RSC setting */
	if (need_reset && wx->do_reset)
		wx->do_reset(netdev);
}

int wx_set_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u16 tx_itr_param, rx_itr_param;
	struct wx_q_vector *q_vector;
	u16 max_eitr;
	int i;

	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EOPNOTSUPP;
	}

	if (ec->tx_max_coalesced_frames_irq > U16_MAX ||
	    !ec->tx_max_coalesced_frames_irq)
		return -EINVAL;

	wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;

	switch (wx->mac.type) {
	case wx_mac_sp:
		max_eitr = WX_SP_MAX_EITR;
		rx_itr_param = WX_20K_ITR;
		tx_itr_param = WX_12K_ITR;
		break;
	case wx_mac_aml:
	case wx_mac_aml40:
		max_eitr = WX_AML_MAX_EITR;
		rx_itr_param = WX_20K_ITR;
		tx_itr_param = WX_12K_ITR;
		break;
	default:
		max_eitr = WX_EM_MAX_EITR;
		rx_itr_param = WX_7K_ITR;
		tx_itr_param = WX_7K_ITR;
		break;
	}

	if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
	    (ec->tx_coalesce_usecs > (max_eitr >> 2)))
		return -EINVAL;

	if (ec->use_adaptive_rx_coalesce) {
		wx->adaptive_itr = true;
		wx->rx_itr_setting = 1;
		wx->tx_itr_setting = 1;
		return 0;
	}

	if (ec->rx_coalesce_usecs > 1)
		wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		wx->rx_itr_setting = ec->rx_coalesce_usecs;

	if (ec->tx_coalesce_usecs > 1)
		wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		wx->tx_itr_setting = ec->tx_coalesce_usecs;

	if (wx->adaptive_itr) {
		wx->adaptive_itr = false;
		wx->rx_itr_setting = rx_itr_param;
		wx->tx_itr_setting = tx_itr_param;
	} else if (wx->rx_itr_setting == 1 || wx->tx_itr_setting == 1) {
		wx->adaptive_itr = true;
	}

	if (wx->rx_itr_setting != 1)
		rx_itr_param = wx->rx_itr_setting;

	if (wx->tx_itr_setting != 1)
		tx_itr_param = wx->tx_itr_setting;

	/* mixed Rx/Tx */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		wx->tx_itr_setting = wx->rx_itr_setting;

	for (i = 0; i < wx->num_q_vectors; i++) {
		q_vector = wx->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		wx_write_eitr(q_vector);
	}

	wx_update_rsc(wx);

	return 0;
}
EXPORT_SYMBOL(wx_set_coalesce);

static unsigned int wx_max_channels(struct wx *wx)
{
	unsigned int max_combined;

	if (!wx->msix_q_entries) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else {
		/* support up to max allowed queues with RSS */
		if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
			max_combined = 63;
		else
			max_combined = 8;
	}

	return max_combined;
}

void wx_get_channels(struct net_device *dev,
		     struct ethtool_channels *ch)
{
	struct wx *wx = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = wx_max_channels(wx);

	/* report info for other vector */
	if (wx->msix_q_entries) {
		ch->max_other = 1;
		ch->other_count = 1;
	}

	/* record RSS queues */
	ch->combined_count = wx->ring_feature[RING_F_RSS].indices;

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
		ch->combined_count = wx->ring_feature[RING_F_FDIR].indices;
}
EXPORT_SYMBOL(wx_get_channels);

int wx_set_channels(struct net_device *dev,
		    struct ethtool_channels *ch)
{
	unsigned int count = ch->combined_count;
	struct wx *wx = netdev_priv(dev);

	/* verify other_count has not changed */
	if (ch->other_count != 1)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > wx_max_channels(wx))
		return -EINVAL;

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
		wx->ring_feature[RING_F_FDIR].limit = count;

	wx->ring_feature[RING_F_RSS].limit = count;

	return 0;
}
EXPORT_SYMBOL(wx_set_channels);
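
/* RSS configuration: the indirection table (RETA) and hash key are
 * reported and updated below, e.g. from userspace (hypothetical
 * interface name):
 *   ethtool -X eth0 equal 4
 */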

u32 wx_rss_indir_size(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	return wx_rss_indir_tbl_entries(wx);
}
EXPORT_SYMBOL(wx_rss_indir_size);

u32 wx_get_rxfh_key_size(struct net_device *netdev)
{
	return WX_RSS_KEY_SIZE;
}
EXPORT_SYMBOL(wx_get_rxfh_key_size);

static void wx_get_reta(struct wx *wx, u32 *indir)
{
	u32 reta_size = wx_rss_indir_tbl_entries(wx);
	u16 rss_m = wx->ring_feature[RING_F_RSS].mask;

	if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
		rss_m = wx->ring_feature[RING_F_RSS].indices - 1;

	for (u32 i = 0; i < reta_size; i++)
		indir[i] = wx->rss_indir_tbl[i] & rss_m;
}

int wx_get_rxfh(struct net_device *netdev,
		struct ethtool_rxfh_param *rxfh)
{
	struct wx *wx = netdev_priv(netdev);

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->indir)
		wx_get_reta(wx, rxfh->indir);

	if (rxfh->key)
		memcpy(rxfh->key, wx->rss_key, WX_RSS_KEY_SIZE);

	return 0;
}
EXPORT_SYMBOL(wx_get_rxfh);

int wx_set_rxfh(struct net_device *netdev,
		struct ethtool_rxfh_param *rxfh,
		struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u32 reta_entries, i;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	reta_entries = wx_rss_indir_tbl_entries(wx);
	/* Fill out the redirection table */
	if (rxfh->indir) {
		for (i = 0; i < reta_entries; i++)
			wx->rss_indir_tbl[i] = rxfh->indir[i];

		wx_store_reta(wx);
	}

	/* Fill out the rss hash key */
	if (rxfh->key) {
		memcpy(wx->rss_key, rxfh->key, WX_RSS_KEY_SIZE);
		wx_store_rsskey(wx);
	}

	return 0;
}
EXPORT_SYMBOL(wx_set_rxfh);

static const struct wx_rss_flow_map rss_flow_table[] = {
	{ TCP_V4_FLOW,  RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_TCP },
	{ TCP_V6_FLOW,  RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_TCP },
	{ UDP_V4_FLOW,  RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_UDP },
	{ UDP_V6_FLOW,  RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_UDP },
	{ SCTP_V4_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_SCTP },
	{ SCTP_V6_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_SCTP },
};

int wx_get_rxfh_fields(struct net_device *dev,
		       struct ethtool_rxfh_fields *nfc)
{
	struct wx *wx = netdev_priv(dev);

	nfc->data = RXH_IP_SRC | RXH_IP_DST;

	for (u32 i = 0; i < ARRAY_SIZE(rss_flow_table); i++) {
		const struct wx_rss_flow_map *entry = &rss_flow_table[i];

		if (entry->flow_type == nfc->flow_type) {
			if (wx->rss_flags & entry->flag)
				nfc->data |= entry->data;
			break;
		}
	}

	return 0;
}
EXPORT_SYMBOL(wx_get_rxfh_fields);
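
/* Hashing on source/destination IP is always enabled; only the L4 port
 * fields can be toggled per flow type, e.g. (hypothetical interface
 * name):
 *   ethtool -N eth0 rx-flow-hash udp4 sdfn
 */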

int wx_set_rxfh_fields(struct net_device *dev,
		       const struct ethtool_rxfh_fields *nfc,
		       struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(dev);
	u8 flags = wx->rss_flags;

	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	for (u32 i = 0; i < ARRAY_SIZE(rss_flow_table); i++) {
		const struct wx_rss_flow_map *entry = &rss_flow_table[i];

		if (entry->flow_type == nfc->flow_type) {
			if (nfc->data & entry->data)
				flags |= entry->flag;
			else
				flags &= ~entry->flag;

			if (flags != wx->rss_flags) {
				wx->rss_flags = flags;
				wx_config_rss_field(wx);
			}

			return 0;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL(wx_set_rxfh_fields);

u32 wx_get_msglevel(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	return wx->msg_enable;
}
EXPORT_SYMBOL(wx_get_msglevel);

void wx_set_msglevel(struct net_device *netdev, u32 data)
{
	struct wx *wx = netdev_priv(netdev);

	wx->msg_enable = data;
}
EXPORT_SYMBOL(wx_set_msglevel);

int wx_get_ts_info(struct net_device *dev,
		   struct kernel_ethtool_ts_info *info)
{
	struct wx *wx = netdev_priv(dev);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (wx->ptp_clock)
		info->phc_index = ptp_clock_index(wx->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
			 BIT(HWTSTAMP_TX_ON);

	return 0;
}
EXPORT_SYMBOL(wx_get_ts_info);

void wx_get_ptp_stats(struct net_device *dev,
		      struct ethtool_ts_stats *ts_stats)
{
	struct wx *wx = netdev_priv(dev);

	if (wx->ptp_clock) {
		ts_stats->pkts = wx->tx_hwtstamp_pkts;
		ts_stats->lost = wx->tx_hwtstamp_timeouts +
				 wx->tx_hwtstamp_skipped +
				 wx->rx_hwtstamp_cleared;
		ts_stats->err = wx->tx_hwtstamp_errors;
	}
}
EXPORT_SYMBOL(wx_get_ptp_stats);

static int wx_get_link_ksettings_vf(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct wx *wx = netdev_priv(netdev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = PORT_NONE;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.speed = wx->speed;

	return 0;
}

static const struct ethtool_ops wx_ethtool_ops_vf = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo		= wx_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= wx_get_ringparam,
	.get_msglevel		= wx_get_msglevel,
	.get_coalesce		= wx_get_coalesce,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings	= wx_get_link_ksettings_vf,
};

void wx_set_ethtool_ops_vf(struct net_device *netdev)
{
	netdev->ethtool_ops = &wx_ethtool_ops_vf;
}
EXPORT_SYMBOL(wx_set_ethtool_ops_vf);