// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */

#include <linux/pci.h>
#include <linux/phy.h>
#include <linux/ethtool.h>

#include "wx_type.h"
#include "wx_ethtool.h"
#include "wx_hw.h"
#include "wx_lib.h"

struct wx_stats {
	char stat_string[ETH_GSTRING_LEN];
	size_t sizeof_stat;
	off_t stat_offset;
};

#define WX_STAT(str, m) { \
		.stat_string = str, \
		.sizeof_stat = sizeof(((struct wx *)0)->m), \
		.stat_offset = offsetof(struct wx, m) }

static const struct wx_stats wx_gstrings_stats[] = {
	WX_STAT("rx_dma_pkts", stats.gprc),
	WX_STAT("tx_dma_pkts", stats.gptc),
	WX_STAT("rx_dma_bytes", stats.gorc),
	WX_STAT("tx_dma_bytes", stats.gotc),
	WX_STAT("rx_total_pkts", stats.tpr),
	WX_STAT("tx_total_pkts", stats.tpt),
	WX_STAT("rx_long_length_count", stats.roc),
	WX_STAT("rx_short_length_count", stats.ruc),
	WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
	WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
	WX_STAT("os2bmc_tx_by_host", stats.o2bspc),
	WX_STAT("os2bmc_rx_by_host", stats.b2ogprc),
	WX_STAT("rx_no_dma_resources", stats.rdmdrop),
	WX_STAT("tx_busy", tx_busy),
	WX_STAT("non_eop_descs", non_eop_descs),
	WX_STAT("tx_restart_queue", restart_queue),
	WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
	WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
	WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
	WX_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
	WX_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
	WX_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};

static const struct wx_stats wx_gstrings_fdir_stats[] = {
	WX_STAT("fdir_match", stats.fdirmatch),
	WX_STAT("fdir_miss", stats.fdirmiss),
};

/* The driver allocates num_tx_queues and num_rx_queues symmetrically, so
 * we let num_rx_queues evaluate to num_tx_queues. This is used because we
 * do not have a good way to get the maximum number of rx queues with
 * CONFIG_RPS disabled.
 */
#define WX_NUM_RX_QUEUES netdev->num_tx_queues
#define WX_NUM_TX_QUEUES netdev->num_tx_queues

#define WX_QUEUE_STATS_LEN ( \
		(WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \
		(sizeof(struct wx_queue_stats) / sizeof(u64)))
#define WX_GLOBAL_STATS_LEN  ARRAY_SIZE(wx_gstrings_stats)
#define WX_FDIR_STATS_LEN  ARRAY_SIZE(wx_gstrings_fdir_stats)
#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN)

int wx_get_sset_count(struct net_device *netdev, int sset)
{
	struct wx *wx = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) ?
			WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(wx_get_sset_count);
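
/* ethtool matches each statistic name to its value purely by index, so
 * wx_get_strings() and wx_get_ethtool_stats() below must emit entries
 * in lockstep: global stats first, then the optional flow-director
 * pair, then the per-queue Tx/Rx packet and byte counters.
 */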
void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct wx *wx = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
			ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
		if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
			for (i = 0; i < WX_FDIR_STATS_LEN; i++)
				ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
		}
		for (i = 0; i < WX_NUM_RX_QUEUES; i++) {
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
		}
		break;
	}
}
EXPORT_SYMBOL(wx_get_strings);

void wx_get_ethtool_stats(struct net_device *netdev,
			  struct ethtool_stats *stats, u64 *data)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_ring *ring;
	unsigned int start;
	int i, j, k;
	char *p;

	wx_update_stats(wx);

	for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) {
		p = (char *)wx + wx_gstrings_stats[i].stat_offset;
		data[i] = (wx_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
		for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
			p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
			data[i++] = *(u64 *)p;
		}
	}

	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = wx->tx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		/* Re-read under the seqcount until we get a consistent
		 * packets/bytes pair; the ring counters are updated
		 * concurrently from the datapath.
		 */
		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < WX_NUM_RX_QUEUES; j++) {
		ring = wx->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}
}
EXPORT_SYMBOL(wx_get_ethtool_stats);

void wx_get_mac_stats(struct net_device *netdev,
		      struct ethtool_eth_mac_stats *mac_stats)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_hw_stats *hwstats;

	wx_update_stats(wx);

	hwstats = &wx->stats;
	mac_stats->MulticastFramesXmittedOK = hwstats->mptc;
	mac_stats->BroadcastFramesXmittedOK = hwstats->bptc;
	mac_stats->MulticastFramesReceivedOK = hwstats->mprc;
	mac_stats->BroadcastFramesReceivedOK = hwstats->bprc;
}
EXPORT_SYMBOL(wx_get_mac_stats);

void wx_get_pause_stats(struct net_device *netdev,
			struct ethtool_pause_stats *stats)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_hw_stats *hwstats;

	wx_update_stats(wx);

	hwstats = &wx->stats;
	stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
	stats->rx_pause_frames = hwstats->lxonoffrxc;
}
EXPORT_SYMBOL(wx_get_pause_stats);

void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
	unsigned int stats_len = WX_STATS_LEN;
	struct wx *wx = netdev_priv(netdev);

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
		stats_len += WX_FDIR_STATS_LEN;

	strscpy(info->driver, wx->driver_name, sizeof(info->driver));
	strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
	strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
	if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) {
		/* Subtract the queue-stat slots for queues that are not
		 * actually in use: two u64 counters per queue, for both
		 * the Tx and Rx directions.
		 */
		info->n_stats = stats_len -
				(WX_NUM_TX_QUEUES - wx->num_tx_queues) *
				(sizeof(struct wx_queue_stats) / sizeof(u64)) * 2;
	} else {
		info->n_stats = stats_len;
	}
}
EXPORT_SYMBOL(wx_get_drvinfo);
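
/* The handlers below delegate link management to phylink. On
 * wx_mac_aml40 parts, autoneg restart and link/pause reconfiguration
 * are rejected (or skipped) before reaching phylink, which appears to
 * reflect a limitation of the 40G MAC.
 */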
int wx_nway_reset(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	if (wx->mac.type == wx_mac_aml40)
		return -EOPNOTSUPP;

	return phylink_ethtool_nway_reset(wx->phylink);
}
EXPORT_SYMBOL(wx_nway_reset);

int wx_get_link_ksettings(struct net_device *netdev,
			  struct ethtool_link_ksettings *cmd)
{
	struct wx *wx = netdev_priv(netdev);

	return phylink_ethtool_ksettings_get(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_get_link_ksettings);

int wx_set_link_ksettings(struct net_device *netdev,
			  const struct ethtool_link_ksettings *cmd)
{
	struct wx *wx = netdev_priv(netdev);

	if (wx->mac.type == wx_mac_aml40)
		return -EOPNOTSUPP;

	return phylink_ethtool_ksettings_set(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_set_link_ksettings);

void wx_get_pauseparam(struct net_device *netdev,
		       struct ethtool_pauseparam *pause)
{
	struct wx *wx = netdev_priv(netdev);

	if (wx->mac.type == wx_mac_aml40)
		return;

	phylink_ethtool_get_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_get_pauseparam);

int wx_set_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct wx *wx = netdev_priv(netdev);

	if (wx->mac.type == wx_mac_aml40)
		return -EOPNOTSUPP;

	return phylink_ethtool_set_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_set_pauseparam);

void wx_get_ringparam(struct net_device *netdev,
		      struct ethtool_ringparam *ring,
		      struct kernel_ethtool_ringparam *kernel_ring,
		      struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);

	ring->rx_max_pending = WX_MAX_RXD;
	ring->tx_max_pending = WX_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = wx->rx_ring_count;
	ring->tx_pending = wx->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
EXPORT_SYMBOL(wx_get_ringparam);

int wx_get_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);

	ec->tx_max_coalesced_frames_irq = wx->tx_work_limit;
	/* only valid if in constant ITR mode */
	if (wx->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = wx->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;

	if (wx->adaptive_itr) {
		ec->use_adaptive_rx_coalesce = 1;
		ec->use_adaptive_tx_coalesce = 1;
	}

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (wx->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = wx->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2;

	return 0;
}
EXPORT_SYMBOL(wx_get_coalesce);
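
/* ITR encoding note (inferred from the shifts above and below):
 * rx/tx_itr_setting stores the EITR interval, which is the requested
 * microsecond value left-shifted by 2. The values 0 and 1 are special
 * and pass through unshifted, with 1 selecting adaptive (dynamic)
 * interrupt moderation.
 */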
int wx_set_coalesce(struct net_device *netdev,
		    struct ethtool_coalesce *ec,
		    struct kernel_ethtool_coalesce *kernel_coal,
		    struct netlink_ext_ack *extack)
{
	struct wx *wx = netdev_priv(netdev);
	u16 tx_itr_param, rx_itr_param;
	struct wx_q_vector *q_vector;
	u16 max_eitr;
	int i;

	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EOPNOTSUPP;
	}

	if (ec->tx_max_coalesced_frames_irq > U16_MAX ||
	    !ec->tx_max_coalesced_frames_irq)
		return -EINVAL;

	wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;

	switch (wx->mac.type) {
	case wx_mac_sp:
		max_eitr = WX_SP_MAX_EITR;
		rx_itr_param = WX_20K_ITR;
		tx_itr_param = WX_12K_ITR;
		break;
	case wx_mac_aml:
	case wx_mac_aml40:
		max_eitr = WX_AML_MAX_EITR;
		rx_itr_param = WX_20K_ITR;
		tx_itr_param = WX_12K_ITR;
		break;
	default:
		max_eitr = WX_EM_MAX_EITR;
		rx_itr_param = WX_7K_ITR;
		tx_itr_param = WX_7K_ITR;
		break;
	}

	if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
	    (ec->tx_coalesce_usecs > (max_eitr >> 2)))
		return -EINVAL;

	if (ec->use_adaptive_rx_coalesce) {
		wx->adaptive_itr = true;
		wx->rx_itr_setting = 1;
		wx->tx_itr_setting = 1;
		return 0;
	}

	if (ec->rx_coalesce_usecs > 1)
		wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		wx->rx_itr_setting = ec->rx_coalesce_usecs;

	if (ec->tx_coalesce_usecs > 1)
		wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		wx->tx_itr_setting = ec->tx_coalesce_usecs;

	if (wx->adaptive_itr) {
		/* leaving adaptive mode: fall back to the per-MAC defaults */
		wx->adaptive_itr = false;
		wx->rx_itr_setting = rx_itr_param;
		wx->tx_itr_setting = tx_itr_param;
	} else if (wx->rx_itr_setting == 1 || wx->tx_itr_setting == 1) {
		wx->adaptive_itr = true;
	}

	if (wx->rx_itr_setting != 1)
		rx_itr_param = wx->rx_itr_setting;

	if (wx->tx_itr_setting != 1)
		tx_itr_param = wx->tx_itr_setting;

	/* mixed Rx/Tx */
	if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
		wx->tx_itr_setting = wx->rx_itr_setting;

	for (i = 0; i < wx->num_q_vectors; i++) {
		q_vector = wx->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		wx_write_eitr(q_vector);
	}

	return 0;
}
EXPORT_SYMBOL(wx_set_coalesce);

static unsigned int wx_max_channels(struct wx *wx)
{
	unsigned int max_combined;

	if (!wx->msix_q_entries) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else {
		/* support up to max allowed queues with RSS */
		if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
			max_combined = 63;
		else
			max_combined = 8;
	}

	return max_combined;
}
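
/* Channel accounting: combined_count normally mirrors the RSS ring
 * feature, but on flow-director-capable devices the FDIR ring feature
 * dictates the queue layout, so its indices are reported (and its
 * limit resized) instead.
 */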
void wx_get_channels(struct net_device *dev,
		     struct ethtool_channels *ch)
{
	struct wx *wx = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = wx_max_channels(wx);

	/* report info for other vector */
	if (wx->msix_q_entries) {
		ch->max_other = 1;
		ch->other_count = 1;
	}

	/* record RSS queues */
	ch->combined_count = wx->ring_feature[RING_F_RSS].indices;

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
		ch->combined_count = wx->ring_feature[RING_F_FDIR].indices;
}
EXPORT_SYMBOL(wx_get_channels);

int wx_set_channels(struct net_device *dev,
		    struct ethtool_channels *ch)
{
	unsigned int count = ch->combined_count;
	struct wx *wx = netdev_priv(dev);

	/* verify other_count has not changed */
	if (ch->other_count != 1)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > wx_max_channels(wx))
		return -EINVAL;

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
		wx->ring_feature[RING_F_FDIR].limit = count;

	wx->ring_feature[RING_F_RSS].limit = count;

	return 0;
}
EXPORT_SYMBOL(wx_set_channels);

u32 wx_get_msglevel(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	return wx->msg_enable;
}
EXPORT_SYMBOL(wx_get_msglevel);

void wx_set_msglevel(struct net_device *netdev, u32 data)
{
	struct wx *wx = netdev_priv(netdev);

	wx->msg_enable = data;
}
EXPORT_SYMBOL(wx_set_msglevel);

int wx_get_ts_info(struct net_device *dev,
		   struct kernel_ethtool_ts_info *info)
{
	struct wx *wx = netdev_priv(dev);

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (wx->ptp_clock)
		info->phc_index = ptp_clock_index(wx->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
			 BIT(HWTSTAMP_TX_ON);

	return 0;
}
EXPORT_SYMBOL(wx_get_ts_info);

void wx_get_ptp_stats(struct net_device *dev,
		      struct ethtool_ts_stats *ts_stats)
{
	struct wx *wx = netdev_priv(dev);

	if (wx->ptp_clock) {
		ts_stats->pkts = wx->tx_hwtstamp_pkts;
		ts_stats->lost = wx->tx_hwtstamp_timeouts +
				 wx->tx_hwtstamp_skipped +
				 wx->rx_hwtstamp_cleared;
		ts_stats->err = wx->tx_hwtstamp_errors;
	}
}
EXPORT_SYMBOL(wx_get_ptp_stats);
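
/* Usage sketch (illustrative, not part of this file): the
 * device-specific drivers that link against libwx are expected to plug
 * these exported helpers into their struct ethtool_ops, roughly:
 *
 *	static const struct ethtool_ops txgbe_ethtool_ops = {
 *		.get_drvinfo		= wx_get_drvinfo,
 *		.nway_reset		= wx_nway_reset,
 *		.get_link		= ethtool_op_get_link,
 *		.get_link_ksettings	= wx_get_link_ksettings,
 *		.set_link_ksettings	= wx_set_link_ksettings,
 *		.get_sset_count		= wx_get_sset_count,
 *		.get_strings		= wx_get_strings,
 *		.get_ethtool_stats	= wx_get_ethtool_stats,
 *		...
 *	};
 *
 * See the per-device ethtool files for the authoritative tables.
 */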