// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */

#include <linux/pci.h>
#include <linux/phy.h>
#include <linux/ethtool.h>

#include "wx_type.h"
#include "wx_ethtool.h"
#include "wx_hw.h"
#include "wx_lib.h"

struct wx_stats {
        char stat_string[ETH_GSTRING_LEN];
        size_t sizeof_stat;
        off_t stat_offset;
};

#define WX_STAT(str, m) { \
                .stat_string = str, \
                .sizeof_stat = sizeof(((struct wx *)0)->m), \
                .stat_offset = offsetof(struct wx, m) }

static const struct wx_stats wx_gstrings_stats[] = {
        WX_STAT("rx_dma_pkts", stats.gprc),
        WX_STAT("tx_dma_pkts", stats.gptc),
        WX_STAT("rx_dma_bytes", stats.gorc),
        WX_STAT("tx_dma_bytes", stats.gotc),
        WX_STAT("rx_total_pkts", stats.tpr),
        WX_STAT("tx_total_pkts", stats.tpt),
        WX_STAT("rx_long_length_count", stats.roc),
        WX_STAT("rx_short_length_count", stats.ruc),
        WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
        WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
        WX_STAT("os2bmc_tx_by_host", stats.o2bspc),
        WX_STAT("os2bmc_rx_by_host", stats.b2ogprc),
        WX_STAT("rx_no_dma_resources", stats.rdmdrop),
        WX_STAT("tx_busy", tx_busy),
        WX_STAT("non_eop_descs", non_eop_descs),
        WX_STAT("tx_restart_queue", restart_queue),
        WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
        WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
        WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
        WX_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
        WX_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
        WX_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};

static const struct wx_stats wx_gstrings_fdir_stats[] = {
        WX_STAT("fdir_match", stats.fdirmatch),
        WX_STAT("fdir_miss", stats.fdirmiss),
};

/* The driver allocates num_tx_queues and num_rx_queues symmetrically, so
 * we define WX_NUM_RX_QUEUES to evaluate to num_tx_queues. This is used
 * because we do not have a good way to get the max number of rx queues
 * with CONFIG_RPS disabled.
 */
#define WX_NUM_RX_QUEUES netdev->num_tx_queues
#define WX_NUM_TX_QUEUES netdev->num_tx_queues

#define WX_QUEUE_STATS_LEN ( \
                (WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \
                (sizeof(struct wx_queue_stats) / sizeof(u64)))
#define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats)
#define WX_FDIR_STATS_LEN ARRAY_SIZE(wx_gstrings_fdir_stats)
#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN)

int wx_get_sset_count(struct net_device *netdev, int sset)
{
        struct wx *wx = netdev_priv(netdev);

        switch (sset) {
        case ETH_SS_STATS:
                return (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) ?
                        WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
}
EXPORT_SYMBOL(wx_get_sset_count);

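/* Note: the strings emitted by wx_get_strings() below must match, index
 * for index, the order in which wx_get_ethtool_stats() fills the data
 * array (global stats, then FDIR stats when present, then per-queue Tx
 * and Rx counters); the ethtool core pairs names with values purely by
 * position.
 */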
void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
        struct wx *wx = netdev_priv(netdev);
        u8 *p = data;
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
                        ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
                if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
                        for (i = 0; i < WX_FDIR_STATS_LEN; i++)
                                ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
                }
                for (i = 0; i < netdev->num_tx_queues; i++) {
                        ethtool_sprintf(&p, "tx_queue_%u_packets", i);
                        ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
                }
                for (i = 0; i < WX_NUM_RX_QUEUES; i++) {
                        ethtool_sprintf(&p, "rx_queue_%u_packets", i);
                        ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
                }
                break;
        }
}
EXPORT_SYMBOL(wx_get_strings);

void wx_get_ethtool_stats(struct net_device *netdev,
                          struct ethtool_stats *stats, u64 *data)
{
        struct wx *wx = netdev_priv(netdev);
        struct wx_ring *ring;
        unsigned int start;
        int i, j, k;
        char *p;

        wx_update_stats(wx);

        for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) {
                p = (char *)wx + wx_gstrings_stats[i].stat_offset;
                data[i] = (wx_gstrings_stats[i].sizeof_stat ==
                           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }

        if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
                for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
                        p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
                        data[i++] = *(u64 *)p;
                }
        }

        for (j = 0; j < netdev->num_tx_queues; j++) {
                ring = wx->tx_ring[j];
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
                        continue;
                }

                do {
                        start = u64_stats_fetch_begin(&ring->syncp);
                        data[i] = ring->stats.packets;
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry(&ring->syncp, start));
                i += 2;
        }
        for (j = 0; j < WX_NUM_RX_QUEUES; j++) {
                ring = wx->rx_ring[j];
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
                        continue;
                }

                do {
                        start = u64_stats_fetch_begin(&ring->syncp);
                        data[i] = ring->stats.packets;
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry(&ring->syncp, start));
                i += 2;
        }
}
EXPORT_SYMBOL(wx_get_ethtool_stats);

void wx_get_mac_stats(struct net_device *netdev,
                      struct ethtool_eth_mac_stats *mac_stats)
{
        struct wx *wx = netdev_priv(netdev);
        struct wx_hw_stats *hwstats;

        wx_update_stats(wx);

        hwstats = &wx->stats;
        mac_stats->MulticastFramesXmittedOK = hwstats->mptc;
        mac_stats->BroadcastFramesXmittedOK = hwstats->bptc;
        mac_stats->MulticastFramesReceivedOK = hwstats->mprc;
        mac_stats->BroadcastFramesReceivedOK = hwstats->bprc;
}
EXPORT_SYMBOL(wx_get_mac_stats);

void wx_get_pause_stats(struct net_device *netdev,
                        struct ethtool_pause_stats *stats)
{
        struct wx *wx = netdev_priv(netdev);
        struct wx_hw_stats *hwstats;

        wx_update_stats(wx);

        hwstats = &wx->stats;
        stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
        stats->rx_pause_frames = hwstats->lxonoffrxc;
}
EXPORT_SYMBOL(wx_get_pause_stats);

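/* WX_STATS_LEN is computed from netdev->num_tx_queues, i.e. the number
 * of queues the netdev was allocated with.  When fewer queues are
 * currently active, wx_get_drvinfo() below trims n_stats by the two u64
 * counters (packets and bytes) reported per inactive queue, for both the
 * Tx and Rx rings.
 */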
void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
        unsigned int stats_len = WX_STATS_LEN;
        struct wx *wx = netdev_priv(netdev);

        if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
                stats_len += WX_FDIR_STATS_LEN;

        strscpy(info->driver, wx->driver_name, sizeof(info->driver));
        strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
        strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
        if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) {
                info->n_stats = stats_len -
                                (WX_NUM_TX_QUEUES - wx->num_tx_queues) *
                                (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2;
        } else {
                info->n_stats = stats_len;
        }
}
EXPORT_SYMBOL(wx_get_drvinfo);

int wx_nway_reset(struct net_device *netdev)
{
        struct wx *wx = netdev_priv(netdev);

        if (wx->mac.type == wx_mac_aml40)
                return -EOPNOTSUPP;

        return phylink_ethtool_nway_reset(wx->phylink);
}
EXPORT_SYMBOL(wx_nway_reset);

int wx_get_link_ksettings(struct net_device *netdev,
                          struct ethtool_link_ksettings *cmd)
{
        struct wx *wx = netdev_priv(netdev);

        return phylink_ethtool_ksettings_get(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_get_link_ksettings);

int wx_set_link_ksettings(struct net_device *netdev,
                          const struct ethtool_link_ksettings *cmd)
{
        struct wx *wx = netdev_priv(netdev);

        if (wx->mac.type == wx_mac_aml40)
                return -EOPNOTSUPP;

        return phylink_ethtool_ksettings_set(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_set_link_ksettings);

void wx_get_pauseparam(struct net_device *netdev,
                       struct ethtool_pauseparam *pause)
{
        struct wx *wx = netdev_priv(netdev);

        if (wx->mac.type == wx_mac_aml40)
                return;

        phylink_ethtool_get_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_get_pauseparam);

int wx_set_pauseparam(struct net_device *netdev,
                      struct ethtool_pauseparam *pause)
{
        struct wx *wx = netdev_priv(netdev);

        if (wx->mac.type == wx_mac_aml40)
                return -EOPNOTSUPP;

        return phylink_ethtool_set_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_set_pauseparam);

void wx_get_ringparam(struct net_device *netdev,
                      struct ethtool_ringparam *ring,
                      struct kernel_ethtool_ringparam *kernel_ring,
                      struct netlink_ext_ack *extack)
{
        struct wx *wx = netdev_priv(netdev);

        ring->rx_max_pending = WX_MAX_RXD;
        ring->tx_max_pending = WX_MAX_TXD;
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
        ring->rx_pending = wx->rx_ring_count;
        ring->tx_pending = wx->tx_ring_count;
        ring->rx_mini_pending = 0;
        ring->rx_jumbo_pending = 0;
}
EXPORT_SYMBOL(wx_get_ringparam);

int wx_get_coalesce(struct net_device *netdev,
                    struct ethtool_coalesce *ec,
                    struct kernel_ethtool_coalesce *kernel_coal,
                    struct netlink_ext_ack *extack)
{
        struct wx *wx = netdev_priv(netdev);

        ec->tx_max_coalesced_frames_irq = wx->tx_work_limit;
        /* only valid if in constant ITR mode */
        if (wx->rx_itr_setting <= 1)
                ec->rx_coalesce_usecs = wx->rx_itr_setting;
        else
                ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;

        /* if in mixed tx/rx queues per vector mode, report only rx settings */
        if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
                return 0;

        /* only valid if in constant ITR mode */
        if (wx->tx_itr_setting <= 1)
                ec->tx_coalesce_usecs = wx->tx_itr_setting;
        else
                ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2;

        return 0;
}
EXPORT_SYMBOL(wx_get_coalesce);

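/* rx/tx_itr_setting encoding, shared by wx_get_coalesce() above and
 * wx_set_coalesce() below: a requested value of 0 or 1 is stored
 * verbatim (1 later selects the driver's fixed default rate, WX_20K_ITR
 * or WX_12K_ITR depending on MAC type and direction), while anything
 * larger stores the interval in microseconds shifted left by two bits.
 * That is why get reverses it with ">> 2" and the per-MAC bound is
 * checked against "max_eitr >> 2".
 */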
int wx_set_coalesce(struct net_device *netdev,
                    struct ethtool_coalesce *ec,
                    struct kernel_ethtool_coalesce *kernel_coal,
                    struct netlink_ext_ack *extack)
{
        struct wx *wx = netdev_priv(netdev);
        u16 tx_itr_param, rx_itr_param;
        struct wx_q_vector *q_vector;
        u16 max_eitr;
        int i;

        if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) {
                /* reject Tx specific changes in case of mixed RxTx vectors */
                if (ec->tx_coalesce_usecs)
                        return -EOPNOTSUPP;
        }

        if (ec->tx_max_coalesced_frames_irq)
                wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;

        switch (wx->mac.type) {
        case wx_mac_sp:
                max_eitr = WX_SP_MAX_EITR;
                break;
        case wx_mac_aml:
        case wx_mac_aml40:
                max_eitr = WX_AML_MAX_EITR;
                break;
        default:
                max_eitr = WX_EM_MAX_EITR;
                break;
        }

        if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
            (ec->tx_coalesce_usecs > (max_eitr >> 2)))
                return -EINVAL;

        if (ec->rx_coalesce_usecs > 1)
                wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
        else
                wx->rx_itr_setting = ec->rx_coalesce_usecs;

        if (wx->rx_itr_setting == 1)
                rx_itr_param = WX_20K_ITR;
        else
                rx_itr_param = wx->rx_itr_setting;

        if (ec->tx_coalesce_usecs > 1)
                wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
        else
                wx->tx_itr_setting = ec->tx_coalesce_usecs;

        if (wx->tx_itr_setting == 1) {
                switch (wx->mac.type) {
                case wx_mac_sp:
                case wx_mac_aml:
                case wx_mac_aml40:
                        tx_itr_param = WX_12K_ITR;
                        break;
                default:
                        tx_itr_param = WX_20K_ITR;
                        break;
                }
        } else {
                tx_itr_param = wx->tx_itr_setting;
        }

        /* mixed Rx/Tx */
        if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
                wx->tx_itr_setting = wx->rx_itr_setting;

        for (i = 0; i < wx->num_q_vectors; i++) {
                q_vector = wx->q_vector[i];
                if (q_vector->tx.count && !q_vector->rx.count)
                        /* tx only */
                        q_vector->itr = tx_itr_param;
                else
                        /* rx only or mixed */
                        q_vector->itr = rx_itr_param;
                wx_write_eitr(q_vector);
        }

        return 0;
}
EXPORT_SYMBOL(wx_set_coalesce);

static unsigned int wx_max_channels(struct wx *wx)
{
        unsigned int max_combined;

        if (!wx->msix_q_entries) {
                /* We only support one q_vector without MSI-X */
                max_combined = 1;
        } else {
                /* support up to max allowed queues with RSS */
                if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
                        max_combined = 63;
                else
                        max_combined = 8;
        }

        return max_combined;
}

void wx_get_channels(struct net_device *dev,
                     struct ethtool_channels *ch)
{
        struct wx *wx = netdev_priv(dev);

        /* report maximum channels */
        ch->max_combined = wx_max_channels(wx);

        /* report info for other vector */
        if (wx->msix_q_entries) {
                ch->max_other = 1;
                ch->other_count = 1;
        }

        /* record RSS queues */
        ch->combined_count = wx->ring_feature[RING_F_RSS].indices;

        if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
                ch->combined_count = wx->ring_feature[RING_F_FDIR].indices;
}
EXPORT_SYMBOL(wx_get_channels);

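/* wx_set_channels() only validates the request and records the new count
 * in the RSS (and, on flow-director capable parts, FDIR) ring-feature
 * limits; it does not reallocate any rings itself.  Callers are expected
 * to rebuild the queues after it returns.
 */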
int wx_set_channels(struct net_device *dev,
                    struct ethtool_channels *ch)
{
        unsigned int count = ch->combined_count;
        struct wx *wx = netdev_priv(dev);

        /* verify other_count has not changed */
        if (ch->other_count != 1)
                return -EINVAL;

        /* verify the number of channels does not exceed hardware limits */
        if (count > wx_max_channels(wx))
                return -EINVAL;

        if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
                wx->ring_feature[RING_F_FDIR].limit = count;

        wx->ring_feature[RING_F_RSS].limit = count;

        return 0;
}
EXPORT_SYMBOL(wx_set_channels);

u32 wx_get_msglevel(struct net_device *netdev)
{
        struct wx *wx = netdev_priv(netdev);

        return wx->msg_enable;
}
EXPORT_SYMBOL(wx_get_msglevel);

void wx_set_msglevel(struct net_device *netdev, u32 data)
{
        struct wx *wx = netdev_priv(netdev);

        wx->msg_enable = data;
}
EXPORT_SYMBOL(wx_set_msglevel);

int wx_get_ts_info(struct net_device *dev,
                   struct kernel_ethtool_ts_info *info)
{
        struct wx *wx = netdev_priv(dev);

        info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
                           BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
                           BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_TX_HARDWARE |
                                SOF_TIMESTAMPING_RX_HARDWARE |
                                SOF_TIMESTAMPING_RAW_HARDWARE;

        if (wx->ptp_clock)
                info->phc_index = ptp_clock_index(wx->ptp_clock);
        else
                info->phc_index = -1;

        info->tx_types = BIT(HWTSTAMP_TX_OFF) |
                         BIT(HWTSTAMP_TX_ON);

        return 0;
}
EXPORT_SYMBOL(wx_get_ts_info);

void wx_get_ptp_stats(struct net_device *dev,
                      struct ethtool_ts_stats *ts_stats)
{
        struct wx *wx = netdev_priv(dev);

        if (wx->ptp_clock) {
                ts_stats->pkts = wx->tx_hwtstamp_pkts;
                ts_stats->lost = wx->tx_hwtstamp_timeouts +
                                 wx->tx_hwtstamp_skipped +
                                 wx->rx_hwtstamp_cleared;
                ts_stats->err = wx->tx_hwtstamp_errors;
        }
}
EXPORT_SYMBOL(wx_get_ptp_stats);