1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Texas Instruments Ethernet Switch Driver ethtool intf 4 * 5 * Copyright (C) 2019 Texas Instruments 6 */ 7 8 #include <linux/if_ether.h> 9 #include <linux/if_vlan.h> 10 #include <linux/kmemleak.h> 11 #include <linux/module.h> 12 #include <linux/netdevice.h> 13 #include <linux/net_tstamp.h> 14 #include <linux/phy.h> 15 #include <linux/pm_runtime.h> 16 #include <linux/skbuff.h> 17 18 #include "cpsw.h" 19 #include "cpts.h" 20 #include "cpsw_ale.h" 21 #include "cpsw_priv.h" 22 #include "davinci_cpdma.h" 23 24 struct cpsw_hw_stats { 25 u32 rxgoodframes; 26 u32 rxbroadcastframes; 27 u32 rxmulticastframes; 28 u32 rxpauseframes; 29 u32 rxcrcerrors; 30 u32 rxaligncodeerrors; 31 u32 rxoversizedframes; 32 u32 rxjabberframes; 33 u32 rxundersizedframes; 34 u32 rxfragments; 35 u32 __pad_0[2]; 36 u32 rxoctets; 37 u32 txgoodframes; 38 u32 txbroadcastframes; 39 u32 txmulticastframes; 40 u32 txpauseframes; 41 u32 txdeferredframes; 42 u32 txcollisionframes; 43 u32 txsinglecollframes; 44 u32 txmultcollframes; 45 u32 txexcessivecollisions; 46 u32 txlatecollisions; 47 u32 txunderrun; 48 u32 txcarriersenseerrors; 49 u32 txoctets; 50 u32 octetframes64; 51 u32 octetframes65t127; 52 u32 octetframes128t255; 53 u32 octetframes256t511; 54 u32 octetframes512t1023; 55 u32 octetframes1024tup; 56 u32 netoctets; 57 u32 rxsofoverruns; 58 u32 rxmofoverruns; 59 u32 rxdmaoverruns; 60 }; 61 62 struct cpsw_stats { 63 char stat_string[ETH_GSTRING_LEN]; 64 int type; 65 int sizeof_stat; 66 int stat_offset; 67 }; 68 69 enum { 70 CPSW_STATS, 71 CPDMA_RX_STATS, 72 CPDMA_TX_STATS, 73 }; 74 75 #define CPSW_STAT(m) CPSW_STATS, \ 76 sizeof_field(struct cpsw_hw_stats, m), \ 77 offsetof(struct cpsw_hw_stats, m) 78 #define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \ 79 sizeof_field(struct cpdma_chan_stats, m), \ 80 offsetof(struct cpdma_chan_stats, m) 81 #define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \ 82 sizeof_field(struct cpdma_chan_stats, m), \ 83 offsetof(struct cpdma_chan_stats, m) 84 85 static const struct cpsw_stats cpsw_gstrings_stats[] = { 86 { "Good Rx Frames", CPSW_STAT(rxgoodframes) }, 87 { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) }, 88 { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) }, 89 { "Pause Rx Frames", CPSW_STAT(rxpauseframes) }, 90 { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) }, 91 { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) }, 92 { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) }, 93 { "Rx Jabbers", CPSW_STAT(rxjabberframes) }, 94 { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) }, 95 { "Rx Fragments", CPSW_STAT(rxfragments) }, 96 { "Rx Octets", CPSW_STAT(rxoctets) }, 97 { "Good Tx Frames", CPSW_STAT(txgoodframes) }, 98 { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) }, 99 { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) }, 100 { "Pause Tx Frames", CPSW_STAT(txpauseframes) }, 101 { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) }, 102 { "Collisions", CPSW_STAT(txcollisionframes) }, 103 { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) }, 104 { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) }, 105 { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) }, 106 { "Late Collisions", CPSW_STAT(txlatecollisions) }, 107 { "Tx Underrun", CPSW_STAT(txunderrun) }, 108 { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) }, 109 { "Tx Octets", CPSW_STAT(txoctets) }, 110 { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) }, 111 { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) }, 112 { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) }, 113 { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) }, 114 { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) }, 115 { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) }, 116 { "Net Octets", CPSW_STAT(netoctets) }, 117 { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) }, 118 { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) }, 119 { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) }, 120 }; 121 122 static const struct cpsw_stats cpsw_gstrings_ch_stats[] = { 123 { "head_enqueue", CPDMA_RX_STAT(head_enqueue) }, 124 { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) }, 125 { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) }, 126 { "misqueued", CPDMA_RX_STAT(misqueued) }, 127 { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) }, 128 { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) }, 129 { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) }, 130 { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) }, 131 { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) }, 132 { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) }, 133 { "good_dequeue", CPDMA_RX_STAT(good_dequeue) }, 134 { "requeue", CPDMA_RX_STAT(requeue) }, 135 { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) }, 136 }; 137 138 #define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats) 139 #define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats) 140 141 u32 cpsw_get_msglevel(struct net_device *ndev) 142 { 143 struct cpsw_priv *priv = netdev_priv(ndev); 144 145 return priv->msg_enable; 146 } 147 EXPORT_SYMBOL_GPL(cpsw_get_msglevel); 148 149 void cpsw_set_msglevel(struct net_device *ndev, u32 value) 150 { 151 struct cpsw_priv *priv = netdev_priv(ndev); 152 153 priv->msg_enable = value; 154 } 155 EXPORT_SYMBOL_GPL(cpsw_set_msglevel); 156 157 int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, 158 struct kernel_ethtool_coalesce *kernel_coal, 159 struct netlink_ext_ack *extack) 160 { 161 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 162 163 coal->rx_coalesce_usecs = cpsw->coal_intvl; 164 return 0; 165 } 166 EXPORT_SYMBOL_GPL(cpsw_get_coalesce); 167 168 int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, 169 struct kernel_ethtool_coalesce *kernel_coal, 170 struct netlink_ext_ack *extack) 171 { 172 struct cpsw_priv *priv = netdev_priv(ndev); 173 u32 int_ctrl; 174 u32 num_interrupts = 0; 175 u32 prescale = 0; 176 u32 addnl_dvdr = 1; 177 u32 coal_intvl = 0; 178 struct cpsw_common *cpsw = priv->cpsw; 179 180 coal_intvl = coal->rx_coalesce_usecs; 181 182 int_ctrl = readl(&cpsw->wr_regs->int_control); 183 prescale = cpsw->bus_freq_mhz * 4; 184 185 if (!coal->rx_coalesce_usecs) { 186 int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN); 187 goto update_return; 188 } 189 190 if (coal_intvl < CPSW_CMINTMIN_INTVL) 191 coal_intvl = CPSW_CMINTMIN_INTVL; 192 193 if (coal_intvl > CPSW_CMINTMAX_INTVL) { 194 /* Interrupt pacer works with 4us Pulse, we can 195 * throttle further by dilating the 4us pulse. 196 */ 197 addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale; 198 199 if (addnl_dvdr > 1) { 200 prescale *= addnl_dvdr; 201 if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr)) 202 coal_intvl = (CPSW_CMINTMAX_INTVL 203 * addnl_dvdr); 204 } else { 205 addnl_dvdr = 1; 206 coal_intvl = CPSW_CMINTMAX_INTVL; 207 } 208 } 209 210 num_interrupts = (1000 * addnl_dvdr) / coal_intvl; 211 writel(num_interrupts, &cpsw->wr_regs->rx_imax); 212 writel(num_interrupts, &cpsw->wr_regs->tx_imax); 213 214 int_ctrl |= CPSW_INTPACEEN; 215 int_ctrl &= (~CPSW_INTPRESCALE_MASK); 216 int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK); 217 218 update_return: 219 writel(int_ctrl, &cpsw->wr_regs->int_control); 220 221 cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl); 222 cpsw->coal_intvl = coal_intvl; 223 224 return 0; 225 } 226 EXPORT_SYMBOL_GPL(cpsw_set_coalesce); 227 228 int cpsw_get_sset_count(struct net_device *ndev, int sset) 229 { 230 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 231 232 switch (sset) { 233 case ETH_SS_STATS: 234 return (CPSW_STATS_COMMON_LEN + 235 (cpsw->rx_ch_num + cpsw->tx_ch_num) * 236 CPSW_STATS_CH_LEN); 237 default: 238 return -EOPNOTSUPP; 239 } 240 } 241 EXPORT_SYMBOL_GPL(cpsw_get_sset_count); 242 243 static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir) 244 { 245 int ch_stats_len; 246 int line; 247 int i; 248 249 ch_stats_len = CPSW_STATS_CH_LEN * ch_num; 250 for (i = 0; i < ch_stats_len; i++) { 251 line = i % CPSW_STATS_CH_LEN; 252 snprintf(*p, ETH_GSTRING_LEN, 253 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx", 254 (long)(i / CPSW_STATS_CH_LEN), 255 cpsw_gstrings_ch_stats[line].stat_string); 256 *p += ETH_GSTRING_LEN; 257 } 258 } 259 260 void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data) 261 { 262 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 263 u8 *p = data; 264 int i; 265 266 switch (stringset) { 267 case ETH_SS_STATS: 268 for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) { 269 memcpy(p, cpsw_gstrings_stats[i].stat_string, 270 ETH_GSTRING_LEN); 271 p += ETH_GSTRING_LEN; 272 } 273 274 cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1); 275 cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0); 276 break; 277 } 278 } 279 EXPORT_SYMBOL_GPL(cpsw_get_strings); 280 281 void cpsw_get_ethtool_stats(struct net_device *ndev, 282 struct ethtool_stats *stats, u64 *data) 283 { 284 u8 *p; 285 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 286 struct cpdma_chan_stats ch_stats; 287 int i, l, ch; 288 289 /* Collect Davinci CPDMA stats for Rx and Tx Channel */ 290 for (l = 0; l < CPSW_STATS_COMMON_LEN; l++) 291 data[l] = readl(cpsw->hw_stats + 292 cpsw_gstrings_stats[l].stat_offset); 293 294 for (ch = 0; ch < cpsw->rx_ch_num; ch++) { 295 cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats); 296 for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { 297 p = (u8 *)&ch_stats + 298 cpsw_gstrings_ch_stats[i].stat_offset; 299 data[l] = *(u32 *)p; 300 } 301 } 302 303 for (ch = 0; ch < cpsw->tx_ch_num; ch++) { 304 cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats); 305 for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { 306 p = (u8 *)&ch_stats + 307 cpsw_gstrings_ch_stats[i].stat_offset; 308 data[l] = *(u32 *)p; 309 } 310 } 311 } 312 EXPORT_SYMBOL_GPL(cpsw_get_ethtool_stats); 313 314 void cpsw_get_pauseparam(struct net_device *ndev, 315 struct ethtool_pauseparam *pause) 316 { 317 struct cpsw_priv *priv = netdev_priv(ndev); 318 319 pause->autoneg = AUTONEG_DISABLE; 320 pause->rx_pause = priv->rx_pause ? true : false; 321 pause->tx_pause = priv->tx_pause ? true : false; 322 } 323 EXPORT_SYMBOL_GPL(cpsw_get_pauseparam); 324 325 void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 326 { 327 struct cpsw_priv *priv = netdev_priv(ndev); 328 struct cpsw_common *cpsw = priv->cpsw; 329 int slave_no = cpsw_slave_index(cpsw, priv); 330 331 wol->supported = 0; 332 wol->wolopts = 0; 333 334 if (cpsw->slaves[slave_no].phy) 335 phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol); 336 } 337 EXPORT_SYMBOL_GPL(cpsw_get_wol); 338 339 int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 340 { 341 struct cpsw_priv *priv = netdev_priv(ndev); 342 struct cpsw_common *cpsw = priv->cpsw; 343 int slave_no = cpsw_slave_index(cpsw, priv); 344 345 if (cpsw->slaves[slave_no].phy) 346 return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol); 347 else 348 return -EOPNOTSUPP; 349 } 350 EXPORT_SYMBOL_GPL(cpsw_set_wol); 351 352 int cpsw_get_regs_len(struct net_device *ndev) 353 { 354 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 355 356 return cpsw_ale_get_num_entries(cpsw->ale) * 357 ALE_ENTRY_WORDS * sizeof(u32); 358 } 359 EXPORT_SYMBOL_GPL(cpsw_get_regs_len); 360 361 void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p) 362 { 363 u32 *reg = p; 364 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 365 366 /* update CPSW IP version */ 367 regs->version = cpsw->version; 368 369 cpsw_ale_dump(cpsw->ale, reg); 370 } 371 EXPORT_SYMBOL_GPL(cpsw_get_regs); 372 373 int cpsw_ethtool_op_begin(struct net_device *ndev) 374 { 375 struct cpsw_priv *priv = netdev_priv(ndev); 376 struct cpsw_common *cpsw = priv->cpsw; 377 int ret; 378 379 ret = pm_runtime_resume_and_get(cpsw->dev); 380 if (ret < 0) 381 cpsw_err(priv, drv, "ethtool begin failed %d\n", ret); 382 383 return ret; 384 } 385 EXPORT_SYMBOL_GPL(cpsw_ethtool_op_begin); 386 387 void cpsw_ethtool_op_complete(struct net_device *ndev) 388 { 389 struct cpsw_priv *priv = netdev_priv(ndev); 390 391 pm_runtime_put(priv->cpsw->dev); 392 } 393 EXPORT_SYMBOL_GPL(cpsw_ethtool_op_complete); 394 395 void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch) 396 { 397 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 398 399 ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES; 400 ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES; 401 ch->max_combined = 0; 402 ch->max_other = 0; 403 ch->other_count = 0; 404 ch->rx_count = cpsw->rx_ch_num; 405 ch->tx_count = cpsw->tx_ch_num; 406 ch->combined_count = 0; 407 } 408 EXPORT_SYMBOL_GPL(cpsw_get_channels); 409 410 int cpsw_get_link_ksettings(struct net_device *ndev, 411 struct ethtool_link_ksettings *ecmd) 412 { 413 struct cpsw_priv *priv = netdev_priv(ndev); 414 struct cpsw_common *cpsw = priv->cpsw; 415 int slave_no = cpsw_slave_index(cpsw, priv); 416 417 if (!cpsw->slaves[slave_no].phy) 418 return -EOPNOTSUPP; 419 420 phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd); 421 return 0; 422 } 423 EXPORT_SYMBOL_GPL(cpsw_get_link_ksettings); 424 425 int cpsw_set_link_ksettings(struct net_device *ndev, 426 const struct ethtool_link_ksettings *ecmd) 427 { 428 struct cpsw_priv *priv = netdev_priv(ndev); 429 struct cpsw_common *cpsw = priv->cpsw; 430 int slave_no = cpsw_slave_index(cpsw, priv); 431 432 if (!cpsw->slaves[slave_no].phy) 433 return -EOPNOTSUPP; 434 435 return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd); 436 } 437 EXPORT_SYMBOL_GPL(cpsw_set_link_ksettings); 438 439 int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata) 440 { 441 struct cpsw_priv *priv = netdev_priv(ndev); 442 struct cpsw_common *cpsw = priv->cpsw; 443 int slave_no = cpsw_slave_index(cpsw, priv); 444 445 if (cpsw->slaves[slave_no].phy) 446 return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata); 447 else 448 return -EOPNOTSUPP; 449 } 450 EXPORT_SYMBOL_GPL(cpsw_get_eee); 451 452 int cpsw_nway_reset(struct net_device *ndev) 453 { 454 struct cpsw_priv *priv = netdev_priv(ndev); 455 struct cpsw_common *cpsw = priv->cpsw; 456 int slave_no = cpsw_slave_index(cpsw, priv); 457 458 if (cpsw->slaves[slave_no].phy) 459 return genphy_restart_aneg(cpsw->slaves[slave_no].phy); 460 else 461 return -EOPNOTSUPP; 462 } 463 EXPORT_SYMBOL_GPL(cpsw_nway_reset); 464 465 static void cpsw_suspend_data_pass(struct net_device *ndev) 466 { 467 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 468 int i; 469 470 /* Disable NAPI scheduling */ 471 cpsw_intr_disable(cpsw); 472 473 /* Stop all transmit queues for every network device. 474 */ 475 for (i = 0; i < cpsw->data.slaves; i++) { 476 ndev = cpsw->slaves[i].ndev; 477 if (!(ndev && netif_running(ndev))) 478 continue; 479 480 netif_tx_stop_all_queues(ndev); 481 482 /* Barrier, so that stop_queue visible to other cpus */ 483 smp_mb__after_atomic(); 484 } 485 486 /* Handle rest of tx packets and stop cpdma channels */ 487 cpdma_ctlr_stop(cpsw->dma); 488 } 489 490 static int cpsw_resume_data_pass(struct net_device *ndev) 491 { 492 struct cpsw_priv *priv = netdev_priv(ndev); 493 struct cpsw_common *cpsw = priv->cpsw; 494 int i, ret; 495 496 /* After this receive is started */ 497 if (cpsw->usage_count) { 498 ret = cpsw_fill_rx_channels(priv); 499 if (ret) 500 return ret; 501 502 cpdma_ctlr_start(cpsw->dma); 503 cpsw_intr_enable(cpsw); 504 } 505 506 /* Resume transmit for every affected interface */ 507 for (i = 0; i < cpsw->data.slaves; i++) { 508 ndev = cpsw->slaves[i].ndev; 509 if (ndev && netif_running(ndev)) 510 netif_tx_start_all_queues(ndev); 511 } 512 513 return 0; 514 } 515 516 static int cpsw_check_ch_settings(struct cpsw_common *cpsw, 517 struct ethtool_channels *ch) 518 { 519 if (cpsw->quirk_irq) { 520 dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed"); 521 return -EOPNOTSUPP; 522 } 523 524 if (ch->combined_count) 525 return -EINVAL; 526 527 /* verify we have at least one channel in each direction */ 528 if (!ch->rx_count || !ch->tx_count) 529 return -EINVAL; 530 531 if (ch->rx_count > cpsw->data.channels || 532 ch->tx_count > cpsw->data.channels) 533 return -EINVAL; 534 535 return 0; 536 } 537 538 static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx, 539 cpdma_handler_fn rx_handler) 540 { 541 struct cpsw_common *cpsw = priv->cpsw; 542 void (*handler)(void *, int, int); 543 struct netdev_queue *queue; 544 struct cpsw_vector *vec; 545 int ret, *ch, vch; 546 547 if (rx) { 548 ch = &cpsw->rx_ch_num; 549 vec = cpsw->rxv; 550 handler = rx_handler; 551 } else { 552 ch = &cpsw->tx_ch_num; 553 vec = cpsw->txv; 554 handler = cpsw_tx_handler; 555 } 556 557 while (*ch < ch_num) { 558 vch = rx ? *ch : 7 - *ch; 559 vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx); 560 queue = netdev_get_tx_queue(priv->ndev, *ch); 561 queue->tx_maxrate = 0; 562 563 if (IS_ERR(vec[*ch].ch)) 564 return PTR_ERR(vec[*ch].ch); 565 566 if (!vec[*ch].ch) 567 return -EINVAL; 568 569 cpsw_info(priv, ifup, "created new %d %s channel\n", *ch, 570 (rx ? "rx" : "tx")); 571 (*ch)++; 572 } 573 574 while (*ch > ch_num) { 575 (*ch)--; 576 577 ret = cpdma_chan_destroy(vec[*ch].ch); 578 if (ret) 579 return ret; 580 581 cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch, 582 (rx ? "rx" : "tx")); 583 } 584 585 return 0; 586 } 587 588 static void cpsw_fail(struct cpsw_common *cpsw) 589 { 590 struct net_device *ndev; 591 int i; 592 593 for (i = 0; i < cpsw->data.slaves; i++) { 594 ndev = cpsw->slaves[i].ndev; 595 if (ndev) 596 dev_close(ndev); 597 } 598 } 599 600 int cpsw_set_channels_common(struct net_device *ndev, 601 struct ethtool_channels *chs, 602 cpdma_handler_fn rx_handler) 603 { 604 struct cpsw_priv *priv = netdev_priv(ndev); 605 struct cpsw_common *cpsw = priv->cpsw; 606 struct net_device *sl_ndev; 607 int i, new_pools, ret; 608 609 ret = cpsw_check_ch_settings(cpsw, chs); 610 if (ret < 0) 611 return ret; 612 613 cpsw_suspend_data_pass(ndev); 614 615 new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count; 616 617 ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler); 618 if (ret) 619 goto err; 620 621 ret = cpsw_update_channels_res(priv, chs->tx_count, 0, rx_handler); 622 if (ret) 623 goto err; 624 625 for (i = 0; i < cpsw->data.slaves; i++) { 626 sl_ndev = cpsw->slaves[i].ndev; 627 if (!(sl_ndev && netif_running(sl_ndev))) 628 continue; 629 630 /* Inform stack about new count of queues */ 631 ret = netif_set_real_num_tx_queues(sl_ndev, cpsw->tx_ch_num); 632 if (ret) { 633 dev_err(priv->dev, "cannot set real number of tx queues\n"); 634 goto err; 635 } 636 637 ret = netif_set_real_num_rx_queues(sl_ndev, cpsw->rx_ch_num); 638 if (ret) { 639 dev_err(priv->dev, "cannot set real number of rx queues\n"); 640 goto err; 641 } 642 } 643 644 cpsw_split_res(cpsw); 645 646 if (new_pools) { 647 cpsw_destroy_xdp_rxqs(cpsw); 648 ret = cpsw_create_xdp_rxqs(cpsw); 649 if (ret) 650 goto err; 651 } 652 653 ret = cpsw_resume_data_pass(ndev); 654 if (!ret) 655 return 0; 656 err: 657 dev_err(priv->dev, "cannot update channels number, closing device\n"); 658 cpsw_fail(cpsw); 659 return ret; 660 } 661 EXPORT_SYMBOL_GPL(cpsw_set_channels_common); 662 663 void cpsw_get_ringparam(struct net_device *ndev, 664 struct ethtool_ringparam *ering, 665 struct kernel_ethtool_ringparam *kernel_ering, 666 struct netlink_ext_ack *extack) 667 { 668 struct cpsw_priv *priv = netdev_priv(ndev); 669 struct cpsw_common *cpsw = priv->cpsw; 670 671 /* not supported */ 672 ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES; 673 ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma); 674 ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES; 675 ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma); 676 } 677 EXPORT_SYMBOL_GPL(cpsw_get_ringparam); 678 679 int cpsw_set_ringparam(struct net_device *ndev, 680 struct ethtool_ringparam *ering, 681 struct kernel_ethtool_ringparam *kernel_ering, 682 struct netlink_ext_ack *extack) 683 { 684 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 685 int descs_num, ret; 686 687 /* ignore ering->tx_pending - only rx_pending adjustment is supported */ 688 689 if (ering->rx_mini_pending || ering->rx_jumbo_pending || 690 ering->rx_pending < CPSW_MAX_QUEUES || 691 ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES)) 692 return -EINVAL; 693 694 descs_num = cpdma_get_num_rx_descs(cpsw->dma); 695 if (ering->rx_pending == descs_num) 696 return 0; 697 698 cpsw_suspend_data_pass(ndev); 699 700 ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending); 701 if (ret) { 702 if (cpsw_resume_data_pass(ndev)) 703 goto err; 704 705 return ret; 706 } 707 708 if (cpsw->usage_count) { 709 cpsw_destroy_xdp_rxqs(cpsw); 710 ret = cpsw_create_xdp_rxqs(cpsw); 711 if (ret) 712 goto err; 713 } 714 715 ret = cpsw_resume_data_pass(ndev); 716 if (!ret) 717 return 0; 718 err: 719 cpdma_set_num_rx_descs(cpsw->dma, descs_num); 720 dev_err(cpsw->dev, "cannot set ring params, closing device\n"); 721 cpsw_fail(cpsw); 722 return ret; 723 } 724 EXPORT_SYMBOL_GPL(cpsw_set_ringparam); 725 726 #if IS_ENABLED(CONFIG_TI_CPTS) 727 int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) 728 { 729 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 730 731 info->so_timestamping = 732 SOF_TIMESTAMPING_TX_HARDWARE | 733 SOF_TIMESTAMPING_TX_SOFTWARE | 734 SOF_TIMESTAMPING_RX_HARDWARE | 735 SOF_TIMESTAMPING_RAW_HARDWARE; 736 info->phc_index = cpsw->cpts->phc_index; 737 info->tx_types = 738 (1 << HWTSTAMP_TX_OFF) | 739 (1 << HWTSTAMP_TX_ON); 740 info->rx_filters = 741 (1 << HWTSTAMP_FILTER_NONE) | 742 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); 743 return 0; 744 } 745 EXPORT_SYMBOL_GPL(cpsw_get_ts_info); 746 #else 747 int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) 748 { 749 info->so_timestamping = 750 SOF_TIMESTAMPING_TX_SOFTWARE; 751 info->tx_types = 0; 752 info->rx_filters = 0; 753 return 0; 754 } 755 EXPORT_SYMBOL_GPL(cpsw_get_ts_info); 756 #endif 757