// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>

#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "cpts.h"
#include "davinci_cpdma.h"

#include <net/pkt_sched.h>

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");

#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		struct cpsw_common *cpsw = (priv)->cpsw;		\
		int n;							\
		if (cpsw->data.dual_emac)				\
			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = cpsw->data.slaves,			\
					slave = cpsw->slaves;		\
					n; n--)				\
				(func)(slave++, ##arg);			\
	} while (0)
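
/* Note: in dual-EMAC mode the macro above applies @func only to the slave
 * backing this netdev (priv->emac_port); in switch mode it walks every
 * slave. All for_each_slave() callers below rely on this dispatch.
 */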

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid);

static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_ale *ale = cpsw->ale;
	int i;

	if (cpsw->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode for one interface is common
		 * for both the interfaces, as they share the same
		 * hardware resource.
		 */
		for (i = 0; i < cpsw->data.slaves; i++)
			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		if (!enable && flag) {
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}

		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable Learn for all ports (host is port 0 and
			 * slaves are port 1 and up)
			 */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear All Untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			do {
				cpu_relax();
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

			/* Clear all mcast from ALE */
			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
			__hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);

			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Don't Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Enable Learn for all ports (host is port 0 and
			 * slaves are port 1 and up)
			 */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}

/**
 * cpsw_set_mc - add or delete a multicast address in the ALE table
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for real device
 * @add: add the address if the flag is set, remove it otherwise
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
		       int vid, int add)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int mask, flags, ret;

	if (vid < 0) {
		if (cpsw->data.dual_emac)
			vid = cpsw->slaves[priv->emac_port].port_vlan;
		else
			vid = 0;
	}

	mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
	flags = vid ? ALE_VLAN : 0;

	if (add)
		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
	else
		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

	return ret;
}
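
/* Multicast addresses are kept in sync for the real device and for any
 * VLAN devices stacked on it: the helpers below walk the VLANs via
 * vlan_for_each() and only program the address against the real device
 * (vid < 0) when no VLAN interface consumed it.
 */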

static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0, ret = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (found)
		sync_ctx->consumed++;

	if (sync_ctx->flush) {
		if (!found)
			cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
		return 0;
	}

	if (found)
		ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);

	return ret;
}

static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;
	int ret;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 0;

	ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num && !ret)
		ret = cpsw_set_mc(ndev, addr, -1, 1);

	return ret;
}

static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 1;

	vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed == num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (!found)
		return 0;

	sync_ctx->consumed++;
	cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
	return 0;
}

static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.consumed = 0;

	vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_port = -1;

	if (cpsw->data.dual_emac)
		slave_port = priv->emac_port + 1;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cpsw_set_promiscious(ndev, true);
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
		return;
	}

	/* Disable promiscuous mode */
	cpsw_set_promiscious(ndev, false);

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(cpsw->ale,
			      ndev->flags & IFF_ALLMULTI, slave_port);

	/* add/remove mcast address either for real netdev or for vlan */
	__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
			       cpsw_del_mc_addr);
}
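
/* The interrupt enable/disable helpers below gate both the CPSW wrapper
 * per-direction enables (wr_regs->tx_en/rx_en) and the CPDMA controller
 * interrupt mask, so RX and TX can be re-armed independently from NAPI.
 */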

void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
}

void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
}

void cpsw_tx_handler(void *token, int len, int status)
{
	struct netdev_queue *txq;
	struct sk_buff *skb = token;
	struct net_device *ndev = skb->dev;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	/* Check whether the queue is stopped due to stalled tx dma;
	 * if so, wake it, since we now have a free desc for tx.
	 */
	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	cpts_tx_timestamp(cpsw->cpts, skb);
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
	dev_kfree_skb_any(skb);
}

static void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	struct cpsw_common *cpsw = priv->cpsw;
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & Priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;
	/* Ignore default vlans in dual mac mode */
	if (cpsw->data.dual_emac &&
	    vid == cpsw->slaves[priv->emac_port].port_vlan)
		return;

	prio = (rx_vlan_encap_hdr >>
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

	vtag = (prio << VLAN_PRIO_SHIFT) | vid;
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}
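
/* RX completion below recycles descriptors: a replacement skb is allocated
 * for each completed packet and resubmitted to the same CPDMA channel, so
 * the RX ring never shrinks; if allocation fails, the received skb itself
 * is requeued and the packet is counted as dropped.
 */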

static void cpsw_rx_handler(void *token, int len, int status)
{
	struct cpdma_chan *ch;
	struct sk_buff *skb = token;
	struct sk_buff *new_skb;
	struct net_device *ndev = skb->dev;
	int ret = 0, port;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_priv *priv;

	if (cpsw->data.dual_emac) {
		port = CPDMA_RX_SOURCE_PORT(status);
		if (port) {
			ndev = cpsw->slaves[--port].ndev;
			skb->dev = ndev;
		}
	}

	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		/* In dual emac mode check for all interfaces */
		if (cpsw->data.dual_emac && cpsw->usage_count &&
		    (status >= 0)) {
			/* The packet was received for an interface which is
			 * already down while the other interface is up and
			 * running. Instead of freeing the skb, which would
			 * reduce the number of rx descriptors in the DMA
			 * engine, requeue it back to cpdma.
			 */
			new_skb = skb;
			goto requeue;
		}

		/* the interface is going down, skbs are purged */
		dev_kfree_skb_any(skb);
		return;
	}

	new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
	if (new_skb) {
		skb_copy_queue_mapping(new_skb, skb);
		skb_put(skb, len);
		if (status & CPDMA_RX_VLAN_ENCAP)
			cpsw_rx_vlan_encap(skb);
		priv = netdev_priv(ndev);
		if (priv->rx_ts_enabled)
			cpts_rx_timestamp(cpsw->cpts, skb);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		ndev->stats.rx_bytes += len;
		ndev->stats.rx_packets++;
		kmemleak_not_leak(new_skb);
	} else {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	}

requeue:
	ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
	ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
				skb_tailroom(new_skb), 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOMEM);
		dev_kfree_skb_any(new_skb);
	}
}

void cpsw_split_res(struct cpsw_common *cpsw)
{
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link
		 * speed, split proportionally according to the next
		 * potential max speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
		ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx weight/budget */
	budget = CPSW_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!bigest_rate_ch)
				bigest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = CPSW_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}
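
/* Worked example for the TX split above, assuming CPSW_POLL_WEIGHT is 64
 * and a 1000Mbps link: with two channels, ch0 rate-limited to 100Mbps and
 * ch1 unlimited, max_rate = 1000000 and consumed_rate = 100000 (kbps), so
 * ch0 gets budget (100000 * 64) / 1000000 = 6 and weight 10, while ch1
 * gets the remainder (64 - 6) / 1 = 58, leaving nothing to redistribute.
 */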

static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
	writel(0, &cpsw->wr_regs->rx_en);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	u32 ch_map;
	int num_tx, cur_budget, ch;
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	struct cpsw_vector *txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}

static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	u32 ch_map;
	int num_rx, cur_budget, ch;
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	struct cpsw_vector *rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}

static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}
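
/* Note: the *_mq_poll() handlers above service every channel reported by
 * the CPDMA channel state, honouring the per-channel budgets computed in
 * cpsw_split_res(); the single-channel *_poll() variants are used on
 * hardware with the one-IRQ quirk (cpsw->quirk_irq), where the IRQ stays
 * disabled until polling completes and is re-enabled there.
 */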

static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

static bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}

static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;
	u32 slave_port;
	struct cpsw_common *cpsw = priv->cpsw;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (phy->link) {
		mac_control = CPSW_SL_CTL_GMII_EN;

		if (phy->speed == 1000)
			mac_control |= CPSW_SL_CTL_GIG;
		if (phy->duplex)
			mac_control |= CPSW_SL_CTL_FULLDUPLEX;

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= CPSW_SL_CTL_IFCTL_A;
		/* in band mode only works in 10Mbps RGMII mode */
		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
			mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */

		if (priv->rx_pause)
			mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

		if (priv->tx_pause)
			mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

		if (mac_control != slave->mac_control)
			cpsw_sl_ctl_set(slave->mac_sl, mac_control);

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		*link = true;

		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed &&
		    !cpsw_shp_is_off(priv))
			dev_warn(priv->dev,
				 "Speed was changed, CBS shaper speeds are changed!");
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

		cpsw_sl_wait_for_idle(slave->mac_sl, 100);

		cpsw_sl_ctl_reset(slave->mac_sl);
	}

	if (mac_control != slave->mac_control)
		phy_print_status(phy);

	slave->mac_control = mac_control;
}

static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}

static int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}

static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	bool link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		if (cpsw_need_resplit(cpsw))
			cpsw_split_res(cpsw);

		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_tx_wake_all_queues(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_tx_stop_all_queues(ndev);
	}
}

static inline void cpsw_add_dual_emac_def_ale_entries(
		struct cpsw_priv *priv, struct cpsw_slave *slave,
		u32 slave_port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 port_mask = 1 << slave_port | ALE_PORT_HOST;

	if (cpsw->version == CPSW_VERSION_1)
		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
	else
		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
			  port_mask, port_mask, 0);
	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN |
			   ALE_SECURE, slave->port_vlan);
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}
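
/* The entries above keep dual-EMAC ports isolated: each slave port shares
 * its private port VLAN only with the host port, so frames are never
 * switched directly between the two external ports.
 */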

static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;
	struct phy_device *phy;
	struct cpsw_common *cpsw = priv->cpsw;

	cpsw_sl_reset(slave->mac_sl, 100);
	cpsw_sl_ctl_reset(slave->mac_sl);

	/* setup priority mapping */
	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
			  RX_PRIORITY_MAPPING);

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting full duplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting full duplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
		break;
	}

	/* setup max packet size, and mac address */
	cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
			  cpsw->rx_packet_max);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (cpsw->data.dual_emac)
		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
	else
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

	if (slave->data->phy_node) {
		phy = of_phy_connect(priv->ndev, slave->data->phy_node,
				     &cpsw_adjust_link, 0, slave->data->phy_if);
		if (!phy) {
			dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
				slave->data->phy_node,
				slave->slave_num);
			return;
		}
	} else {
		phy = phy_connect(priv->ndev, slave->data->phy_id,
				  &cpsw_adjust_link, slave->data->phy_if);
		if (IS_ERR(phy)) {
			dev_err(priv->dev,
				"phy \"%s\" not found on slave %d, err %ld\n",
				slave->data->phy_id, slave->slave_num,
				PTR_ERR(phy));
			return;
		}
	}

	slave->phy = phy;

	phy_attached_info(slave->phy);

	phy_start(slave->phy);

	/* Configure GMII_SEL register */
	if (!IS_ERR(slave->data->ifphy))
		phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
				 slave->data->phy_if);
	else
		cpsw_phy_sel(cpsw->dev, slave->phy->interface,
			     slave->slave_num);
}

static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	const int vlan = cpsw->data.default_vlan;
	u32 reg;
	int i;
	int unreg_mcast_mask;

	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;

	writel(vlan, &cpsw->host_port_regs->port_vlan);

	for (i = 0; i < cpsw->data.slaves; i++)
		slave_write(cpsw->slaves + i, vlan, reg);

	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = ALE_ALL_PORTS;
	else
		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;

	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
			  ALE_ALL_PORTS, ALE_ALL_PORTS,
			  unreg_mcast_mask);
}

static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	u32 fifo_mode;
	u32 control_reg;
	struct cpsw_common *cpsw = priv->cpsw;

	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &cpsw->regs->soft_reset);
	cpsw_ale_start(cpsw->ale);

	/* switch to vlan aware mode */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
	control_reg = readl(&cpsw->regs->control);
	control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
	writel(control_reg, &cpsw->regs->control);
	fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
		     CPSW_FIFO_NORMAL_MODE;
	writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);

	/* setup host port priority mapping */
	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
		       &cpsw->host_port_regs->cpdma_tx_pri_map);
	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	if (!cpsw->data.dual_emac) {
		cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
				   0, 0);
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
	}
}

int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct sk_buff *skb;
	int ch_buf_num;
	int ch, i, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
		for (i = 0; i < ch_buf_num; i++) {
			skb = __netdev_alloc_skb_ip_align(priv->ndev,
							  cpsw->rx_packet_max,
							  GFP_KERNEL);
			if (!skb) {
				cpsw_err(priv, ifup, "cannot allocate skb\n");
				return -ENOMEM;
			}

			skb_set_queue_mapping(skb, ch);
			ret = cpdma_chan_idle_submit(cpsw->rxv[ch].ch, skb,
						     skb->data,
						     skb_tailroom(skb), 0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit skb to channel %d rx, error %d\n",
					 ch, ret);
				kfree_skb(skb);
				return ret;
			}
			kmemleak_not_leak(skb);
		}

		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}

static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
	u32 slave_port;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (!slave->phy)
		return;
	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	cpsw_sl_reset(slave->mac_sl, 100);
	cpsw_sl_ctl_reset(slave->mac_sl);
}

static int cpsw_tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;

	return CPSW_FIFO_SHAPERS_NUM - tc;
}
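
/* Example: with the driver's CPSW_TC_NUM of 4 (CPSW_FIFO_SHAPERS_NUM == 3)
 * and num_tc == 3, tc0 maps to FIFO 3, tc1 to FIFO 2, and the last tc2 to
 * FIFO 0, which cannot be rate limited.
 */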

static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 val = 0, send_pct, shift;
	struct cpsw_slave *slave;
	int pct = 0, i;

	if (bw > priv->shp_cfg_speed * 1000)
		goto err;

	/* shaping has to stay enabled for the highest fifos linearly,
	 * and the fifo bw can be no more than the interface can allow
	 */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	send_pct = slave_read(slave, SEND_PERCENT);
	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
		if (!bw) {
			if (i >= fifo || !priv->fifo_bw[i])
				continue;

			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
			continue;
		}

		if (!priv->fifo_bw[i] && i > fifo) {
			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
			return -EINVAL;
		}

		shift = (i - 1) * 8;
		if (i == fifo) {
			send_pct &= ~(CPSW_PCT_MASK << shift);
			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
			if (!val)
				val = 1;

			send_pct |= val << shift;
			pct += val;
			continue;
		}

		if (priv->fifo_bw[i])
			pct += (send_pct >> shift) & CPSW_PCT_MASK;
	}

	if (pct >= 100)
		goto err;

	slave_write(slave, send_pct, SEND_PERCENT);
	priv->fifo_bw[fifo] = bw;

	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));

	return 0;
err:
	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
	return -EINVAL;
}

static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 tx_in_ctl_rg, val;
	int ret;

	ret = cpsw_set_fifo_bw(priv, fifo, bw);
	if (ret)
		return ret;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;

	if (!bw)
		cpsw_fifo_shp_on(priv, fifo, bw);

	val = slave_read(slave, tx_in_ctl_rg);
	if (cpsw_shp_is_off(priv)) {
		/* disable FIFOs rate limited queues */
		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);

		/* set type of FIFO queues to normal priority mode */
		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);

		/* set type of FIFO queues to be rate limited */
		if (bw)
			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
		else
			priv->shp_cfg_speed = 0;
	}

	/* toggle a FIFO rate limited queue */
	if (bw)
		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	else
		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	slave_write(slave, val, tx_in_ctl_rg);

	/* FIFO transmit shape enable */
	cpsw_fifo_shp_on(priv, fifo, bw);
	return 0;
}
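
/* CBS configuration is two-staged: cpsw_set_fifo_bw() programs the
 * per-FIFO send percentage (SEND_PERCENT), while cpsw_set_fifo_rlimit()
 * switches the FIFO queue type to rate limited and toggles shaping for it;
 * both must succeed for a shaper change to take effect.
 */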

/* Defaults:
 * class A - prio 3
 * class B - prio 2
 * shaping for class A should be set first
 */
static int cpsw_set_cbs(struct net_device *ndev,
			struct tc_cbs_qopt_offload *qopt)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int prev_speed = 0;
	int tc, ret, fifo;
	u32 bw = 0;

	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);

	/* enable channels in backward order, as the highest FIFOs must be
	 * rate limited first and for compliance with CPDMA rate limited
	 * channels, which are also used in backward order. FIFO0 cannot
	 * be rate limited.
	 */
	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
	if (!fifo) {
		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
		return -EINVAL;
	}

	/* do nothing, it's disabled anyway */
	if (!qopt->enable && !priv->fifo_bw[fifo])
		return 0;

	/* shapers can be set if link speed is known */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	if (slave->phy && slave->phy->link) {
		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed)
			prev_speed = priv->shp_cfg_speed;

		priv->shp_cfg_speed = slave->phy->speed;
	}

	if (!priv->shp_cfg_speed) {
		dev_err(priv->dev, "Link speed is not known");
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	bw = qopt->enable ? qopt->idleslope : 0;
	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
	if (ret) {
		priv->shp_cfg_speed = prev_speed;
		prev_speed = 0;
	}

	if (bw && prev_speed)
		dev_warn(priv->dev,
			 "Speed was changed, CBS shaper speeds are changed!");

	pm_runtime_put_sync(cpsw->dev);
	return ret;
}

static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	int fifo, bw;

	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
		bw = priv->fifo_bw[fifo];
		if (!bw)
			continue;

		cpsw_set_fifo_rlimit(priv, fifo, bw);
	}
}

static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 tx_prio_map = 0;
	int i, tc, fifo;
	u32 tx_prio_rg;

	if (!priv->mqprio_hw)
		return;

	for (i = 0; i < 8; i++) {
		tc = netdev_get_prio_tc_map(priv->ndev, i);
		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
		tx_prio_map |= fifo << (4 * i);
	}

	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave_write(slave, tx_prio_map, tx_prio_rg);
}

static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
	struct cpsw_priv *priv = arg;

	if (!vdev)
		return 0;

	cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
	return 0;
}

/* restore resources after port reset */
static void cpsw_restore(struct cpsw_priv *priv)
{
	/* restore vlan configurations */
	vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);

	/* restore MQPRIO offload */
	for_each_slave(priv, cpsw_mqprio_resume, priv);

	/* restore CBS offload */
	for_each_slave(priv, cpsw_cbs_resume, priv);
}
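
/* All netdevs of one instance share a single cpsw_common; usage_count
 * tracks how many of them are open, so the shared resources (host port,
 * NAPI, RX descriptors, CPTS) are set up only on the first open below and
 * torn down only on the last stop.
 */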

static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;
	u32 reg;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	netif_carrier_off(ndev);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of tx queues\n");
		goto err_cleanup;
	}

	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of rx queues\n");
		goto err_cleanup;
	}

	reg = cpsw->version;

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* Initialize host and slave ports */
	if (!cpsw->usage_count)
		cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* Add default VLAN */
	if (!cpsw->data.dual_emac)
		cpsw_add_default_vlan(priv);
	else
		cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);

	/* initialize shared resources for every ndev */
	if (!cpsw->usage_count) {
		/* disable priority elevation */
		writel_relaxed(0, &cpsw->regs->ptype);

		/* enable statistics collection on all ports */
		writel_relaxed(0x7, &cpsw->regs->stat_port_en);

		/* Enable internal fifo flow control */
		writel(0x7, &cpsw->regs->flow_control);

		napi_enable(&cpsw->napi_rx);
		napi_enable(&cpsw->napi_tx);

		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}

		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}

		ret = cpsw_fill_rx_channels(priv);
		if (ret < 0)
			goto err_cleanup;

		if (cpts_register(cpsw->cpts))
			dev_err(priv->dev, "error registering cpts device\n");
	}

	cpsw_restore(priv);

	/* Enable Interrupt pacing if configured */
	if (cpsw->coal_intvl != 0) {
		struct ethtool_coalesce coal;

		coal.rx_coalesce_usecs = cpsw->coal_intvl;
		cpsw_set_coalesce(ndev, &coal);
	}

	cpdma_ctlr_start(cpsw->dma);
	cpsw_intr_enable(cpsw);
	cpsw->usage_count++;

	return 0;

err_cleanup:
	if (!cpsw->usage_count) {
		cpdma_ctlr_stop(cpsw->dma);
		for_each_slave(priv, cpsw_slave_stop, cpsw);
	}

	pm_runtime_put_sync(cpsw->dev);
	netif_carrier_off(priv->ndev);
	return ret;
}

static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
	__hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
	netif_tx_stop_all_queues(priv->ndev);
	netif_carrier_off(priv->ndev);

	if (cpsw->usage_count <= 1) {
		napi_disable(&cpsw->napi_rx);
		napi_disable(&cpsw->napi_tx);
		cpts_unregister(cpsw->cpts);
		cpsw_intr_disable(cpsw);
		cpdma_ctlr_stop(cpsw->dma);
		cpsw_ale_stop(cpsw->ale);
	}
	for_each_slave(priv, cpsw_slave_stop, cpsw);

	if (cpsw_need_resplit(cpsw))
		cpsw_split_res(cpsw);

	cpsw->usage_count--;
	pm_runtime_put_sync(cpsw->dev);
	return 0;
}
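
/* On transmit, note how the directed port is chosen below: in dual-EMAC
 * mode cpdma_chan_submit() is passed priv->emac_port + 1, forcing the
 * packet out of this netdev's slave port, while in switch mode port 0 is
 * passed and the ALE decides the egress port.
 */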

static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpts *cpts = cpsw->cpts;
	struct netdev_queue *txq;
	struct cpdma_chan *txch;
	int ret, q_idx;

	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		ndev->stats.tx_dropped++;
		return NET_XMIT_DROP;
	}

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	q_idx = skb_get_queue_mapping(skb);
	if (q_idx >= cpsw->tx_ch_num)
		q_idx = q_idx % cpsw->tx_ch_num;

	txch = cpsw->txv[q_idx].ch;
	txq = netdev_get_tx_queue(ndev, q_idx);
	skb_tx_timestamp(skb);
	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
				priv->emac_port + cpsw->data.dual_emac);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	/* If there is no more tx desc left free then we need to
	 * tell the kernel to stop sending us tx frames.
	 */
	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
		netif_tx_stop_queue(txq);

		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();

		if (cpdma_check_free_tx_desc(txch))
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
fail:
	ndev->stats.tx_dropped++;
	netif_tx_stop_queue(txq);

	/* Barrier, so that stop_queue visible to other cpus */
	smp_mb__after_atomic();

	if (cpdma_check_free_tx_desc(txch))
		netif_tx_wake_queue(txq);

	return NETDEV_TX_BUSY;
}

#if IS_ENABLED(CONFIG_TI_CPTS)

static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
	u32 ts_en, seq_id;

	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
		slave_write(slave, 0, CPSW1_TS_CTL);
		return;
	}

	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;

	if (priv->tx_ts_enabled)
		ts_en |= CPSW_V1_TS_TX_EN;

	if (priv->rx_ts_enabled)
		ts_en |= CPSW_V1_TS_RX_EN;

	slave_write(slave, ts_en, CPSW1_TS_CTL);
	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
}

static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
	struct cpsw_slave *slave;
	struct cpsw_common *cpsw = priv->cpsw;
	u32 ctrl, mtype;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];

	ctrl = slave_read(slave, CPSW2_CONTROL);
	switch (cpsw->version) {
	case CPSW_VERSION_2:
		ctrl &= ~CTRL_V2_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V2_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V2_RX_TS_BITS;
		break;
	case CPSW_VERSION_3:
	default:
		ctrl &= ~CTRL_V3_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V3_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V3_RX_TS_BITS;
		break;
	}

	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct hwtstamp_config cfg;
	struct cpsw_common *cpsw = priv->cpsw;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* reserved for future extensions */
	if (cfg.flags)
		return -EINVAL;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_NTP_ALL:
		return -ERANGE;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw_hwtstamp_v1(priv);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
		cpsw_hwtstamp_v2(priv);
		break;
	default:
		WARN_ON(1);
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
	struct cpsw_priv *priv = netdev_priv(dev);
	struct hwtstamp_config cfg;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = priv->rx_ts_enabled;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

#else
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_TI_CPTS */

static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return cpsw_hwtstamp_set(dev, req);
	case SIOCGHWTSTAMP:
		return cpsw_hwtstamp_get(dev, req);
	}

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;
	return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
}

static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ch;

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	ndev->stats.tx_errors++;
	cpsw_intr_disable(cpsw);
	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_stop(cpsw->txv[ch].ch);
		cpdma_chan_start(cpsw->txv[ch].ch);
	}

	cpsw_intr_enable(cpsw);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}

static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = (struct sockaddr *)p;
	struct cpsw_common *cpsw = priv->cpsw;
	int flags = 0;
	u16 vid = 0;
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (cpsw->data.dual_emac) {
		vid = cpsw->slaves[priv->emac_port].port_vlan;
		flags = ALE_VLAN;
	}

	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
			   flags, vid);
	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
			   flags, vid);

	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
	for_each_slave(priv, cpsw_set_slave_mac, priv);

	pm_runtime_put(cpsw->dev);

	return 0;
}
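
/* VLAN housekeeping: in dual-EMAC mode the host-port unicast entry for
 * the MAC address lives inside the slave's reserved port VLAN, which is
 * why cpsw_ndo_set_mac_address() above and the VLAN handlers below pass
 * ALE_VLAN together with a VLAN id.
 */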

static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
					  unsigned short vid)
{
	int ret;
	int unreg_mcast_mask = 0;
	int mcast_mask;
	u32 port_mask;
	struct cpsw_common *cpsw = priv->cpsw;

	if (cpsw->data.dual_emac) {
		port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;

		mcast_mask = ALE_PORT_HOST;
		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = mcast_mask;
	} else {
		port_mask = ALE_ALL_PORTS;
		mcast_mask = port_mask;

		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = ALE_ALL_PORTS;
		else
			unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
	}

	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
				unreg_mcast_mask);
	if (ret != 0)
		return ret;

	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
				 HOST_PORT_NUM, ALE_VLAN, vid);
	if (ret != 0)
		goto clean_vid;

	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				 mcast_mask, ALE_VLAN, vid, 0);
	if (ret != 0)
		goto clean_vlan_ucast;
	return 0;

clean_vlan_ucast:
	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN, vid);
clean_vid:
	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	return ret;
}

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (cpsw->data.dual_emac) {
		/* In dual EMAC, reserved VLAN id should not be used for
		 * creating VLAN interfaces as this can break the dual
		 * EMAC port separation
		 */
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan) {
				ret = -EINVAL;
				goto err;
			}
		}
	}

	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
	ret = cpsw_add_vlan_ale_entry(priv, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}

static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
				     __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan)
				goto err;
		}
	}

	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
				  HOST_PORT_NUM, ALE_VLAN, vid);
	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
				  0, ALE_VLAN, vid);
	ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}
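
/* Per-queue rates arrive from the stack in Mbps; the handler below scales
 * them to kbps (rate * 1000) for CPDMA, checks them against the
 * controller's minimum channel rate and the current link speed, and then
 * re-splits the NAPI budget across channels via cpsw_split_res().
 */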

static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 min_rate;
	u32 ch_rate;
	int i, ret;

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate)
		return 0;

	ch_rate = rate * 1000;
	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
	if (ch_rate < min_rate && ch_rate) {
		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
			min_rate);
		return -EINVAL;
	}

	if (rate > cpsw->speed) {
		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
	pm_runtime_put(cpsw->dev);

	if (ret)
		return ret;

	/* update rates for slaves tx queues */
	for (i = 0; i < cpsw->data.slaves; i++) {
		slave = &cpsw->slaves[i];
		if (!slave->ndev)
			continue;

		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
	}

	cpsw_split_res(cpsw);
	return ret;
}

static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int fifo, num_tc, count, offset;
	struct cpsw_slave *slave;
	u32 tx_prio_map = 0;
	int i, tc, ret;

	num_tc = mqprio->qopt.num_tc;
	if (num_tc > CPSW_TC_NUM)
		return -EINVAL;

	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
		return -EINVAL;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (num_tc) {
		for (i = 0; i < 8; i++) {
			tc = mqprio->qopt.prio_tc_map[i];
			fifo = cpsw_tc_to_fifo(tc, num_tc);
			tx_prio_map |= fifo << (4 * i);
		}

		netdev_set_num_tc(ndev, num_tc);
		for (i = 0; i < num_tc; i++) {
			count = mqprio->qopt.count[i];
			offset = mqprio->qopt.offset[i];
			netdev_set_tc_queue(ndev, i, count, offset);
		}
	}

	if (!mqprio->qopt.hw) {
		/* restore default configuration */
		netdev_reset_tc(ndev);
		tx_prio_map = TX_PRIORITY_MAPPING;
	}

	priv->mqprio_hw = mqprio->qopt.hw;

	offset = cpsw->version == CPSW_VERSION_1 ?
		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	slave_write(slave, tx_prio_map, offset);

	pm_runtime_put_sync(cpsw->dev);

	return 0;
}

static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			     void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return cpsw_set_cbs(ndev, type_data);

	case TC_SETUP_QDISC_MQPRIO:
		return cpsw_set_mqprio(ndev, type_data);

	default:
		return -EOPNOTSUPP;
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	cpsw_intr_disable(cpsw);
	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
	cpsw_intr_enable(cpsw);
}
#endif

static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
	.ndo_do_ioctl		= cpsw_ndo_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
	.ndo_set_tx_maxrate	= cpsw_ndo_set_tx_maxrate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc		= cpsw_ndo_setup_tc,
};
static int cpsw_set_pauseparam(struct net_device *ndev,
			       struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	bool link;

	priv->rx_pause = pause->rx_pause ? true : false;
	priv->tx_pause = pause->tx_pause ? true : false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
	return 0;
}

static int cpsw_set_channels(struct net_device *ndev,
			     struct ethtool_channels *chs)
{
	return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
}

static const struct ethtool_ops cpsw_ethtool_ops = {
	.get_drvinfo		= cpsw_get_drvinfo,
	.get_msglevel		= cpsw_get_msglevel,
	.set_msglevel		= cpsw_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= cpsw_get_ts_info,
	.get_coalesce		= cpsw_get_coalesce,
	.set_coalesce		= cpsw_set_coalesce,
	.get_sset_count		= cpsw_get_sset_count,
	.get_strings		= cpsw_get_strings,
	.get_ethtool_stats	= cpsw_get_ethtool_stats,
	.get_pauseparam		= cpsw_get_pauseparam,
	.set_pauseparam		= cpsw_set_pauseparam,
	.get_wol		= cpsw_get_wol,
	.set_wol		= cpsw_set_wol,
	.get_regs_len		= cpsw_get_regs_len,
	.get_regs		= cpsw_get_regs,
	.begin			= cpsw_ethtool_op_begin,
	.complete		= cpsw_ethtool_op_complete,
	.get_channels		= cpsw_get_channels,
	.set_channels		= cpsw_set_channels,
	.get_link_ksettings	= cpsw_get_link_ksettings,
	.set_link_ksettings	= cpsw_set_link_ksettings,
	.get_eee		= cpsw_get_eee,
	.set_eee		= cpsw_set_eee,
	.nway_reset		= cpsw_nway_reset,
	.get_ringparam		= cpsw_get_ringparam,
	.set_ringparam		= cpsw_set_ringparam,
};

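/* cpsw_probe_dt() parses the legacy cpsw binding.  For reference, a
 * minimal illustrative node (property values borrowed from typical
 * AM335x device trees; not authoritative for every SoC):
 *
 *	mac: ethernet@4a100000 {
 *		compatible = "ti,am335x-cpsw", "ti,cpsw";
 *		slaves = <2>;
 *		active_slave = <0>;
 *		cpdma_channels = <8>;
 *		ale_entries = <1024>;
 *		bd_ram_size = <0x2000>;
 *		mac_control = <0x20>;
 *		dual_emac;
 *
 *		cpsw_emac0: slave@200 {
 *			phy-handle = <&ethphy0>;
 *			phy-mode = "rgmii-txid";
 *			dual_emac_res_vlan = <1>;
 *		};
 *	};
 */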
static int cpsw_probe_dt(struct cpsw_platform_data *data,
			 struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0, ret;
	u32 prop;

	if (!node)
		return -EINVAL;

	if (of_property_read_u32(node, "slaves", &prop)) {
		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
		return -EINVAL;
	}
	data->slaves = prop;

	if (of_property_read_u32(node, "active_slave", &prop)) {
		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
		return -EINVAL;
	}
	data->active_slave = prop;

	data->slave_data = devm_kcalloc(&pdev->dev,
					data->slaves,
					sizeof(struct cpsw_slave_data),
					GFP_KERNEL);
	if (!data->slave_data)
		return -ENOMEM;

	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
		return -EINVAL;
	}
	data->channels = prop;

	if (of_property_read_u32(node, "ale_entries", &prop)) {
		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
		return -EINVAL;
	}
	data->ale_entries = prop;

	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
		return -EINVAL;
	}
	data->bd_ram_size = prop;

	if (of_property_read_u32(node, "mac_control", &prop)) {
		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
		return -EINVAL;
	}
	data->mac_control = prop;

	if (of_property_read_bool(node, "dual_emac"))
		data->dual_emac = 1;

	/* Populate all the child nodes here... */
	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
	/* We do not want to force this, as in some cases we may not have
	 * any child nodes.
	 */
	if (ret)
		dev_warn(&pdev->dev, "Doesn't have any child node\n");

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = data->slave_data + i;
		const void *mac_addr = NULL;
		int lenp;
		const __be32 *parp;

		/* This is not a slave child node, continue */
		if (!of_node_name_eq(slave_node, "slave"))
			continue;

		slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
						    NULL);
		if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
		    IS_ERR(slave_data->ifphy)) {
			ret = PTR_ERR(slave_data->ifphy);
			dev_err(&pdev->dev,
				"%d: Error retrieving port phy: %d\n", i, ret);
			return ret;
		}

		slave_data->phy_node = of_parse_phandle(slave_node,
							"phy-handle", 0);
		parp = of_get_property(slave_node, "phy_id", &lenp);
		if (slave_data->phy_node) {
			dev_dbg(&pdev->dev,
				"slave[%d] using phy-handle=\"%pOF\"\n",
				i, slave_data->phy_node);
		} else if (of_phy_is_fixed_link(slave_node)) {
			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
			ret = of_phy_register_fixed_link(slave_node);
			if (ret) {
				if (ret != -EPROBE_DEFER)
					dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
				return ret;
			}
			slave_data->phy_node = of_node_get(slave_node);
		} else if (parp) {
			u32 phyid;
			struct device_node *mdio_node;
			struct platform_device *mdio;

			if (lenp != (sizeof(__be32) * 2)) {
				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
				goto no_phy_slave;
			}
			mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
			phyid = be32_to_cpup(parp + 1);
			mdio = of_find_device_by_node(mdio_node);
			of_node_put(mdio_node);
			if (!mdio) {
				dev_err(&pdev->dev, "Missing mdio platform device\n");
				return -EINVAL;
			}
			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
				 PHY_ID_FMT, mdio->name, phyid);
			put_device(&mdio->dev);
		} else {
			dev_err(&pdev->dev,
				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
				i);
			goto no_phy_slave;
		}
		slave_data->phy_if = of_get_phy_mode(slave_node);
		if (slave_data->phy_if < 0) {
			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
				i);
			return slave_data->phy_if;
		}

no_phy_slave:
		mac_addr = of_get_mac_address(slave_node);
		if (!IS_ERR(mac_addr)) {
			ether_addr_copy(slave_data->mac_addr, mac_addr);
		} else {
			ret = ti_cm_get_macid(&pdev->dev, i,
					      slave_data->mac_addr);
			if (ret)
				return ret;
		}
		if (data->dual_emac) {
			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
						 &prop)) {
				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
				slave_data->dual_emac_res_vlan = i + 1;
				dev_err(&pdev->dev, "Using %d as reserved VLAN for slave %d\n",
					slave_data->dual_emac_res_vlan, i);
			} else {
				slave_data->dual_emac_res_vlan = prop;
			}
		}

		i++;
		if (i == data->slaves)
			break;
	}

	return 0;
}

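/* Undo the device-tree driven setup performed in cpsw_probe_dt():
 * deregister fixed-link PHYs, drop the phy_node references taken at
 * probe time and depopulate the child platform devices.
 */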
static void cpsw_remove_dt(struct platform_device *pdev)
{
	struct cpsw_common *cpsw = platform_get_drvdata(pdev);
	struct cpsw_platform_data *data = &cpsw->data;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0;

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = &data->slave_data[i];

		if (!of_node_name_eq(slave_node, "slave"))
			continue;

		if (of_phy_is_fixed_link(slave_node))
			of_phy_deregister_fixed_link(slave_node);

		of_node_put(slave_data->phy_node);

		i++;
		if (i == data->slaves)
			break;
	}

	of_platform_depopulate(&pdev->dev);
}

static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_platform_data *data = &cpsw->data;
	struct net_device *ndev;
	struct cpsw_priv *priv_sl2;
	int ret = 0;

	ndev = devm_alloc_etherdev_mqs(cpsw->dev, sizeof(struct cpsw_priv),
				       CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
	if (!ndev) {
		dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
		return -ENOMEM;
	}

	priv_sl2 = netdev_priv(ndev);
	priv_sl2->cpsw = cpsw;
	priv_sl2->ndev = ndev;
	priv_sl2->dev = &ndev->dev;
	priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);

	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
		       ETH_ALEN);
		dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
			 priv_sl2->mac_addr);
	} else {
		eth_random_addr(priv_sl2->mac_addr);
		dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
			 priv_sl2->mac_addr);
	}
	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);

	priv_sl2->emac_port = 1;
	cpsw->slaves[1].ndev = ndev;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;

	ndev->netdev_ops = &cpsw_netdev_ops;
	ndev->ethtool_ops = &cpsw_ethtool_ops;

	/* register the network device */
	SET_NETDEV_DEV(ndev, cpsw->dev);
	ret = register_netdev(ndev);
	if (ret)
		dev_err(cpsw->dev, "cpsw: error registering net device\n");

	return ret;
}

static const struct of_device_id cpsw_of_mtable[] = {
	{ .compatible = "ti,cpsw"},
	{ .compatible = "ti,am335x-cpsw"},
	{ .compatible = "ti,am4372-cpsw"},
	{ .compatible = "ti,dra7-cpsw"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);

static const struct soc_device_attribute cpsw_soc_devices[] = {
	{ .family = "AM33xx", .revision = "ES1.0"},
	{ /* sentinel */ }
};

static int cpsw_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;
	struct cpsw_platform_data *data;
	struct net_device *ndev;
	struct cpsw_priv *priv;
	void __iomem *ss_regs;
	struct resource *res, *ss_res;
	struct gpio_descs *mode;
	const struct soc_device_attribute *soc;
	struct cpsw_common *cpsw;
	int ret = 0, ch;
	int irq;

	cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
	if (!cpsw)
		return -ENOMEM;

	cpsw->dev = dev;

	mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
	if (IS_ERR(mode)) {
		ret = PTR_ERR(mode);
		dev_err(dev, "gpio request failed, ret %d\n", ret);
		return ret;
	}

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(dev, "fck is not found %d\n", ret);
		return ret;
	}
	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;

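	/* Resource layout follows the cpsw binding: MEM region 0 is the
	 * switch subsystem (ss) register space and region 1 the wrapper
	 * (wr) space; of the four interrupts (rx_thresh, rx, tx, misc)
	 * only rx (index 1) and tx (index 2) are used by this driver.
	 */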
	ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ss_regs = devm_ioremap_resource(dev, ss_res);
	if (IS_ERR(ss_regs))
		return PTR_ERR(ss_regs);
	cpsw->regs = ss_regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	cpsw->wr_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(cpsw->wr_regs))
		return PTR_ERR(cpsw->wr_regs);

	/* RX IRQ */
	irq = platform_get_irq(pdev, 1);
	if (irq < 0)
		return irq;
	cpsw->irqs_table[0] = irq;

	/* TX IRQ */
	irq = platform_get_irq(pdev, 2);
	if (irq < 0)
		return irq;
	cpsw->irqs_table[1] = irq;

	/* Runtime PM is enabled this early because the child devices
	 * populated from DT may require it.
	 */
	pm_runtime_enable(dev);

	/* Need to enable clocks with runtime PM api to access module
	 * registers
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		goto clean_runtime_disable_ret;
	}

	ret = cpsw_probe_dt(&cpsw->data, pdev);
	if (ret)
		goto clean_dt_ret;

	soc = soc_device_match(cpsw_soc_devices);
	if (soc)
		cpsw->quirk_irq = 1;

	data = &cpsw->data;
	cpsw->slaves = devm_kcalloc(dev,
				    data->slaves, sizeof(struct cpsw_slave),
				    GFP_KERNEL);
	if (!cpsw->slaves) {
		ret = -ENOMEM;
		goto clean_dt_ret;
	}

	cpsw->rx_packet_max = max(rx_packet_max, CPSW_MAX_PACKET_SIZE);
	cpsw->descs_pool_size = descs_pool_size;

	ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
			       ss_res->start + CPSW2_BD_OFFSET,
			       descs_pool_size);
	if (ret)
		goto clean_dt_ret;

	/* Use the highest-numbered TX channel unless the IRQ quirk forces
	 * everything onto channel 0.
	 */
	ch = cpsw->quirk_irq ? 0 : 7;
	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
	if (IS_ERR(cpsw->txv[0].ch)) {
		dev_err(dev, "error initializing tx dma channel\n");
		ret = PTR_ERR(cpsw->txv[0].ch);
		goto clean_cpts;
	}

	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
	if (IS_ERR(cpsw->rxv[0].ch)) {
		dev_err(dev, "error initializing rx dma channel\n");
		ret = PTR_ERR(cpsw->rxv[0].ch);
		goto clean_cpts;
	}
	cpsw_split_res(cpsw);

	/* setup netdev */
	ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
				       CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
	if (!ndev) {
		dev_err(dev, "error allocating net_device\n");
		ret = -ENOMEM;
		goto clean_cpts;
	}

	platform_set_drvdata(pdev, cpsw);
	priv = netdev_priv(ndev);
	priv->cpsw = cpsw;
	priv->ndev = ndev;
	priv->dev = dev;
	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
	priv->emac_port = 0;

	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
		dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr);
	} else {
		eth_random_addr(priv->mac_addr);
		dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
	}

	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);

	cpsw->slaves[0].ndev = ndev;

	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;

	ndev->netdev_ops = &cpsw_netdev_ops;
	ndev->ethtool_ops = &cpsw_ethtool_ops;
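	/* AM33xx ES1.0 (cpsw->quirk_irq, matched via cpsw_soc_devices
	 * above) has to use the simpler single-queue poll handlers; all
	 * other SoCs get the multi-queue variants.
	 */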
	netif_napi_add(ndev, &cpsw->napi_rx,
		       cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
		       CPSW_POLL_WEIGHT);
	netif_tx_napi_add(ndev, &cpsw->napi_tx,
			  cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
			  CPSW_POLL_WEIGHT);

	/* register the network device */
	SET_NETDEV_DEV(ndev, dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "error registering net device\n");
		ret = -ENODEV;
		goto clean_cpts;
	}

	if (cpsw->data.dual_emac) {
		ret = cpsw_probe_dual_emac(priv);
		if (ret) {
			cpsw_err(priv, probe, "error probing slave 2 emac interface\n");
			goto clean_unregister_netdev_ret;
		}
	}

	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
	 * MISC IRQs which are always kept disabled with this driver so
	 * we will not request them.
	 *
	 * If anyone wants to implement support for those, make sure to
	 * first request and append them to irqs_table array.
	 */
	ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
			       0, dev_name(dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev_ret;
	}

	ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
			       0, dev_name(dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev_ret;
	}

	cpsw_notice(priv, probe,
		    "initialized device (regs %pa, irq %d, pool size %d)\n",
		    &ss_res->start, cpsw->irqs_table[0], descs_pool_size);

	pm_runtime_put(&pdev->dev);

	return 0;

clean_unregister_netdev_ret:
	unregister_netdev(ndev);
clean_cpts:
	cpts_release(cpsw->cpts);
	cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
	cpsw_remove_dt(pdev);
	pm_runtime_put_sync(&pdev->dev);
clean_runtime_disable_ret:
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int cpsw_remove(struct platform_device *pdev)
{
	struct cpsw_common *cpsw = platform_get_drvdata(pdev);
	int i, ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	for (i = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].ndev)
			unregister_netdev(cpsw->slaves[i].ndev);

	cpts_release(cpsw->cpts);
	cpdma_ctlr_destroy(cpsw->dma);
	cpsw_remove_dt(pdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int cpsw_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_stop(cpsw->slaves[i].ndev);
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_stop(ndev);
	}

	/* Select sleep pin state */
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int cpsw_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	/* Select default pin state */
	pinctrl_pm_select_default_state(dev);

	/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
	rtnl_lock();
	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_open(cpsw->slaves[i].ndev);
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_open(ndev);
	}
	rtnl_unlock();

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);

static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",
		.pm	 = &cpsw_pm_ops,
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};

module_platform_driver(cpsw_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
MODULE_DESCRIPTION("TI CPSW Ethernet driver");