// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "bcmasp_intf: " fmt

#include <asm/byteorder.h>
#include <linux/brcmphy.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/platform_device.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmasp.h"
#include "bcmasp_intf_defs.h"

static int incr_ring(int index, int ring_count)
{
	index++;
	if (index == ring_count)
		return 0;

	return index;
}

/* Points to last byte of descriptor */
static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
				 int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr > end)
		return beg + DESC_SIZE - 1;

	return addr;
}

/* Points to first byte of descriptor */
static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
				  int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr >= end)
		return beg;

	return addr;
}

static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
{
	if (en) {
		tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
				TX_EPKT_C_CFG_MISC_PT |
				(intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
				TX_EPKT_C_CFG_MISC);
	} else {
		tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
	}
}

static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
{
	if (en)
		rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
				RX_EDPKT_CFG_ENABLE);
	else
		rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
}

static void bcmasp_set_rx_mode(struct net_device *dev)
{
	unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	spin_lock_bh(&intf->parent->mda_lock);

	bcmasp_disable_all_filters(intf);

	if (dev->flags & IFF_PROMISC)
		goto set_promisc;

	bcmasp_set_promisc(intf, 0);

	bcmasp_set_broad(intf, 1);

	bcmasp_set_oaddr(intf, dev->dev_addr, 1);

	if (dev->flags & IFF_ALLMULTI) {
		bcmasp_set_allmulti(intf, 1);
	} else {
		bcmasp_set_allmulti(intf, 0);

		netdev_for_each_mc_addr(ha, dev) {
			ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
			if (ret) {
				intf->mib.mc_filters_full_cnt++;
				goto set_promisc;
			}
		}
	}

	netdev_for_each_uc_addr(ha, dev) {
		ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
		if (ret) {
			intf->mib.uc_filters_full_cnt++;
			goto set_promisc;
		}
	}

	spin_unlock_bh(&intf->parent->mda_lock);
	return;

set_promisc:
	bcmasp_set_promisc(intf, 1);
	intf->mib.promisc_filters_cnt++;

	/* disable all filters used by this port */
	bcmasp_disable_all_filters(intf);

	spin_unlock_bh(&intf->parent->mda_lock);
}

static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
{
	struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];

	txcb->skb = NULL;
	dma_unmap_addr_set(txcb, dma_addr, 0);
	dma_unmap_len_set(txcb, dma_len, 0);
	txcb->last = false;
}

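/* Ring bookkeeping example (DESC_RING_COUNT slots of DESC_SIZE bytes):
 * incr_ring() steps a software index 0, 1, ..., DESC_RING_COUNT - 1 and
 * wraps back to 0, while incr_last_byte()/incr_first_byte() step the
 * hardware VALID/READ DMA pointers by DESC_SIZE and wrap at the end of
 * the ring.
 *
 * tx_spb_ring_full() below is intended to return 1 when the ring cannot
 * take cnt more descriptors, i.e. when the producer index would catch
 * the clean (consumer) index.
 */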
static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
{
	int next_index, i;

	/* Check if we have enough room for cnt descriptors */
	for (i = 0; i < cnt; i++) {
		next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT);
		if (next_index == intf->tx_spb_clean_index)
			return 1;
	}

	return 0;
}

static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
					   struct sk_buff *skb,
					   bool *csum_hw)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 header = 0, header2 = 0, epkt = 0;
	struct bcmasp_pkt_offload *offload;
	unsigned int header_cnt = 0;
	u8 ip_proto;
	int ret;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return skb;

	ret = skb_cow_head(skb, sizeof(*offload));
	if (ret < 0) {
		intf->mib.tx_realloc_offload_failed++;
		goto help;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(0);
		ip_proto = ip_hdr(skb)->protocol;
		header_cnt += 2;
		break;
	case htons(ETH_P_IPV6):
		header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(1);
		ip_proto = ipv6_hdr(skb)->nexthdr;
		header_cnt += 2;
		break;
	default:
		goto help;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
		epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L4;
		header_cnt++;
		break;
	case IPPROTO_UDP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
		epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L4;
		header_cnt++;
		break;
	default:
		goto help;
	}

	offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));

	header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
		  PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
	epkt |= PKT_OFFLOAD_EPKT_OP;

	offload->nop = htonl(PKT_OFFLOAD_NOP);
	offload->header = htonl(header);
	offload->header2 = htonl(header2);
	offload->epkt = htonl(epkt);
	offload->end = htonl(PKT_OFFLOAD_END_OP);
	*csum_hw = true;

	return skb;

help:
	skb_checksum_help(skb);

	return skb;
}

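/* Queue an skb for transmission. The linear head and each page fragment
 * get their own descriptor (SOF on the first, EOF plus txcb->last on
 * the final one); once all descriptors are filled in, a wmb() orders
 * the writes before the new DMA VALID pointer is published to the
 * hardware.
 */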
static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	unsigned int total_bytes, size;
	int spb_index, nr_frags, i, j;
	struct bcmasp_tx_cb *txcb;
	dma_addr_t mapping, valid;
	struct bcmasp_desc *desc;
	bool csum_hw = false;
	struct device *kdev;
	skb_frag_t *frag;

	kdev = &intf->parent->pdev->dev;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (tx_spb_ring_full(intf, nr_frags + 1)) {
		netif_stop_queue(dev);
		if (net_ratelimit())
			netdev_err(dev, "Tx Ring Full!\n");
		return NETDEV_TX_BUSY;
	}

	/* Save skb len before adding csum offload header */
	total_bytes = skb->len;
	skb = bcmasp_csum_offload(dev, skb, &csum_hw);
	if (!skb)
		return NETDEV_TX_OK;

	spb_index = intf->tx_spb_index;
	valid = intf->tx_spb_dma_valid;
	for (i = 0; i <= nr_frags; i++) {
		if (!i) {
			size = skb_headlen(skb);
			if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
				if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
					return NETDEV_TX_OK;
				size = skb->len;
			}
			mapping = dma_map_single(kdev, skb->data, size,
						 DMA_TO_DEVICE);
		} else {
			frag = &skb_shinfo(skb)->frags[i - 1];
			size = skb_frag_size(frag);
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
		}

		if (dma_mapping_error(kdev, mapping)) {
			intf->mib.tx_dma_failed++;
			spb_index = intf->tx_spb_index;
			for (j = 0; j < i; j++) {
				bcmasp_clean_txcb(intf, spb_index);
				spb_index = incr_ring(spb_index,
						      DESC_RING_COUNT);
			}
			/* Rewind so we do not have a hole */
			spb_index = intf->tx_spb_index;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		txcb = &intf->tx_cbs[spb_index];
		desc = &intf->tx_spb_cpu[spb_index];
		memset(desc, 0, sizeof(*desc));
		txcb->skb = skb;
		txcb->bytes_sent = total_bytes;
		dma_unmap_addr_set(txcb, dma_addr, mapping);
		dma_unmap_len_set(txcb, dma_len, size);
		if (!i) {
			desc->flags |= DESC_SOF;
			if (csum_hw)
				desc->flags |= DESC_EPKT_CMD;
		}

		if (i == nr_frags) {
			desc->flags |= DESC_EOF;
			txcb->last = true;
		}

		desc->buf = mapping;
		desc->size = size;
		desc->flags |= DESC_INT_EN;

		netif_dbg(intf, tx_queued, dev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  spb_index);

		spb_index = incr_ring(spb_index, DESC_RING_COUNT);
		valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
				       DESC_RING_COUNT);
	}

	/* Ensure all descriptors have been written to DRAM for the
	 * hardware to see up-to-date contents.
	 */
	wmb();

	intf->tx_spb_index = spb_index;
	intf->tx_spb_dma_valid = valid;

	skb_tx_timestamp(skb);

	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);

	if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

static void bcmasp_netif_start(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	bcmasp_set_rx_mode(dev);
	napi_enable(&intf->tx_napi);
	napi_enable(&intf->rx_napi);

	bcmasp_enable_rx_irq(intf, 1);
	bcmasp_enable_tx_irq(intf, 1);
	bcmasp_enable_phy_irq(intf, 1);

	phy_start(dev->phydev);
}

static void umac_reset(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x0, UMC_CMD);
	umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
	usleep_range(10, 100);
	/* We hold the umac in reset and bring it out of
	 * reset when phy link is up.
	 */
}

static void umac_set_hw_addr(struct bcmasp_intf *intf,
			     const unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		   addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	umac_wl(intf, mac0, UMC_MAC0);
	umac_wl(intf, mac1, UMC_MAC1);
}

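/* Update the UMC_CMD enable bits. Intentionally a no-op while the
 * UniMAC is held in software reset; the enables are applied again once
 * the link comes up and bcmasp_adj_link() takes the MAC out of reset.
 */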
static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
			    unsigned int enable)
{
	u32 reg;

	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		return;
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_wl(intf, reg, UMC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static void umac_init(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x800, UMC_FRM_LEN);
	umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
	umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
}

static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long read, released = 0;
	struct bcmasp_tx_cb *txcb;
	struct bcmasp_desc *desc;
	dma_addr_t mapping;

	read = tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
	while (intf->tx_spb_dma_read != read) {
		txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
		mapping = dma_unmap_addr(txcb, dma_addr);

		dma_unmap_single(kdev, mapping,
				 dma_unmap_len(txcb, dma_len),
				 DMA_TO_DEVICE);

		if (txcb->last) {
			dev_consume_skb_any(txcb->skb);

			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->tx_packets);
			u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
			u64_stats_update_end(&stats->syncp);
		}

		desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];

		netif_dbg(intf, tx_done, intf->ndev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  intf->tx_spb_clean_index);

		bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
		released++;

		intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
						     DESC_RING_COUNT);
		intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
							intf->tx_spb_dma_addr,
							DESC_RING_COUNT);
	}

	return released;
}

static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, tx_napi);
	int released = 0;

	released = bcmasp_tx_reclaim(intf);

	napi_complete(&intf->tx_napi);

	bcmasp_enable_tx_irq(intf, 1);

	if (released)
		netif_wake_queue(intf->ndev);

	return 0;
}

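/* RX NAPI handler. The EDPKT descriptors reference offsets within one
 * long-lived DMA-mapped ring buffer, so the CPU address of a packet is
 * recovered by offsetting rx_ring_cpu by
 * (DESC_ADDR(desc->buf) - rx_ring_dma). Packets are copied into freshly
 * allocated skbs; the hardware buffer space is then returned by
 * advancing RING_BUFFER_READ past the consumed packet.
 */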
static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, rx_napi);
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long processed = 0;
	struct bcmasp_desc *desc;
	struct sk_buff *skb;
	dma_addr_t valid;
	void *data;
	u64 flags;
	u32 len;

	valid = rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID) + 1;
	if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
		valid = intf->rx_edpkt_dma_addr;

	while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
		desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];

		/* Ensure that descriptor has been fully written to DRAM by
		 * hardware before reading by the CPU
		 */
		rmb();

		/* Calculate virt addr by offsetting from physical addr */
		data = intf->rx_ring_cpu +
			(DESC_ADDR(desc->buf) - intf->rx_ring_dma);

		flags = DESC_FLAGS(desc->buf);
		if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
			if (net_ratelimit()) {
				netif_err(intf, rx_status, intf->ndev,
					  "flags=0x%llx\n", flags);
			}

			u64_stats_update_begin(&stats->syncp);
			if (flags & DESC_CRC_ERR)
				u64_stats_inc(&stats->rx_crc_errs);
			if (flags & DESC_RX_SYM_ERR)
				u64_stats_inc(&stats->rx_sym_errs);
			u64_stats_update_end(&stats->syncp);

			goto next;
		}

		dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
					DMA_FROM_DEVICE);

		len = desc->size;

		skb = napi_alloc_skb(napi, len);
		if (!skb) {
			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->rx_dropped);
			u64_stats_update_end(&stats->syncp);
			intf->mib.alloc_rx_skb_failed++;

			goto next;
		}

		skb_put(skb, len);
		memcpy(skb->data, data, len);

		skb_pull(skb, 2);
		len -= 2;
		if (likely(intf->crc_fwd)) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		if ((intf->ndev->features & NETIF_F_RXCSUM) &&
		    (desc->buf & DESC_CHKSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		skb->protocol = eth_type_trans(skb, intf->ndev);

		napi_gro_receive(napi, skb);

		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->rx_packets);
		u64_stats_add(&stats->rx_bytes, len);
		u64_stats_update_end(&stats->syncp);

next:
		rx_edpkt_cfg_wq(intf, (DESC_ADDR(desc->buf) + desc->size),
				RX_EDPKT_RING_BUFFER_READ);

		processed++;
		intf->rx_edpkt_dma_read =
			incr_first_byte(intf->rx_edpkt_dma_read,
					intf->rx_edpkt_dma_addr,
					DESC_RING_COUNT);
		intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
						 DESC_RING_COUNT);
	}

	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_read, RX_EDPKT_DMA_READ);

	if (processed < budget && napi_complete_done(&intf->rx_napi, processed))
		bcmasp_enable_rx_irq(intf, 1);

	return processed;
}

static void bcmasp_adj_link(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 cmd_bits = 0, reg;
	int changed = 0;

	if (intf->old_link != phydev->link) {
		changed = 1;
		intf->old_link = phydev->link;
	}

	if (intf->old_duplex != phydev->duplex) {
		changed = 1;
		intf->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = UMC_CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = UMC_CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = UMC_CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = UMC_CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= UMC_CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= UMC_CMD_HD_EN;

	if (intf->old_pause != phydev->pause) {
		changed = 1;
		intf->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_rl(intf, UMC_CMD);
		reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
			UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
			UMC_CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		if (reg & UMC_CMD_SW_RESET) {
			reg &= ~UMC_CMD_SW_RESET;
			umac_wl(intf, reg, UMC_CMD);
			udelay(2);
			reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
		}
		umac_wl(intf, reg, UMC_CMD);

		umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER);
		reg = umac_rl(intf, UMC_EEE_CTRL);
		if (phydev->enable_tx_lpi)
			reg |= EEE_EN;
		else
			reg &= ~EEE_EN;
		umac_wl(intf, reg, UMC_EEE_CTRL);
	}

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	if (phydev->link)
		reg |= RGMII_LINK;
	else
		reg &= ~RGMII_LINK;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);

	if (changed)
		phy_print_status(phydev);
}

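/* Allocate the per-interface datapath memory: a page-order RX packet
 * ring with a streaming DMA mapping, coherent RX (EDPKT) and TX (SPB)
 * descriptor rings, and the TX control-block array used for unmap
 * bookkeeping.
 */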
static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct page *buffer_pg;

	/* Alloc RX */
	intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
	buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
	if (!buffer_pg)
		return -ENOMEM;

	intf->rx_ring_cpu = page_to_virt(buffer_pg);
	intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, intf->rx_ring_dma))
		goto free_rx_buffer;

	intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
						&intf->rx_edpkt_dma_addr,
						GFP_KERNEL);
	if (!intf->rx_edpkt_cpu)
		goto free_rx_buffer_dma;

	/* Alloc TX */
	intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
					      &intf->tx_spb_dma_addr,
					      GFP_KERNEL);
	if (!intf->tx_spb_cpu)
		goto free_rx_edpkt_dma;

	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
			       GFP_KERNEL);
	if (!intf->tx_cbs)
		goto free_tx_spb_dma;

	return 0;

free_tx_spb_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
free_rx_edpkt_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
free_rx_buffer_dma:
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
free_rx_buffer:
	__free_pages(buffer_pg, intf->rx_buf_order);

	return -ENOMEM;
}

static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;

	/* RX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);

	/* TX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
	kfree(intf->tx_cbs);
}

static void bcmasp_init_rx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
	intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
	intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
	intf->rx_edpkt_index = 0;

	/* Make sure channels are disabled */
	rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);

	/* Rx SPB */
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_END);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_VALID);

	/* EDPKT */
	rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
			RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
		       (RX_EDPKT_CFG_CFG0_64_ALN <<
			RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
		       (RX_EDPKT_CFG_CFG0_EFRM_STUF),
			RX_EDPKT_CFG_CFG0);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);

	umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
		   UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
		   UMAC2FB_CFG);
}

static void bcmasp_init_tx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
	intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
	intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
	intf->tx_spb_index = 0;
	intf->tx_spb_clean_index = 0;
	memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);

	/* Make sure channels are disabled */
	tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
	tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);

	/* Tx SPB */
	tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
		       TX_SPB_CTRL_XF_CTRL2);

	if (intf->parent->tx_chan_offset)
		tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)),
				 TX_PAUSE_MAP_VECTOR);
	tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);

	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
}

static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
{
	u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
		   RGMII_EPHY_CFG_IDDQ_GLOBAL;
	u32 reg;

	reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
	if (enable) {
		reg &= ~RGMII_EPHY_CK25_DIS;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~mask;
		reg |= RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~RGMII_EPHY_RESET;
	} else {
		reg |= mask | RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);
		reg |= RGMII_EPHY_CK25_DIS;
	}
	rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
	mdelay(1);

	/* Set or clear the LED control override to avoid lighting up LEDs
	 * while the EPHY is powered off and drawing unnecessary current.
	 */
	reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
	if (enable)
		reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
	else
		reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
	rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
}

static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
{
	u32 reg;

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_OOB_DIS;
	if (enable)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

static void bcmasp_netif_deinit(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 reg, timeout = 1000;

	napi_disable(&intf->tx_napi);

	bcmasp_enable_tx(intf, 0);

	/* Flush any TX packets in the pipe */
	tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
	do {
		reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
		if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
			break;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);
	tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);

	bcmasp_tx_reclaim(intf);

	umac_enable_set(intf, UMC_CMD_TX_EN, 0);

	phy_stop(dev->phydev);

	umac_enable_set(intf, UMC_CMD_RX_EN, 0);

	bcmasp_flush_rx_port(intf);
	usleep_range(1000, 2000);
	bcmasp_enable_rx(intf, 0);

	napi_disable(&intf->rx_napi);

	/* Disable interrupts */
	bcmasp_enable_tx_irq(intf, 0);
	bcmasp_enable_rx_irq(intf, 0);
	bcmasp_enable_phy_irq(intf, 0);

	netif_napi_del(&intf->tx_napi);
	netif_napi_del(&intf->rx_napi);
}

static int bcmasp_stop(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, ifdown, dev, "bcmasp stop\n");

	/* Stop tx from updating HW */
	netif_tx_disable(dev);

	bcmasp_netif_deinit(dev);

	bcmasp_reclaim_free_buffers(intf);

	phy_disconnect(dev->phydev);

	/* Disable internal EPHY or external PHY */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);

	/* Disable the interface clocks */
	bcmasp_core_clock_set_intf(intf, false);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}

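/* Select the RGMII port mode from the phy-mode property and program the
 * TXC delay: plain "rgmii" disables the internal 2ns ID delay (see the
 * comment in the switch below), while "rgmii-txid" keeps it enabled.
 */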
static void bcmasp_configure_port(struct bcmasp_intf *intf)
{
	u32 reg, id_mode_dis = 0;

	reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
	reg &= ~RGMII_PORT_MODE_MASK;

	switch (intf->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		/* RGMII_NO_ID: TXC transitions at the same time as TXD
		 * (requires PCB or receiver-side delay)
		 * RGMII: Add 2ns delay on TXC (90 degree shift)
		 *
		 * ID is implicitly disabled for 100Mbps (RG)MII operation.
		 */
		id_mode_dis = RGMII_ID_MODE_DIS;
		fallthrough;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		reg |= RGMII_PORT_MODE_EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		reg |= RGMII_PORT_MODE_EXT_EPHY;
		break;
	default:
		break;
	}

	if (intf->internal_phy)
		reg |= RGMII_PORT_MODE_EPHY;

	rgmii_wl(intf, reg, RGMII_PORT_CNTRL);

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_ID_MODE_DIS;
	reg |= id_mode_dis;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

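/* Datapath bring-up common to ndo_open and resume. phy_connect is false
 * on the resume path since the PHY remains attached across suspend
 * (only bcmasp_stop() disconnects it).
 */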
static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	phy_interface_t phy_iface = intf->phy_interface;
	u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
			PHY_BRCM_DIS_TXCRXC_NOENRGY |
			PHY_BRCM_IDDQ_SUSPEND;
	struct phy_device *phydev = NULL;
	int ret;

	/* Always enable interface clocks */
	bcmasp_core_clock_set_intf(intf, true);

	/* Enable internal PHY or external PHY before any MAC activity */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, true);
	else
		bcmasp_rgmii_mode_en_set(intf, true);
	bcmasp_configure_port(intf);

	/* This is an ugly quirk but we have not been correctly
	 * interpreting the phy_interface values and we have done that
	 * across different drivers, so at least we are consistent in
	 * our mistakes.
	 *
	 * When the Generic PHY driver is in use either the PHY has
	 * been strapped or programmed correctly by the boot loader so
	 * we should stick to our incorrect interpretation since we
	 * have validated it.
	 *
	 * Now when a dedicated PHY driver is in use, we need to
	 * reverse the meaning of the phy_interface_mode values to
	 * something that the PHY driver will interpret and act on such
	 * that we have two mistakes canceling themselves so to speak.
	 * We only do this for the two modes that GENET driver
	 * officially supports on Broadcom STB chips:
	 * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
	 * Other modes are not *officially* supported with the boot
	 * loader and the scripted environment generating Device Tree
	 * blobs for those platforms.
	 *
	 * Note that internal PHY and fixed-link configurations are not
	 * affected because they use different phy_interface_t values
	 * or the Generic PHY driver.
	 */
	switch (phy_iface) {
	case PHY_INTERFACE_MODE_RGMII:
		phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	default:
		break;
	}

	if (phy_connect) {
		phydev = of_phy_connect(dev, intf->phy_dn,
					bcmasp_adj_link, phy_flags,
					phy_iface);
		if (!phydev) {
			ret = -ENODEV;
			netdev_err(dev, "could not attach to PHY\n");
			goto err_phy_disable;
		}

		if (intf->internal_phy)
			dev->phydev->irq = PHY_MAC_INTERRUPT;

		/* Indicate that the MAC is responsible for PHY PM */
		phydev->mac_managed_pm = true;

		/* Set phylib's copy of the LPI timer */
		phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
	}

	umac_reset(intf);

	umac_init(intf);

	umac_set_hw_addr(intf, dev->dev_addr);

	intf->old_duplex = -1;
	intf->old_link = -1;
	intf->old_pause = -1;

	bcmasp_init_tx(intf);
	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
	bcmasp_enable_tx(intf, 1);

	bcmasp_init_rx(intf);
	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
	bcmasp_enable_rx(intf, 1);

	intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);

	bcmasp_netif_start(dev);

	netif_start_queue(dev);

	return 0;

err_phy_disable:
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);
	return ret;
}

static int bcmasp_open(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	int ret;

	netif_dbg(intf, ifup, dev, "bcmasp open\n");

	ret = bcmasp_alloc_buffers(intf);
	if (ret)
		return ret;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		goto err_free_mem;

	ret = bcmasp_netif_init(dev, true);
	if (ret) {
		clk_disable_unprepare(intf->parent->clk);
		goto err_free_mem;
	}

	return ret;

err_free_mem:
	bcmasp_reclaim_free_buffers(intf);

	return ret;
}

static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
	intf->mib.tx_timeout_cnt++;
}

static int bcmasp_get_phys_port_name(struct net_device *dev,
				     char *name, size_t len)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	if (snprintf(name, len, "p%d", intf->port) >= len)
		return -EINVAL;

	return 0;
}

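/* Snapshot the driver-maintained 64-bit counters. The fetch/retry
 * sequence guards against torn reads on 32-bit hosts while the NAPI
 * handlers are updating the counters concurrently.
 */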
static void bcmasp_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *stats)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct bcmasp_intf_stats64 *lstats;
	unsigned int start;

	lstats = &intf->stats64;

	do {
		start = u64_stats_fetch_begin(&lstats->syncp);
		stats->rx_packets = u64_stats_read(&lstats->rx_packets);
		stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
		stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
		stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
		stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
		stats->rx_errors = stats->rx_crc_errors +
				   stats->rx_frame_errors;

		stats->tx_packets = u64_stats_read(&lstats->tx_packets);
		stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
	} while (u64_stats_fetch_retry(&lstats->syncp, start));
}

static const struct net_device_ops bcmasp_netdev_ops = {
	.ndo_open		= bcmasp_open,
	.ndo_stop		= bcmasp_stop,
	.ndo_start_xmit		= bcmasp_xmit,
	.ndo_tx_timeout		= bcmasp_tx_timeout,
	.ndo_set_rx_mode	= bcmasp_set_rx_mode,
	.ndo_get_phys_port_name	= bcmasp_get_phys_port_name,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= bcmasp_get_stats64,
};

static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
{
	/* Per port */
	intf->res.umac = priv->base + UMC_OFFSET(intf);
	intf->res.umac2fb = priv->base + (UMAC2FB_OFFSET +
					  priv->rx_ctrl_offset +
					  (intf->port * 0x4));
	intf->res.rgmii = priv->base + RGMII_OFFSET(intf);

	/* Per ch */
	intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
	intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
	intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
	intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
	intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);

	intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
	intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
}

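/* Instantiate one interface from its device-tree node: parse the "reg"
 * (port) and "brcm,channel" properties, allocate the netdev, resolve
 * the PHY (phandle, fixed-link or internal) and map register resources.
 * Returns NULL on any failure; the caller is expected to register the
 * netdev later in the probe path.
 */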
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
					    struct device_node *ndev_dn, int i)
{
	struct device *dev = &priv->pdev->dev;
	struct bcmasp_intf *intf;
	struct net_device *ndev;
	u32 ch, port;
	int ret;

	if (of_property_read_u32(ndev_dn, "reg", &port)) {
		dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
		goto err;
	}

	if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
		dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
		goto err;
	}

	ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
	if (!ndev) {
		dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
		goto err;
	}
	intf = netdev_priv(ndev);

	intf->parent = priv;
	intf->ndev = ndev;
	intf->channel = ch;
	intf->port = port;
	intf->ndev_dn = ndev_dn;
	intf->index = i;

	ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
	if (ret < 0) {
		dev_err(dev, "invalid PHY mode property\n");
		goto err_free_netdev;
	}

	if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
		intf->internal_phy = true;

	intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
	if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
		ret = of_phy_register_fixed_link(ndev_dn);
		if (ret) {
			dev_warn(dev, "%s: failed to register fixed PHY\n",
				 ndev_dn->name);
			goto err_free_netdev;
		}
		intf->phy_dn = ndev_dn;
	}

	/* Map resource */
	bcmasp_map_res(priv, intf);

	if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
	     intf->phy_interface != PHY_INTERFACE_MODE_MII &&
	     intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
	    (intf->port != 1 && intf->internal_phy)) {
		netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
			   phy_modes(intf->phy_interface), intf->port);
		ret = -EINVAL;
		goto err_deregister_fixed_link;
	}

	ret = of_get_ethdev_address(ndev_dn, ndev);
	if (ret) {
		netdev_warn(ndev, "using random Ethernet MAC\n");
		eth_hw_addr_random(ndev);
	}

	SET_NETDEV_DEV(ndev, dev);
	ndev->netdev_ops = &bcmasp_netdev_ops;
	ndev->ethtool_ops = &bcmasp_ethtool_ops;
	intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
					  NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);
	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			  NETIF_F_RXCSUM;
	ndev->hw_features |= ndev->features;
	ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);

	netdev_sw_irq_coalesce_default_on(ndev);

	return intf;

err_deregister_fixed_link:
	if (of_phy_is_fixed_link(ndev_dn))
		of_phy_deregister_fixed_link(ndev_dn);
err_free_netdev:
	free_netdev(ndev);
err:
	return NULL;
}

void bcmasp_interface_destroy(struct bcmasp_intf *intf)
{
	if (intf->ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(intf->ndev);
	if (of_phy_is_fixed_link(intf->ndev_dn))
		of_phy_deregister_fixed_link(intf->ndev_dn);
	free_netdev(intf->ndev);
}

static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
{
	struct net_device *ndev = intf->ndev;
	u32 reg;

	reg = umac_rl(intf, UMC_MPD_CTRL);
	if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= UMC_MPD_CTRL_MPD_EN;
	reg &= ~UMC_MPD_CTRL_PSW_EN;
	if (intf->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
			UMC_PSW_MS);
		umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
			UMC_PSW_LS);
		reg |= UMC_MPD_CTRL_PSW_EN;
	}
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->wolopts & WAKE_FILTER)
		bcmasp_netfilt_suspend(intf);

	/* Bring UniMAC out of reset if needed and enable RX */
	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		reg &= ~UMC_CMD_SW_RESET;

	reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
	umac_wl(intf, reg, UMC_CMD);

	umac_enable_set(intf, UMC_CMD_RX_EN, 1);

	wakeup_intr2_core_wl(intf->parent, 0xffffffff,
			     ASP_WAKEUP_INTR2_MASK_CLEAR);

	if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
	    intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, true);

	netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}

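/* System suspend entry point. The datapath is always torn down; when
 * Wake-on-LAN is armed, bcmasp_suspend_to_wol() leaves the UniMAC
 * receiver running (promiscuous) so magic/wake packets can still be
 * detected.
 */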
int bcmasp_interface_suspend(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct net_device *dev = intf->ndev;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmasp_netif_deinit(dev);

	if (!intf->wolopts) {
		if (intf->internal_phy)
			bcmasp_ephy_enable_set(intf, false);
		else
			bcmasp_rgmii_mode_en_set(intf, false);

		/* If Wake-on-LAN is disabled, we can safely
		 * disable the network interface clocks.
		 */
		bcmasp_core_clock_set_intf(intf, false);
	}

	if (device_may_wakeup(kdev) && intf->wolopts)
		bcmasp_suspend_to_wol(intf);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}

static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
	u32 reg;

	if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled &&
	    intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, false);

	reg = umac_rl(intf, UMC_MPD_CTRL);
	reg &= ~UMC_MPD_CTRL_MPD_EN;
	umac_wl(intf, reg, UMC_MPD_CTRL);

	wakeup_intr2_core_wl(intf->parent, 0xffffffff,
			     ASP_WAKEUP_INTR2_MASK_SET);
}

int bcmasp_interface_resume(struct bcmasp_intf *intf)
{
	struct net_device *dev = intf->ndev;
	int ret;

	if (!netif_running(dev))
		return 0;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		return ret;

	ret = bcmasp_netif_init(dev, false);
	if (ret)
		goto out;

	bcmasp_resume_from_wol(intf);

	netif_device_attach(dev);

	return 0;

out:
	clk_disable_unprepare(intf->parent->clk);
	return ret;
}