// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt)			"bcmasp_intf: " fmt

#include <asm/byteorder.h>
#include <linux/brcmphy.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/platform_device.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmasp.h"
#include "bcmasp_intf_defs.h"

static int incr_ring(int index, int ring_count)
{
	index++;
	if (index == ring_count)
		return 0;

	return index;
}

/* Points to last byte of descriptor */
static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
				 int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr > end)
		return beg + DESC_SIZE - 1;

	return addr;
}

/* Points to first byte of descriptor */
static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
				  int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr >= end)
		return beg;

	return addr;
}

static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
{
	if (en) {
		tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
				       TX_EPKT_C_CFG_MISC_PT |
				       (intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
				TX_EPKT_C_CFG_MISC);
	} else {
		tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
	}
}

static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
{
	if (en)
		rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
				RX_EDPKT_CFG_ENABLE);
	else
		rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
}

static void bcmasp_set_rx_mode(struct net_device *dev)
{
	unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	spin_lock_bh(&intf->parent->mda_lock);

	bcmasp_disable_all_filters(intf);

	if (dev->flags & IFF_PROMISC)
		goto set_promisc;

	bcmasp_set_promisc(intf, 0);

	bcmasp_set_broad(intf, 1);

	bcmasp_set_oaddr(intf, dev->dev_addr, 1);

	if (dev->flags & IFF_ALLMULTI) {
		bcmasp_set_allmulti(intf, 1);
	} else {
		bcmasp_set_allmulti(intf, 0);

		netdev_for_each_mc_addr(ha, dev) {
			ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
			if (ret) {
				intf->mib.mc_filters_full_cnt++;
				goto set_promisc;
			}
		}
	}

	netdev_for_each_uc_addr(ha, dev) {
		ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
		if (ret) {
			intf->mib.uc_filters_full_cnt++;
			goto set_promisc;
		}
	}

	spin_unlock_bh(&intf->parent->mda_lock);
	return;

set_promisc:
	bcmasp_set_promisc(intf, 1);
	intf->mib.promisc_filters_cnt++;

	/* disable all filters used by this port */
	bcmasp_disable_all_filters(intf);

	spin_unlock_bh(&intf->parent->mda_lock);
}

static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
{
	struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];

	txcb->skb = NULL;
	dma_unmap_addr_set(txcb, dma_addr, 0);
	dma_unmap_len_set(txcb, dma_len, 0);
	txcb->last = false;
}
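
/* Ring occupancy check used by the TX path below. Walking the ring one slot
 * at a time from the producer index means "full" is "fewer than cnt free
 * slots before reaching the clean (consumer) index"; one slot is effectively
 * kept unused so a full ring can be told apart from an empty one.
 */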
static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
{
	int next_index, i;

	/* Check if we have enough room for cnt descriptors */
	next_index = intf->tx_spb_index;
	for (i = 0; i < cnt; i++) {
		next_index = incr_ring(next_index, DESC_RING_COUNT);
		if (next_index == intf->tx_spb_clean_index)
			return 1;
	}

	return 0;
}

static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
					   struct sk_buff *skb,
					   bool *csum_hw)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 header = 0, header2 = 0, epkt = 0;
	struct bcmasp_pkt_offload *offload;
	unsigned int header_cnt = 0;
	u8 ip_proto;
	int ret;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return skb;

	ret = skb_cow_head(skb, sizeof(*offload));
	if (ret < 0) {
		intf->mib.tx_realloc_offload_failed++;
		goto help;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(0);
		ip_proto = ip_hdr(skb)->protocol;
		header_cnt += 2;
		break;
	case htons(ETH_P_IPV6):
		header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(1);
		ip_proto = ipv6_hdr(skb)->nexthdr;
		header_cnt += 2;
		break;
	default:
		goto help;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
		epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L4;
		header_cnt++;
		break;
	case IPPROTO_UDP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
		epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L4;
		header_cnt++;
		break;
	default:
		goto help;
	}

	offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));

	header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
		  PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
	epkt |= PKT_OFFLOAD_EPKT_OP;

	offload->nop = htonl(PKT_OFFLOAD_NOP);
	offload->header = htonl(header);
	offload->header2 = htonl(header2);
	offload->epkt = htonl(epkt);
	offload->end = htonl(PKT_OFFLOAD_END_OP);
	*csum_hw = true;

	return skb;

help:
	skb_checksum_help(skb);

	return skb;
}

static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	unsigned int total_bytes, size;
	int spb_index, nr_frags, i, j;
	struct bcmasp_tx_cb *txcb;
	dma_addr_t mapping, valid;
	struct bcmasp_desc *desc;
	bool csum_hw = false;
	struct device *kdev;
	skb_frag_t *frag;

	kdev = &intf->parent->pdev->dev;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (tx_spb_ring_full(intf, nr_frags + 1)) {
		netif_stop_queue(dev);
		if (net_ratelimit())
			netdev_err(dev, "Tx Ring Full!\n");
		return NETDEV_TX_BUSY;
	}

	/* Save skb len before adding csum offload header */
	total_bytes = skb->len;
	skb = bcmasp_csum_offload(dev, skb, &csum_hw);
	if (!skb)
		return NETDEV_TX_OK;

	spb_index = intf->tx_spb_index;
	valid = intf->tx_spb_dma_valid;
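	/* Map the head fragment plus nr_frags page fragments and fill one
	 * descriptor per mapping. The first descriptor carries SOF (and the
	 * checksum-offload command flag when the offload header was
	 * prepended above); the last carries EOF and marks its control block
	 * so reclaim knows when the skb can be freed.
	 */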
	for (i = 0; i <= nr_frags; i++) {
		if (!i) {
			size = skb_headlen(skb);
			if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
				if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
					return NETDEV_TX_OK;
				size = skb->len;
			}
			mapping = dma_map_single(kdev, skb->data, size,
						 DMA_TO_DEVICE);
		} else {
			frag = &skb_shinfo(skb)->frags[i - 1];
			size = skb_frag_size(frag);
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
		}

		if (dma_mapping_error(kdev, mapping)) {
			intf->mib.tx_dma_failed++;
			spb_index = intf->tx_spb_index;
			for (j = 0; j < i; j++) {
				bcmasp_clean_txcb(intf, spb_index);
				spb_index = incr_ring(spb_index,
						      DESC_RING_COUNT);
			}
			/* Rewind so we do not have a hole */
			spb_index = intf->tx_spb_index;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		txcb = &intf->tx_cbs[spb_index];
		desc = &intf->tx_spb_cpu[spb_index];
		memset(desc, 0, sizeof(*desc));
		txcb->skb = skb;
		txcb->bytes_sent = total_bytes;
		dma_unmap_addr_set(txcb, dma_addr, mapping);
		dma_unmap_len_set(txcb, dma_len, size);
		if (!i) {
			desc->flags |= DESC_SOF;
			if (csum_hw)
				desc->flags |= DESC_EPKT_CMD;
		}

		if (i == nr_frags) {
			desc->flags |= DESC_EOF;
			txcb->last = true;
		}

		desc->buf = mapping;
		desc->size = size;
		desc->flags |= DESC_INT_EN;

		netif_dbg(intf, tx_queued, dev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  spb_index);

		spb_index = incr_ring(spb_index, DESC_RING_COUNT);
		valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
				       DESC_RING_COUNT);
	}

	/* Ensure all descriptors have been written to DRAM for the
	 * hardware to see up-to-date contents.
	 */
	wmb();

	intf->tx_spb_index = spb_index;
	intf->tx_spb_dma_valid = valid;

	skb_tx_timestamp(skb);

	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);

	if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

static void bcmasp_netif_start(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	bcmasp_set_rx_mode(dev);
	napi_enable(&intf->tx_napi);
	napi_enable(&intf->rx_napi);

	bcmasp_enable_rx_irq(intf, 1);
	bcmasp_enable_tx_irq(intf, 1);
	bcmasp_enable_phy_irq(intf, 1);

	phy_start(dev->phydev);
}

static void umac_reset(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x0, UMC_CMD);
	umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
	usleep_range(10, 100);
	/* We hold the umac in reset and bring it out of
	 * reset when phy link is up.
	 */
}

static void umac_set_hw_addr(struct bcmasp_intf *intf,
			     const unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		   addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	umac_wl(intf, mac0, UMC_MAC0);
	umac_wl(intf, mac1, UMC_MAC1);
}

static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
			    unsigned int enable)
{
	u32 reg;

	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		return;
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_wl(intf, reg, UMC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}
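
/* One-time UniMAC defaults, written while the MAC is still held in reset by
 * umac_reset() above: 0x800 (2048) is presumably the maximum frame length in
 * bytes, and 0xffff the maximum pause quanta.
 */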
static void umac_init(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x800, UMC_FRM_LEN);
	umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
	umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
}

static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long read, released = 0;
	struct bcmasp_tx_cb *txcb;
	struct bcmasp_desc *desc;
	dma_addr_t mapping;

	read = tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
	while (intf->tx_spb_dma_read != read) {
		txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
		mapping = dma_unmap_addr(txcb, dma_addr);

		dma_unmap_single(kdev, mapping,
				 dma_unmap_len(txcb, dma_len),
				 DMA_TO_DEVICE);

		if (txcb->last) {
			dev_consume_skb_any(txcb->skb);

			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->tx_packets);
			u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
			u64_stats_update_end(&stats->syncp);
		}

		desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];

		netif_dbg(intf, tx_done, intf->ndev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  intf->tx_spb_clean_index);

		bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
		released++;

		intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
						     DESC_RING_COUNT);
		intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
							intf->tx_spb_dma_addr,
							DESC_RING_COUNT);
	}

	return released;
}

static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, tx_napi);
	int released = 0;

	released = bcmasp_tx_reclaim(intf);

	napi_complete(&intf->tx_napi);

	bcmasp_enable_tx_irq(intf, 1);

	if (released)
		netif_wake_queue(intf->ndev);

	return 0;
}

static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, rx_napi);
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long processed = 0;
	struct bcmasp_desc *desc;
	struct sk_buff *skb;
	dma_addr_t valid;
	void *data;
	u64 flags;
	u32 len;

	valid = rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID) + 1;
	if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
		valid = intf->rx_edpkt_dma_addr;

	while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
		desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];

		/* Ensure that descriptor has been fully written to DRAM by
		 * hardware before reading by the CPU
		 */
		rmb();

		/* Calculate virt addr by offsetting from physical addr */
		data = intf->rx_ring_cpu +
			(DESC_ADDR(desc->buf) - intf->rx_ring_dma);

		flags = DESC_FLAGS(desc->buf);
		if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
			if (net_ratelimit()) {
				netif_err(intf, rx_status, intf->ndev,
					  "flags=0x%llx\n", flags);
			}

			u64_stats_update_begin(&stats->syncp);
			if (flags & DESC_CRC_ERR)
				u64_stats_inc(&stats->rx_crc_errs);
			if (flags & DESC_RX_SYM_ERR)
				u64_stats_inc(&stats->rx_sym_errs);
			u64_stats_update_end(&stats->syncp);

			goto next;
		}
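
		/* The RX data ring is a single streaming DMA mapping, so the
		 * packet region must be synced back to the CPU before it is
		 * copied into a freshly allocated skb below.
		 */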
		dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
					DMA_FROM_DEVICE);

		len = desc->size;

		skb = napi_alloc_skb(napi, len);
		if (!skb) {
			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->rx_dropped);
			u64_stats_update_end(&stats->syncp);
			intf->mib.alloc_rx_skb_failed++;

			goto next;
		}

		skb_put(skb, len);
		memcpy(skb->data, data, len);

		/* Strip the two leading bytes (presumably hardware alignment
		 * padding) and, when the MAC forwards it, the trailing FCS.
		 */
		skb_pull(skb, 2);
		len -= 2;
		if (likely(intf->crc_fwd)) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		if ((intf->ndev->features & NETIF_F_RXCSUM) &&
		    (desc->buf & DESC_CHKSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		skb->protocol = eth_type_trans(skb, intf->ndev);

		napi_gro_receive(napi, skb);

		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->rx_packets);
		u64_stats_add(&stats->rx_bytes, len);
		u64_stats_update_end(&stats->syncp);

next:
		rx_edpkt_cfg_wq(intf, (DESC_ADDR(desc->buf) + desc->size),
				RX_EDPKT_RING_BUFFER_READ);

		processed++;
		intf->rx_edpkt_dma_read =
			incr_first_byte(intf->rx_edpkt_dma_read,
					intf->rx_edpkt_dma_addr,
					DESC_RING_COUNT);
		intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
						 DESC_RING_COUNT);
	}

	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_read, RX_EDPKT_DMA_READ);

	if (processed < budget && napi_complete_done(&intf->rx_napi, processed))
		bcmasp_enable_rx_irq(intf, 1);

	return processed;
}

static void bcmasp_adj_link(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 cmd_bits = 0, reg;
	int changed = 0;

	if (intf->old_link != phydev->link) {
		changed = 1;
		intf->old_link = phydev->link;
	}

	if (intf->old_duplex != phydev->duplex) {
		changed = 1;
		intf->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = UMC_CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = UMC_CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = UMC_CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = UMC_CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= UMC_CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= UMC_CMD_HD_EN;

	if (intf->old_pause != phydev->pause) {
		changed = 1;
		intf->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_rl(intf, UMC_CMD);
		reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
			 UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
			 UMC_CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		if (reg & UMC_CMD_SW_RESET) {
			reg &= ~UMC_CMD_SW_RESET;
			umac_wl(intf, reg, UMC_CMD);
			udelay(2);
			reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
		}
		umac_wl(intf, reg, UMC_CMD);

		umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER);
		reg = umac_rl(intf, UMC_EEE_CTRL);
		if (phydev->enable_tx_lpi)
			reg |= EEE_EN;
		else
			reg &= ~EEE_EN;
		umac_wl(intf, reg, UMC_EEE_CTRL);
	}

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	if (phydev->link)
		reg |= RGMII_LINK;
	else
		reg &= ~RGMII_LINK;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);

	if (changed)
		phy_print_status(phydev);
}
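
/* Buffer layout set up below: the RX data ring is one physically contiguous
 * page-order allocation covered by a single streaming DMA mapping, while the
 * RX descriptor (EDPKT) and TX (SPB) rings are coherent allocations; the TX
 * control blocks shadow the TX descriptor ring one-to-one.
 */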
static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct page *buffer_pg;

	/* Alloc RX */
	intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
	buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
	if (!buffer_pg)
		return -ENOMEM;

	intf->rx_ring_cpu = page_to_virt(buffer_pg);
	intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, intf->rx_ring_dma))
		goto free_rx_buffer;

	intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
						&intf->rx_edpkt_dma_addr,
						GFP_KERNEL);
	if (!intf->rx_edpkt_cpu)
		goto free_rx_buffer_dma;

	/* Alloc TX */
	intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
					      &intf->tx_spb_dma_addr,
					      GFP_KERNEL);
	if (!intf->tx_spb_cpu)
		goto free_rx_edpkt_dma;

	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
			       GFP_KERNEL);
	if (!intf->tx_cbs)
		goto free_tx_spb_dma;

	return 0;

free_tx_spb_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
free_rx_edpkt_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
free_rx_buffer_dma:
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
free_rx_buffer:
	__free_pages(buffer_pg, intf->rx_buf_order);

	return -ENOMEM;
}

static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;

	/* RX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);

	/* TX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
	kfree(intf->tx_cbs);
}

static void bcmasp_init_rx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
	intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
	intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
	intf->rx_edpkt_index = 0;

	/* Make sure channels are disabled */
	rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);

	/* Rx SPB */
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_END);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_VALID);

	/* EDPKT */
	rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
			       RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
			      (RX_EDPKT_CFG_CFG0_64_ALN <<
			       RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
			      (RX_EDPKT_CFG_CFG0_EFRM_STUF),
			RX_EDPKT_CFG_CFG0);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);
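
	/* UMAC-to-fabric glue: enable the path and steer it to this
	 * interface's channel; the +11 channel bias and the 0xd "ok to send"
	 * value (presumably a FIFO threshold) match the fabric channel
	 * numbering assumed elsewhere in the driver.
	 */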
	umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
		   UMAC2FB_CFG_CHID_SHIFT) | (0xd <<
		   UMAC2FB_CFG_OK_SEND_SHIFT), UMAC2FB_CFG);
}

static void bcmasp_init_tx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
	intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
	intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
	intf->tx_spb_index = 0;
	intf->tx_spb_clean_index = 0;
	memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);

	/* Make sure channels are disabled */
	tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
	tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);

	/* Tx SPB */
	tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
		       TX_SPB_CTRL_XF_CTRL2);

	if (intf->parent->tx_chan_offset)
		tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
	tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);

	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
}
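
/* Power the internal EPHY up or down. The ordering below (ungate the 25MHz
 * clock, clear the bias/power-down bits while pulsing reset, with ~1 ms
 * settle delays between steps) mirrors the register writes this function
 * performs; the exact timing is presumably dictated by the PHY hardware.
 */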
static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
{
	u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
		   RGMII_EPHY_CFG_IDDQ_GLOBAL;
	u32 reg;

	reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
	if (enable) {
		reg &= ~RGMII_EPHY_CK25_DIS;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~mask;
		reg |= RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~RGMII_EPHY_RESET;
	} else {
		reg |= mask | RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);
		reg |= RGMII_EPHY_CK25_DIS;
	}
	rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
	mdelay(1);

	/* Set or clear the LED control override to avoid lighting up LEDs
	 * while the EPHY is powered off and drawing unnecessary current.
	 */
	reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
	if (enable)
		reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
	else
		reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
	rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
}

static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
{
	u32 reg;

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_OOB_DIS;
	if (enable)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

static void bcmasp_netif_deinit(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 reg, timeout = 1000;

	napi_disable(&intf->tx_napi);

	bcmasp_enable_tx(intf, 0);

	/* Flush any TX packets in the pipe */
	tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
	do {
		reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
		if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
			break;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);
	tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);

	bcmasp_tx_reclaim(intf);

	umac_enable_set(intf, UMC_CMD_TX_EN, 0);

	phy_stop(dev->phydev);

	umac_enable_set(intf, UMC_CMD_RX_EN, 0);

	bcmasp_flush_rx_port(intf);
	usleep_range(1000, 2000);
	bcmasp_enable_rx(intf, 0);

	napi_disable(&intf->rx_napi);

	/* Disable interrupts */
	bcmasp_enable_tx_irq(intf, 0);
	bcmasp_enable_rx_irq(intf, 0);
	bcmasp_enable_phy_irq(intf, 0);

	netif_napi_del(&intf->tx_napi);
	netif_napi_del(&intf->rx_napi);
}

static int bcmasp_stop(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, ifdown, dev, "bcmasp stop\n");

	/* Stop tx from updating HW */
	netif_tx_disable(dev);

	bcmasp_netif_deinit(dev);

	bcmasp_reclaim_free_buffers(intf);

	phy_disconnect(dev->phydev);

	/* Disable internal EPHY or external PHY */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);

	/* Disable the interface clocks */
	bcmasp_core_clock_set_intf(intf, false);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}
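
/* Route the port-level mux to the PHY in use: the external pads for (R)GMII
 * modes or the internal EPHY. The ID-mode-disable bit written at the end
 * controls whether the RGMII block delays TXC relative to TXD, per the
 * comment in the switch below.
 */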
static void bcmasp_configure_port(struct bcmasp_intf *intf)
{
	u32 reg, id_mode_dis = 0;

	reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
	reg &= ~RGMII_PORT_MODE_MASK;

	switch (intf->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		/* RGMII_NO_ID: TXC transitions at the same time as TXD
		 *		(requires PCB or receiver-side delay)
		 * RGMII:	Add 2ns delay on TXC (90 degree shift)
		 *
		 * ID is implicitly disabled for 100Mbps (RG)MII operation.
		 */
		id_mode_dis = RGMII_ID_MODE_DIS;
		fallthrough;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		reg |= RGMII_PORT_MODE_EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		reg |= RGMII_PORT_MODE_EXT_EPHY;
		break;
	default:
		break;
	}

	if (intf->internal_phy)
		reg |= RGMII_PORT_MODE_EPHY;

	rgmii_wl(intf, reg, RGMII_PORT_CNTRL);

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_ID_MODE_DIS;
	reg |= id_mode_dis;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}
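
/* For reference (standard phylib semantics, not ASP-specific): RGMII_ID adds
 * an internal delay on both TXC and RXC, RGMII_TXID on TXC only, RGMII_RXID
 * on RXC only, and plain RGMII adds neither. The quirk documented inside
 * bcmasp_netif_init() below deliberately remaps two of these modes before
 * connecting the PHY.
 */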
static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	phy_interface_t phy_iface = intf->phy_interface;
	u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
			PHY_BRCM_DIS_TXCRXC_NOENRGY |
			PHY_BRCM_IDDQ_SUSPEND;
	struct phy_device *phydev = NULL;
	int ret;

	/* Always enable interface clocks */
	bcmasp_core_clock_set_intf(intf, true);

	/* Enable internal PHY or external PHY before any MAC activity */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, true);
	else
		bcmasp_rgmii_mode_en_set(intf, true);
	bcmasp_configure_port(intf);

	/* This is an ugly quirk but we have not been correctly
	 * interpreting the phy_interface values and we have done that
	 * across different drivers, so at least we are consistent in
	 * our mistakes.
	 *
	 * When the Generic PHY driver is in use either the PHY has
	 * been strapped or programmed correctly by the boot loader so
	 * we should stick to our incorrect interpretation since we
	 * have validated it.
	 *
	 * Now when a dedicated PHY driver is in use, we need to
	 * reverse the meaning of the phy_interface_mode values to
	 * something that the PHY driver will interpret and act on such
	 * that we have two mistakes canceling themselves so to speak.
	 * We only do this for the two modes that GENET driver
	 * officially supports on Broadcom STB chips:
	 * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
	 * Other modes are not *officially* supported with the boot
	 * loader and the scripted environment generating Device Tree
	 * blobs for those platforms.
	 *
	 * Note that internal PHY and fixed-link configurations are not
	 * affected because they use different phy_interface_t values
	 * or the Generic PHY driver.
	 */
	switch (phy_iface) {
	case PHY_INTERFACE_MODE_RGMII:
		phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	default:
		break;
	}

	if (phy_connect) {
		phydev = of_phy_connect(dev, intf->phy_dn,
					bcmasp_adj_link, phy_flags,
					phy_iface);
		if (!phydev) {
			ret = -ENODEV;
			netdev_err(dev, "could not attach to PHY\n");
			goto err_phy_disable;
		}

		if (intf->internal_phy)
			dev->phydev->irq = PHY_MAC_INTERRUPT;

		/* Indicate that the MAC is responsible for PHY PM */
		phydev->mac_managed_pm = true;

		/* Set phylib's copy of the LPI timer */
		phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
	}

	umac_reset(intf);

	umac_init(intf);

	umac_set_hw_addr(intf, dev->dev_addr);

	intf->old_duplex = -1;
	intf->old_link = -1;
	intf->old_pause = -1;

	bcmasp_init_tx(intf);
	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
	bcmasp_enable_tx(intf, 1);

	bcmasp_init_rx(intf);
	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
	bcmasp_enable_rx(intf, 1);

	intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);

	bcmasp_netif_start(dev);

	netif_start_queue(dev);

	return 0;

err_phy_disable:
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);
	return ret;
}

static int bcmasp_open(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	int ret;

	netif_dbg(intf, ifup, dev, "bcmasp open\n");

	ret = bcmasp_alloc_buffers(intf);
	if (ret)
		return ret;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		goto err_free_mem;

	ret = bcmasp_netif_init(dev, true);
	if (ret) {
		clk_disable_unprepare(intf->parent->clk);
		goto err_free_mem;
	}

	return ret;

err_free_mem:
	bcmasp_reclaim_free_buffers(intf);

	return ret;
}

static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
	intf->mib.tx_timeout_cnt++;
}

static int bcmasp_get_phys_port_name(struct net_device *dev,
				     char *name, size_t len)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	if (snprintf(name, len, "p%d", intf->port) >= len)
		return -EINVAL;

	return 0;
}
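
/* Snapshot the 64-bit counters under the u64_stats seqcount: the
 * fetch_begin/fetch_retry pair loops until no writer (the NAPI handlers
 * above) updated the counters mid-read, which matters on 32-bit hosts where
 * these reads are not atomic.
 */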
static void bcmasp_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *stats)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct bcmasp_intf_stats64 *lstats;
	unsigned int start;

	lstats = &intf->stats64;

	do {
		start = u64_stats_fetch_begin(&lstats->syncp);
		stats->rx_packets = u64_stats_read(&lstats->rx_packets);
		stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
		stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
		stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
		stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
		stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;

		stats->tx_packets = u64_stats_read(&lstats->tx_packets);
		stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
	} while (u64_stats_fetch_retry(&lstats->syncp, start));
}

static const struct net_device_ops bcmasp_netdev_ops = {
	.ndo_open		= bcmasp_open,
	.ndo_stop		= bcmasp_stop,
	.ndo_start_xmit		= bcmasp_xmit,
	.ndo_tx_timeout		= bcmasp_tx_timeout,
	.ndo_set_rx_mode	= bcmasp_set_rx_mode,
	.ndo_get_phys_port_name	= bcmasp_get_phys_port_name,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= bcmasp_get_stats64,
};

static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
{
	/* Per port */
	intf->res.umac = priv->base + UMC_OFFSET(intf);
	intf->res.umac2fb = priv->base + (UMAC2FB_OFFSET + priv->rx_ctrl_offset +
					  (intf->port * 0x4));
	intf->res.rgmii = priv->base + RGMII_OFFSET(intf);

	/* Per ch */
	intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
	intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
	intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
	intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
	intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);

	intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
	intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
}

struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
					    struct device_node *ndev_dn, int i)
{
	struct device *dev = &priv->pdev->dev;
	struct bcmasp_intf *intf;
	struct net_device *ndev;
	u32 ch, port;
	int ret;

	if (of_property_read_u32(ndev_dn, "reg", &port)) {
		dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
		goto err;
	}

	if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
		dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
		goto err;
	}

	ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
	if (!ndev) {
		dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
		goto err;
	}
	intf = netdev_priv(ndev);

	intf->parent = priv;
	intf->ndev = ndev;
	intf->channel = ch;
	intf->port = port;
	intf->ndev_dn = ndev_dn;
	intf->index = i;

	ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
	if (ret < 0) {
		dev_err(dev, "invalid PHY mode property\n");
		goto err_free_netdev;
	}

	if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
		intf->internal_phy = true;

	intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
	if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
		ret = of_phy_register_fixed_link(ndev_dn);
		if (ret) {
			dev_warn(dev, "%s: failed to register fixed PHY\n",
				 ndev_dn->name);
			goto err_free_netdev;
		}
		intf->phy_dn = ndev_dn;
	}

	/* Map resource */
	bcmasp_map_res(priv, intf);

	if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
	     intf->phy_interface != PHY_INTERFACE_MODE_MII &&
	     intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
	    (intf->port != 1 && intf->internal_phy)) {
		netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
			   phy_modes(intf->phy_interface), intf->port);
		ret = -EINVAL;
		goto err_deregister_fixed_link;
	}

	ret = of_get_ethdev_address(ndev_dn, ndev);
	if (ret) {
		netdev_warn(ndev, "using random Ethernet MAC\n");
		eth_hw_addr_random(ndev);
	}
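
	/* needed_headroom is grown below by sizeof(struct bcmasp_pkt_offload)
	 * so that bcmasp_csum_offload() can usually skb_push() the offload
	 * descriptor without hitting the skb_cow_head() reallocation path.
	 */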
	SET_NETDEV_DEV(ndev, dev);
	ndev->netdev_ops = &bcmasp_netdev_ops;
	ndev->ethtool_ops = &bcmasp_ethtool_ops;
	intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
					  NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);
	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			  NETIF_F_RXCSUM;
	ndev->hw_features |= ndev->features;
	ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);

	netdev_sw_irq_coalesce_default_on(ndev);

	return intf;

err_deregister_fixed_link:
	if (of_phy_is_fixed_link(ndev_dn))
		of_phy_deregister_fixed_link(ndev_dn);
err_free_netdev:
	free_netdev(ndev);
err:
	return NULL;
}

void bcmasp_interface_destroy(struct bcmasp_intf *intf)
{
	if (intf->ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(intf->ndev);
	if (of_phy_is_fixed_link(intf->ndev_dn))
		of_phy_deregister_fixed_link(intf->ndev_dn);
	free_netdev(intf->ndev);
}

static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
{
	struct net_device *ndev = intf->ndev;
	u32 reg;

	reg = umac_rl(intf, UMC_MPD_CTRL);
	if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= UMC_MPD_CTRL_MPD_EN;
	reg &= ~UMC_MPD_CTRL_PSW_EN;
	if (intf->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
			UMC_PSW_MS);
		umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
			UMC_PSW_LS);
		reg |= UMC_MPD_CTRL_PSW_EN;
	}
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->wolopts & WAKE_FILTER)
		bcmasp_netfilt_suspend(intf);

	/* Bring UniMAC out of reset if needed and enable RX */
	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		reg &= ~UMC_CMD_SW_RESET;

	reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
	umac_wl(intf, reg, UMC_CMD);

	umac_enable_set(intf, UMC_CMD_RX_EN, 1);

	wakeup_intr2_core_wl(intf->parent, 0xffffffff,
			     ASP_WAKEUP_INTR2_MASK_CLEAR);

	if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
	    intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, true);

	netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}
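
/* Suspend entry point. The PHY/RGMII pads and the interface clocks are only
 * shut off when no wake-on-LAN option is armed; with WoL active the MAC must
 * keep receiving so it can match magic packets or wake filters.
 */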
int bcmasp_interface_suspend(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct net_device *dev = intf->ndev;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmasp_netif_deinit(dev);

	if (!intf->wolopts) {
		if (intf->internal_phy)
			bcmasp_ephy_enable_set(intf, false);
		else
			bcmasp_rgmii_mode_en_set(intf, false);

		/* If Wake-on-LAN is disabled, we can safely
		 * disable the network interface clocks.
		 */
		bcmasp_core_clock_set_intf(intf, false);
	}

	if (device_may_wakeup(kdev) && intf->wolopts)
		bcmasp_suspend_to_wol(intf);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}

static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
	u32 reg;

	if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled &&
	    intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, false);

	reg = umac_rl(intf, UMC_MPD_CTRL);
	reg &= ~UMC_MPD_CTRL_MPD_EN;
	umac_wl(intf, reg, UMC_MPD_CTRL);

	wakeup_intr2_core_wl(intf->parent, 0xffffffff,
			     ASP_WAKEUP_INTR2_MASK_SET);
}

int bcmasp_interface_resume(struct bcmasp_intf *intf)
{
	struct net_device *dev = intf->ndev;
	int ret;

	if (!netif_running(dev))
		return 0;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		return ret;

	ret = bcmasp_netif_init(dev, false);
	if (ret)
		goto out;

	bcmasp_resume_from_wol(intf);

	netif_device_attach(dev);

	return 0;

out:
	clk_disable_unprepare(intf->parent->clk);
	return ret;
}