Lines matching +full:umac +full:-reset
The hits below are from drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c (Broadcom ASP 2.0 Ethernet driver, Linux kernel). Each hit shows the line's number in that file, the matched source line, and the enclosing function.
1 // SPDX-License-Identifier: GPL-2.0
39 return beg + DESC_SIZE - 1; in incr_last_byte()
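incr_last_byte() maintains the TX "valid" pointer at byte granularity: it advances one descriptor at a time and, on wrap, parks at the last byte of the first descriptor (the matched return above). A minimal userspace model of that arithmetic; DESC_SIZE and the ring size are stand-in values and the real helper takes a ring-size argument, so this is a simplification, not the driver's code:

#include <stdint.h>
#include <stdio.h>

#define DESC_SIZE      16            /* stand-in descriptor size */
#define DESC_RING_SIZE (16 * 64)     /* stand-in ring size in bytes */

/* Advance a last-byte pointer by one descriptor, wrapping to the last
 * byte of the first descriptor, as the matched return suggests. */
static uint64_t incr_last_byte(uint64_t addr, uint64_t beg)
{
        addr += DESC_SIZE;
        if (addr > beg + DESC_RING_SIZE - 1)
                return beg + DESC_SIZE - 1;
        return addr;
}

int main(void)
{
        uint64_t base = 0x1000;
        uint64_t valid = base + DESC_SIZE - 1;

        for (int i = 0; i < 4; i++) {
                printf("valid = 0x%llx\n", (unsigned long long)valid);
                valid = incr_last_byte(valid, base);
        }
        return 0;
}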
63 (intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)), in bcmasp_enable_tx()
87 spin_lock_bh(&intf->parent->mda_lock); in bcmasp_set_rx_mode()
91 if (dev->flags & IFF_PROMISC) in bcmasp_set_rx_mode()
98 bcmasp_set_oaddr(intf, dev->dev_addr, 1); in bcmasp_set_rx_mode()
100 if (dev->flags & IFF_ALLMULTI) { in bcmasp_set_rx_mode()
106 ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask); in bcmasp_set_rx_mode()
108 intf->mib.mc_filters_full_cnt++; in bcmasp_set_rx_mode()
115 ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask); in bcmasp_set_rx_mode()
117 intf->mib.uc_filters_full_cnt++; in bcmasp_set_rx_mode()
122 spin_unlock_bh(&intf->parent->mda_lock); in bcmasp_set_rx_mode()
127 intf->mib.promisc_filters_cnt++; in bcmasp_set_rx_mode()
132 spin_unlock_bh(&intf->parent->mda_lock); in bcmasp_set_rx_mode()
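The shape of bcmasp_set_rx_mode() above: under the parent's mda_lock, program the device address and then one hardware MDA filter per multicast/unicast list entry; when bcmasp_set_en_mda_filter() reports the table is full, bump the relevant mib counter and fall back to promiscuous mode. A condensed sketch of that control flow (the example_* names are hypothetical stand-ins, not the driver's API):

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct example_intf {                      /* hypothetical, pared down */
        spinlock_t lock;
        u32 mc_filters_full_cnt;
        u32 promisc_filters_cnt;
};

/* Hypothetical helpers standing in for the MDA filter programming. */
static int example_set_filter(struct example_intf *intf, const u8 *addr);
static void example_set_promisc(struct example_intf *intf);

static void example_set_rx_mode(struct net_device *dev)
{
        struct example_intf *intf = netdev_priv(dev);
        struct netdev_hw_addr *ha;

        spin_lock_bh(&intf->lock);
        netdev_for_each_mc_addr(ha, dev) {
                if (example_set_filter(intf, ha->addr)) {
                        /* Filter table full: count it, stop filtering. */
                        intf->mc_filters_full_cnt++;
                        goto set_promisc;
                }
        }
        spin_unlock_bh(&intf->lock);
        return;

set_promisc:
        example_set_promisc(intf);
        intf->promisc_filters_cnt++;
        spin_unlock_bh(&intf->lock);
}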
137 struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index]; in bcmasp_clean_txcb()
139 txcb->skb = NULL; in bcmasp_clean_txcb()
142 txcb->last = false; in bcmasp_clean_txcb()
151 next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT); in tx_spb_ring_full()
152 if (next_index == intf->tx_spb_clean_index) in tx_spb_ring_full()
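tx_spb_ring_full() is the classic one-slot-reserved full test: the ring is full when advancing the producer index would land on the consumer's clean index, which keeps "full" distinguishable from "empty". A standalone model:

#include <stdbool.h>

/* Wrap an index around a ring of ring_count entries, as incr_ring()
 * does in the listing. */
static unsigned int incr_ring(unsigned int index, unsigned int ring_count)
{
        return (index + 1 == ring_count) ? 0 : index + 1;
}

/* Full when the producer's next slot is the consumer slot; one entry
 * stays unused so producer == consumer can still mean "empty". */
static bool ring_full(unsigned int producer, unsigned int clean,
                      unsigned int ring_count)
{
        return incr_ring(producer, ring_count) == clean;
}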
170 if (skb->ip_summed != CHECKSUM_PARTIAL) in bcmasp_csum_offload()
175 intf->mib.tx_realloc_offload_failed++; in bcmasp_csum_offload()
179 switch (skb->protocol) { in bcmasp_csum_offload()
184 ip_proto = ip_hdr(skb)->protocol; in bcmasp_csum_offload()
191 ip_proto = ipv6_hdr(skb)->nexthdr; in bcmasp_csum_offload()
219 offload->nop = htonl(PKT_OFFLOAD_NOP); in bcmasp_csum_offload()
220 offload->header = htonl(header); in bcmasp_csum_offload()
221 offload->header2 = htonl(header2); in bcmasp_csum_offload()
222 offload->epkt = htonl(epkt); in bcmasp_csum_offload()
223 offload->end = htonl(PKT_OFFLOAD_END_OP); in bcmasp_csum_offload()
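bcmasp_csum_offload() parses the L3/L4 headers and then writes a five-word, big-endian offload program in front of the frame; bcmasp_interface_create() reserves room for it via needed_headroom (file line 1283 below). A hedged sketch of the prepend step, with struct example_pkt_offload and the opcode values as placeholders for the driver's real definitions; a failed headroom expansion is what the tx_realloc_offload_failed counter above records:

#include <linux/skbuff.h>
#include <asm/byteorder.h>

struct example_pkt_offload {       /* placeholder layout, 5 x __be32 */
        __be32 nop;
        __be32 header;
        __be32 header2;
        __be32 epkt;
        __be32 end;
};

static struct sk_buff *example_push_offload(struct sk_buff *skb,
                                            u32 header, u32 header2,
                                            u32 epkt)
{
        struct example_pkt_offload *off;

        /* Make sure there is writable headroom for the program. */
        if (skb_cow_head(skb, sizeof(*off)))
                return NULL;    /* caller drops skb, counts the failure */

        off = (struct example_pkt_offload *)skb_push(skb, sizeof(*off));
        off->nop = htonl(0);            /* placeholder PKT_OFFLOAD_NOP */
        off->header = htonl(header);
        off->header2 = htonl(header2);
        off->epkt = htonl(epkt);
        off->end = htonl(0);            /* placeholder PKT_OFFLOAD_END_OP */
        return skb;
}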
279 kdev = &intf->parent->pdev->dev; in bcmasp_xmit()
281 nr_frags = skb_shinfo(skb)->nr_frags; in bcmasp_xmit()
291 total_bytes = skb->len; in bcmasp_xmit()
296 spb_index = intf->tx_spb_index; in bcmasp_xmit()
297 valid = intf->tx_spb_dma_valid; in bcmasp_xmit()
304 size = skb->len; in bcmasp_xmit()
306 mapping = dma_map_single(kdev, skb->data, size, in bcmasp_xmit()
309 frag = &skb_shinfo(skb)->frags[i - 1]; in bcmasp_xmit()
316 intf->mib.tx_dma_failed++; in bcmasp_xmit()
317 spb_index = intf->tx_spb_index; in bcmasp_xmit()
324 spb_index = intf->tx_spb_index; in bcmasp_xmit()
329 txcb = &intf->tx_cbs[spb_index]; in bcmasp_xmit()
330 desc = &intf->tx_spb_cpu[spb_index]; in bcmasp_xmit()
332 txcb->skb = skb; in bcmasp_xmit()
333 txcb->bytes_sent = total_bytes; in bcmasp_xmit()
337 desc->flags |= DESC_SOF; in bcmasp_xmit()
339 desc->flags |= DESC_EPKT_CMD; in bcmasp_xmit()
343 desc->flags |= DESC_EOF; in bcmasp_xmit()
344 txcb->last = true; in bcmasp_xmit()
347 desc->buf = mapping; in bcmasp_xmit()
348 desc->size = size; in bcmasp_xmit()
349 desc->flags |= DESC_INT_EN; in bcmasp_xmit()
353 __func__, &mapping, desc->size, desc->flags, in bcmasp_xmit()
357 valid = incr_last_byte(valid, intf->tx_spb_dma_addr, in bcmasp_xmit()
362 * hardware to see up-to-date contents. in bcmasp_xmit()
366 intf->tx_spb_index = spb_index; in bcmasp_xmit()
367 intf->tx_spb_dma_valid = valid; in bcmasp_xmit()
371 bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid); in bcmasp_xmit()
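The tail of bcmasp_xmit() is the usual producer publish sequence: fill descriptors, make the stores globally visible (the "hardware to see up-to-date contents" comment above marks a write barrier), then advance the software index and write the new valid pointer to the DMA block. A minimal sketch, assuming wmb() is the barrier used:

#include <linux/dma-mapping.h>
#include <asm/barrier.h>

struct example_intf {                      /* hypothetical, pared down */
        unsigned int tx_spb_index;
        dma_addr_t tx_spb_dma_valid;
};

/* Hypothetical MMIO doorbell standing in for bcmasp_intf_tx_write(). */
static void example_tx_doorbell(struct example_intf *intf, dma_addr_t valid);

static void example_tx_publish(struct example_intf *intf,
                               unsigned int next_index, dma_addr_t valid)
{
        wmb();          /* descriptor stores before the doorbell */

        intf->tx_spb_index = next_index;
        intf->tx_spb_dma_valid = valid;
        example_tx_doorbell(intf, intf->tx_spb_dma_valid);
}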
384 napi_enable(&intf->tx_napi); in bcmasp_netif_start()
385 napi_enable(&intf->rx_napi); in bcmasp_netif_start()
391 phy_start(dev->phydev); in bcmasp_netif_start()
399 /* We hold the umac in reset and bring it out of in umac_reset()
400 * reset when phy link is up. in umac_reset()
429 /* UniMAC stops on a packet boundary, wait for a full-sized packet in umac_enable_set()
445 struct bcmasp_intf_stats64 *stats = &intf->stats64; in bcmasp_tx_reclaim()
446 struct device *kdev = &intf->parent->pdev->dev; in bcmasp_tx_reclaim()
453 while (intf->tx_spb_dma_read != read) { in bcmasp_tx_reclaim()
454 txcb = &intf->tx_cbs[intf->tx_spb_clean_index]; in bcmasp_tx_reclaim()
461 if (txcb->last) { in bcmasp_tx_reclaim()
462 dev_consume_skb_any(txcb->skb); in bcmasp_tx_reclaim()
464 u64_stats_update_begin(&stats->syncp); in bcmasp_tx_reclaim()
465 u64_stats_inc(&stats->tx_packets); in bcmasp_tx_reclaim()
466 u64_stats_add(&stats->tx_bytes, txcb->bytes_sent); in bcmasp_tx_reclaim()
467 u64_stats_update_end(&stats->syncp); in bcmasp_tx_reclaim()
470 desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index]; in bcmasp_tx_reclaim()
472 netif_dbg(intf, tx_done, intf->ndev, in bcmasp_tx_reclaim()
474 __func__, &mapping, desc->size, desc->flags, in bcmasp_tx_reclaim()
475 intf->tx_spb_clean_index); in bcmasp_tx_reclaim()
477 bcmasp_clean_txcb(intf, intf->tx_spb_clean_index); in bcmasp_tx_reclaim()
480 intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index, in bcmasp_tx_reclaim()
482 intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read, in bcmasp_tx_reclaim()
483 intf->tx_spb_dma_addr, in bcmasp_tx_reclaim()
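bcmasp_tx_reclaim() walks from the software clean index toward the hardware's read pointer, unmapping each fragment, consuming the skb only on the control block flagged last, and bumping the u64 stats. A condensed model with the struct layout and unmap bookkeeping simplified; incr_ring()/incr_first_byte() are the listing's helpers and the sizes are stand-ins:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

#define DESC_RING_COUNT 64                       /* stand-in */
#define DESC_RING_SIZE  (DESC_RING_COUNT * 16)   /* stand-in */

struct example_tx_cb {
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        unsigned int dma_len;
        bool last;
};

struct example_intf {                      /* hypothetical, pared down */
        struct device *kdev;
        struct example_tx_cb *tx_cbs;
        unsigned int tx_spb_clean_index;
        dma_addr_t tx_spb_dma_read;
        dma_addr_t tx_spb_dma_addr;
};

/* Helpers as shown earlier in the listing. */
static unsigned int incr_ring(unsigned int index, unsigned int ring_count);
static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg, int ring_sz);

static void example_tx_reclaim(struct example_intf *intf, dma_addr_t hw_read)
{
        while (intf->tx_spb_dma_read != hw_read) {
                struct example_tx_cb *txcb =
                        &intf->tx_cbs[intf->tx_spb_clean_index];

                dma_unmap_single(intf->kdev, txcb->dma_addr, txcb->dma_len,
                                 DMA_TO_DEVICE);

                if (txcb->last)         /* last fragment owns the skb */
                        dev_consume_skb_any(txcb->skb);

                memset(txcb, 0, sizeof(*txcb));  /* bcmasp_clean_txcb() role */

                intf->tx_spb_clean_index =
                        incr_ring(intf->tx_spb_clean_index, DESC_RING_COUNT);
                intf->tx_spb_dma_read =
                        incr_first_byte(intf->tx_spb_dma_read,
                                        intf->tx_spb_dma_addr,
                                        DESC_RING_SIZE);
        }
}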
498 napi_complete(&intf->tx_napi); in bcmasp_tx_poll()
503 netif_wake_queue(intf->ndev); in bcmasp_tx_poll()
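bcmasp_tx_poll() follows the common TX-NAPI shape: reclaim everything available, complete NAPI, re-arm the TX interrupt, and wake the queue now that ring slots are free. A sketch of that shape (the reclaim and IRQ helpers are hypothetical stand-ins):

#include <linux/netdevice.h>

struct example_intf {                      /* hypothetical, pared down */
        struct napi_struct tx_napi;
        struct net_device *ndev;
};

static void example_tx_reclaim(struct example_intf *intf);
static void example_enable_tx_irq(struct example_intf *intf);

static int example_tx_poll(struct napi_struct *napi, int budget)
{
        struct example_intf *intf =
                container_of(napi, struct example_intf, tx_napi);

        example_tx_reclaim(intf);

        /* TX completion work is cheap, so finish in one pass. */
        napi_complete(&intf->tx_napi);
        example_enable_tx_irq(intf);

        netif_wake_queue(intf->ndev);

        return 0;
}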
512 struct bcmasp_intf_stats64 *stats = &intf->stats64; in bcmasp_rx_poll()
513 struct device *kdev = &intf->parent->pdev->dev; in bcmasp_rx_poll()
523 if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE) in bcmasp_rx_poll()
524 valid = intf->rx_edpkt_dma_addr; in bcmasp_rx_poll()
526 while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) { in bcmasp_rx_poll()
527 desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index]; in bcmasp_rx_poll()
535 data = intf->rx_ring_cpu + in bcmasp_rx_poll()
536 (DESC_ADDR(desc->buf) - intf->rx_ring_dma); in bcmasp_rx_poll()
538 flags = DESC_FLAGS(desc->buf); in bcmasp_rx_poll()
541 netif_err(intf, rx_status, intf->ndev, in bcmasp_rx_poll()
545 u64_stats_update_begin(&stats->syncp); in bcmasp_rx_poll()
547 u64_stats_inc(&stats->rx_crc_errs); in bcmasp_rx_poll()
549 u64_stats_inc(&stats->rx_sym_errs); in bcmasp_rx_poll()
550 u64_stats_update_end(&stats->syncp); in bcmasp_rx_poll()
555 dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size, in bcmasp_rx_poll()
558 len = desc->size; in bcmasp_rx_poll()
562 u64_stats_update_begin(&stats->syncp); in bcmasp_rx_poll()
563 u64_stats_inc(&stats->rx_dropped); in bcmasp_rx_poll()
564 u64_stats_update_end(&stats->syncp); in bcmasp_rx_poll()
565 intf->mib.alloc_rx_skb_failed++; in bcmasp_rx_poll()
571 memcpy(skb->data, data, len); in bcmasp_rx_poll()
574 len -= 2; in bcmasp_rx_poll()
575 if (likely(intf->crc_fwd)) { in bcmasp_rx_poll()
576 skb_trim(skb, len - ETH_FCS_LEN); in bcmasp_rx_poll()
577 len -= ETH_FCS_LEN; in bcmasp_rx_poll()
580 if ((intf->ndev->features & NETIF_F_RXCSUM) && in bcmasp_rx_poll()
581 (desc->buf & DESC_CHKSUM)) in bcmasp_rx_poll()
582 skb->ip_summed = CHECKSUM_UNNECESSARY; in bcmasp_rx_poll()
584 skb->protocol = eth_type_trans(skb, intf->ndev); in bcmasp_rx_poll()
588 u64_stats_update_begin(&stats->syncp); in bcmasp_rx_poll()
589 u64_stats_inc(&stats->rx_packets); in bcmasp_rx_poll()
590 u64_stats_add(&stats->rx_bytes, len); in bcmasp_rx_poll()
591 u64_stats_update_end(&stats->syncp); in bcmasp_rx_poll()
594 bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) + in bcmasp_rx_poll()
595 desc->size)); in bcmasp_rx_poll()
598 intf->rx_edpkt_dma_read = in bcmasp_rx_poll()
599 incr_first_byte(intf->rx_edpkt_dma_read, in bcmasp_rx_poll()
600 intf->rx_edpkt_dma_addr, in bcmasp_rx_poll()
602 intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index, in bcmasp_rx_poll()
606 bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read); in bcmasp_rx_poll()
608 if (processed < budget && napi_complete_done(&intf->rx_napi, processed)) in bcmasp_rx_poll()
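The RX loop above bounds its work by the NAPI budget and only re-enables interrupts when it finished early; napi_complete_done() returns false if the poll must stay scheduled, which is why its result gates the IRQ re-arm. The idiom in isolation (per-frame and IRQ helpers hypothetical):

#include <linux/netdevice.h>

struct example_intf {                      /* hypothetical, pared down */
        struct napi_struct rx_napi;
};

static bool example_rx_pending(struct example_intf *intf);
static void example_rx_one(struct example_intf *intf);
static void example_enable_rx_irq(struct example_intf *intf);

static int example_rx_poll(struct napi_struct *napi, int budget)
{
        struct example_intf *intf =
                container_of(napi, struct example_intf, rx_napi);
        unsigned int processed = 0;

        while (processed < budget && example_rx_pending(intf)) {
                example_rx_one(intf);   /* copy out, eth_type_trans, ... */
                processed++;
        }

        /* Re-arm only when under budget; a full budget means the core
         * should poll again without touching the interrupt. */
        if (processed < budget && napi_complete_done(&intf->rx_napi, processed))
                example_enable_rx_irq(intf);

        return processed;
}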
617 struct phy_device *phydev = dev->phydev; in bcmasp_adj_link()
621 if (intf->old_link != phydev->link) { in bcmasp_adj_link()
623 intf->old_link = phydev->link; in bcmasp_adj_link()
626 if (intf->old_duplex != phydev->duplex) { in bcmasp_adj_link()
628 intf->old_duplex = phydev->duplex; in bcmasp_adj_link()
631 switch (phydev->speed) { in bcmasp_adj_link()
649 if (phydev->duplex == DUPLEX_HALF) in bcmasp_adj_link()
652 if (intf->old_pause != phydev->pause) { in bcmasp_adj_link()
654 intf->old_pause = phydev->pause; in bcmasp_adj_link()
657 if (!phydev->pause) in bcmasp_adj_link()
663 if (phydev->link) { in bcmasp_adj_link()
677 umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER); in bcmasp_adj_link()
679 if (phydev->enable_tx_lpi) in bcmasp_adj_link()
687 if (phydev->link) in bcmasp_adj_link()
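bcmasp_adj_link() is a phylib adjust_link callback: it caches old_link/old_duplex/old_pause and reprograms the MAC only when something actually changed. The detection skeleton (the MAC programming helper is a placeholder):

#include <linux/netdevice.h>
#include <linux/phy.h>

struct example_intf {                      /* hypothetical, pared down */
        int old_link;
        int old_duplex;
        int old_pause;
};

static void example_umac_program(struct example_intf *intf,
                                 struct phy_device *phydev);

static void example_adj_link(struct net_device *dev)
{
        struct example_intf *intf = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        bool changed = false;

        if (intf->old_link != phydev->link) {
                intf->old_link = phydev->link;
                changed = true;
        }
        if (intf->old_duplex != phydev->duplex) {
                intf->old_duplex = phydev->duplex;
                changed = true;
        }
        if (intf->old_pause != phydev->pause) {
                intf->old_pause = phydev->pause;
                changed = true;
        }

        /* Seeding old_* with -1 (as bcmasp_netif_init() does below)
         * forces a full reprogram on the first link event. */
        if (changed && phydev->link)
                example_umac_program(intf, phydev);
}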
699 struct device *kdev = &intf->parent->pdev->dev; in bcmasp_alloc_buffers()
703 intf->rx_buf_order = get_order(RING_BUFFER_SIZE); in bcmasp_alloc_buffers()
704 buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order); in bcmasp_alloc_buffers()
706 return -ENOMEM; in bcmasp_alloc_buffers()
708 intf->rx_ring_cpu = page_to_virt(buffer_pg); in bcmasp_alloc_buffers()
709 intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE, in bcmasp_alloc_buffers()
711 if (dma_mapping_error(kdev, intf->rx_ring_dma)) in bcmasp_alloc_buffers()
714 intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE, in bcmasp_alloc_buffers()
715 &intf->rx_edpkt_dma_addr, GFP_KERNEL); in bcmasp_alloc_buffers()
716 if (!intf->rx_edpkt_cpu) in bcmasp_alloc_buffers()
720 intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE, in bcmasp_alloc_buffers()
721 &intf->tx_spb_dma_addr, GFP_KERNEL); in bcmasp_alloc_buffers()
722 if (!intf->tx_spb_cpu) in bcmasp_alloc_buffers()
725 intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb), in bcmasp_alloc_buffers()
727 if (!intf->tx_cbs) in bcmasp_alloc_buffers()
733 dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu, in bcmasp_alloc_buffers()
734 intf->tx_spb_dma_addr); in bcmasp_alloc_buffers()
736 dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu, in bcmasp_alloc_buffers()
737 intf->rx_edpkt_dma_addr); in bcmasp_alloc_buffers()
739 dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE, in bcmasp_alloc_buffers()
742 __free_pages(buffer_pg, intf->rx_buf_order); in bcmasp_alloc_buffers()
744 return -ENOMEM; in bcmasp_alloc_buffers()
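bcmasp_alloc_buffers() is a standard goto-unwind ladder: each allocation failure frees exactly what the earlier steps set up, in reverse order, and the function returns -ENOMEM. A condensed version without the RX page mapping step (ring sizes are stand-ins):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define DESC_RING_SIZE  4096    /* stand-in */
#define DESC_RING_COUNT 64      /* stand-in */

struct sk_buff;
struct example_tx_cb { struct sk_buff *skb; };

struct example_intf {                      /* hypothetical, pared down */
        struct device *kdev;
        void *rx_edpkt_cpu;
        dma_addr_t rx_edpkt_dma_addr;
        void *tx_spb_cpu;
        dma_addr_t tx_spb_dma_addr;
        struct example_tx_cb *tx_cbs;
};

static int example_alloc_buffers(struct example_intf *intf)
{
        struct device *kdev = intf->kdev;

        intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
                                                &intf->rx_edpkt_dma_addr,
                                                GFP_KERNEL);
        if (!intf->rx_edpkt_cpu)
                return -ENOMEM;

        intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
                                              &intf->tx_spb_dma_addr,
                                              GFP_KERNEL);
        if (!intf->tx_spb_cpu)
                goto free_rx_edpkt;

        intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(*intf->tx_cbs),
                               GFP_KERNEL);
        if (!intf->tx_cbs)
                goto free_tx_spb;

        return 0;

free_tx_spb:
        dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
                          intf->tx_spb_dma_addr);
free_rx_edpkt:
        dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
                          intf->rx_edpkt_dma_addr);
        return -ENOMEM;
}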
749 struct device *kdev = &intf->parent->pdev->dev; in bcmasp_reclaim_free_buffers()
752 dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu, in bcmasp_reclaim_free_buffers()
753 intf->rx_edpkt_dma_addr); in bcmasp_reclaim_free_buffers()
754 dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE, in bcmasp_reclaim_free_buffers()
756 __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order); in bcmasp_reclaim_free_buffers()
759 dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu, in bcmasp_reclaim_free_buffers()
760 intf->tx_spb_dma_addr); in bcmasp_reclaim_free_buffers()
761 kfree(intf->tx_cbs); in bcmasp_reclaim_free_buffers()
767 intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1; in bcmasp_init_rx()
768 intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1); in bcmasp_init_rx()
769 intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr; in bcmasp_init_rx()
770 intf->rx_edpkt_index = 0; in bcmasp_init_rx()
776 rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ); in bcmasp_init_rx()
777 rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE); in bcmasp_init_rx()
778 rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE); in bcmasp_init_rx()
779 rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid, in bcmasp_init_rx()
781 rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid, in bcmasp_init_rx()
791 rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE); in bcmasp_init_rx()
792 rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ); in bcmasp_init_rx()
793 rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE); in bcmasp_init_rx()
794 rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END); in bcmasp_init_rx()
795 rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID); in bcmasp_init_rx()
797 umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) << in bcmasp_init_rx()
806 intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1; in bcmasp_init_tx()
807 intf->tx_spb_dma_read = intf->tx_spb_dma_addr; in bcmasp_init_tx()
808 intf->tx_spb_index = 0; in bcmasp_init_tx()
809 intf->tx_spb_clean_index = 0; in bcmasp_init_tx()
810 memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT); in bcmasp_init_tx()
817 tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT), in bcmasp_init_tx()
820 if (intf->parent->tx_chan_offset) in bcmasp_init_tx()
821 tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR); in bcmasp_init_tx()
824 tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ); in bcmasp_init_tx()
825 tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE); in bcmasp_init_tx()
826 tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END); in bcmasp_init_tx()
827 tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID); in bcmasp_init_tx()
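bcmasp_init_rx()/bcmasp_init_tx() describe each ring to the hardware with four byte addresses: READ and BASE start at the ring's first byte, END and VALID at its last, so the ring comes up empty. The *_wq helpers suggest 64-bit register writes; a sketch with placeholder offsets, using lo_hi_writeq() as one portable way to issue them:

#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#define RING_READ  0x00   /* placeholder register offsets */
#define RING_BASE  0x08
#define RING_END   0x10
#define RING_VALID 0x18

static void example_ring_init(void __iomem *regs, dma_addr_t base,
                              size_t size)
{
        dma_addr_t end = base + size - 1;

        lo_hi_writeq(base, regs + RING_READ);
        lo_hi_writeq(base, regs + RING_BASE);
        lo_hi_writeq(end,  regs + RING_END);
        lo_hi_writeq(end,  regs + RING_VALID);
}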
886 napi_disable(&intf->tx_napi); in bcmasp_netif_deinit()
897 } while (timeout-- > 0); in bcmasp_netif_deinit()
904 phy_stop(dev->phydev); in bcmasp_netif_deinit()
912 napi_disable(&intf->rx_napi); in bcmasp_netif_deinit()
919 netif_napi_del(&intf->tx_napi); in bcmasp_netif_deinit()
920 netif_napi_del(&intf->rx_napi); in bcmasp_netif_deinit()
936 phy_disconnect(dev->phydev); in bcmasp_stop()
939 if (intf->internal_phy) in bcmasp_stop()
947 clk_disable_unprepare(intf->parent->clk); in bcmasp_stop()
959 switch (intf->phy_interface) { in bcmasp_configure_port()
962 * (requires PCB or receiver-side delay) in bcmasp_configure_port()
979 if (intf->internal_phy) in bcmasp_configure_port()
993 phy_interface_t phy_iface = intf->phy_interface; in bcmasp_netif_init()
1004 if (intf->internal_phy) in bcmasp_netif_init()
1031 * Note that internal PHY and fixed-link configurations are not in bcmasp_netif_init()
1047 phydev = of_phy_connect(dev, intf->phy_dn, in bcmasp_netif_init()
1051 ret = -ENODEV; in bcmasp_netif_init()
1056 if (intf->internal_phy) in bcmasp_netif_init()
1057 dev->phydev->irq = PHY_MAC_INTERRUPT; in bcmasp_netif_init()
1060 phydev->mac_managed_pm = true; in bcmasp_netif_init()
1063 phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER); in bcmasp_netif_init()
1070 umac_set_hw_addr(intf, dev->dev_addr); in bcmasp_netif_init()
1072 intf->old_duplex = -1; in bcmasp_netif_init()
1073 intf->old_link = -1; in bcmasp_netif_init()
1074 intf->old_pause = -1; in bcmasp_netif_init()
1077 netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll); in bcmasp_netif_init()
1081 netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll); in bcmasp_netif_init()
1084 intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD); in bcmasp_netif_init()
1093 if (intf->internal_phy) in bcmasp_netif_init()
1111 ret = clk_prepare_enable(intf->parent->clk); in bcmasp_open()
1117 clk_disable_unprepare(intf->parent->clk); in bcmasp_open()
1134 intf->mib.tx_timeout_cnt++; in bcmasp_tx_timeout()
1142 if (snprintf(name, len, "p%d", intf->port) >= len) in bcmasp_get_phys_port_name()
1143 return -EINVAL; in bcmasp_get_phys_port_name()
1155 lstats = &intf->stats64; in bcmasp_get_stats64()
1158 start = u64_stats_fetch_begin(&lstats->syncp); in bcmasp_get_stats64()
1159 stats->rx_packets = u64_stats_read(&lstats->rx_packets); in bcmasp_get_stats64()
1160 stats->rx_bytes = u64_stats_read(&lstats->rx_bytes); in bcmasp_get_stats64()
1161 stats->rx_dropped = u64_stats_read(&lstats->rx_dropped); in bcmasp_get_stats64()
1162 stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs); in bcmasp_get_stats64()
1163 stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs); in bcmasp_get_stats64()
1164 stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors; in bcmasp_get_stats64()
1166 stats->tx_packets = u64_stats_read(&lstats->tx_packets); in bcmasp_get_stats64()
1167 stats->tx_bytes = u64_stats_read(&lstats->tx_bytes); in bcmasp_get_stats64()
1168 } while (u64_stats_fetch_retry(&lstats->syncp, start)); in bcmasp_get_stats64()
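bcmasp_get_stats64() pairs with the u64_stats_update_begin()/end() writers in the TX-reclaim and RX-poll paths: the reader loops until a snapshot did not race an update, which costs nothing on 64-bit and degrades to a seqcount retry on 32-bit. Both halves of the pattern in one place:

#include <linux/u64_stats_sync.h>

struct example_stats {
        struct u64_stats_sync syncp;    /* u64_stats_init() at setup */
        u64_stats_t tx_packets;
};

/* Writer: datapath increments inside an update section. */
static void example_stats_inc(struct example_stats *s)
{
        u64_stats_update_begin(&s->syncp);
        u64_stats_inc(&s->tx_packets);
        u64_stats_update_end(&s->syncp);
}

/* Reader: retry until the snapshot is consistent. */
static u64 example_stats_read(struct example_stats *s)
{
        unsigned int start;
        u64 pkts;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                pkts = u64_stats_read(&s->tx_packets);
        } while (u64_stats_fetch_retry(&s->syncp, start));

        return pkts;
}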
1186 intf->res.umac = priv->base + UMC_OFFSET(intf); in bcmasp_map_res()
1187 intf->res.umac2fb = priv->base + (UMAC2FB_OFFSET + priv->rx_ctrl_offset + in bcmasp_map_res()
1188 (intf->port * 0x4)); in bcmasp_map_res()
1189 intf->res.rgmii = priv->base + RGMII_OFFSET(intf); in bcmasp_map_res()
1192 intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf); in bcmasp_map_res()
1193 intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf); in bcmasp_map_res()
1194 intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf); in bcmasp_map_res()
1195 intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf); in bcmasp_map_res()
1196 intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf); in bcmasp_map_res()
1198 intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf); in bcmasp_map_res()
1199 intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf); in bcmasp_map_res()
1205 struct device *dev = &priv->pdev->dev; in bcmasp_interface_create()
1211 dev_warn(dev, "%s: invalid port number\n", ndev_dn->name); in bcmasp_interface_create()
1216 dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name); in bcmasp_interface_create()
1222 dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name); in bcmasp_interface_create()
1227 intf->parent = priv; in bcmasp_interface_create()
1228 intf->ndev = ndev; in bcmasp_interface_create()
1229 intf->channel = ch; in bcmasp_interface_create()
1230 intf->port = port; in bcmasp_interface_create()
1231 intf->ndev_dn = ndev_dn; in bcmasp_interface_create()
1232 intf->index = i; in bcmasp_interface_create()
1234 ret = of_get_phy_mode(ndev_dn, &intf->phy_interface); in bcmasp_interface_create()
1240 if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL) in bcmasp_interface_create()
1241 intf->internal_phy = true; in bcmasp_interface_create()
1243 intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0); in bcmasp_interface_create()
1244 if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) { in bcmasp_interface_create()
1248 ndev_dn->name); in bcmasp_interface_create()
1251 intf->phy_dn = ndev_dn; in bcmasp_interface_create()
1257 if ((!phy_interface_mode_is_rgmii(intf->phy_interface) && in bcmasp_interface_create()
1258 intf->phy_interface != PHY_INTERFACE_MODE_MII && in bcmasp_interface_create()
1259 intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) || in bcmasp_interface_create()
1260 (intf->port != 1 && intf->internal_phy)) { in bcmasp_interface_create()
1261 netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n", in bcmasp_interface_create()
1262 phy_modes(intf->phy_interface), intf->port); in bcmasp_interface_create()
1263 ret = -EINVAL; in bcmasp_interface_create()
1274 intf->ops = &bcmasp_intf_ops; in bcmasp_interface_create()
1275 ndev->netdev_ops = &bcmasp_netdev_ops; in bcmasp_interface_create()
1276 ndev->ethtool_ops = &bcmasp_ethtool_ops; in bcmasp_interface_create()
1277 intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV | in bcmasp_interface_create()
1280 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | in bcmasp_interface_create()
1282 ndev->hw_features |= ndev->features; in bcmasp_interface_create()
1283 ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload); in bcmasp_interface_create()
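bcmasp_interface_create() pulls its PHY setup from the device tree: the PHY mode, an internal-PHY check, the phy-handle phandle, and a fixed-link fallback where the MAC node itself doubles as the PHY node. A condensed sketch of that discovery order:

#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>

struct example_intf {                      /* hypothetical, pared down */
        phy_interface_t phy_interface;
        bool internal_phy;
        struct device_node *phy_dn;
};

static int example_get_phy(struct device_node *dn, struct example_intf *intf)
{
        int ret;

        ret = of_get_phy_mode(dn, &intf->phy_interface);
        if (ret)
                return ret;

        if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
                intf->internal_phy = true;

        intf->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
        if (!intf->phy_dn && of_phy_is_fixed_link(dn)) {
                ret = of_phy_register_fixed_link(dn);
                if (ret)
                        return ret;
                /* Fixed link: the MAC node stands in for the PHY node. */
                intf->phy_dn = dn;
        }

        return 0;
}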
1297 if (intf->ndev->reg_state == NETREG_REGISTERED) in bcmasp_interface_destroy()
1298 unregister_netdev(intf->ndev); in bcmasp_interface_destroy()
1299 if (of_phy_is_fixed_link(intf->ndev_dn)) in bcmasp_interface_destroy()
1300 of_phy_deregister_fixed_link(intf->ndev_dn); in bcmasp_interface_destroy()
1301 free_netdev(intf->ndev); in bcmasp_interface_destroy()
1306 struct net_device *ndev = intf->ndev; in bcmasp_suspend_to_wol()
1310 if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) in bcmasp_suspend_to_wol()
1313 if (intf->wolopts & WAKE_MAGICSECURE) { in bcmasp_suspend_to_wol()
1315 umac_wl(intf, get_unaligned_be16(&intf->sopass[0]), in bcmasp_suspend_to_wol()
1317 umac_wl(intf, get_unaligned_be32(&intf->sopass[2]), in bcmasp_suspend_to_wol()
1323 if (intf->wolopts & WAKE_FILTER) in bcmasp_suspend_to_wol()
1326 /* Bring UniMAC out of reset if needed and enable RX */ in bcmasp_suspend_to_wol()
1336 if (intf->parent->wol_irq > 0) { in bcmasp_suspend_to_wol()
1337 wakeup_intr2_core_wl(intf->parent, 0xffffffff, in bcmasp_suspend_to_wol()
1341 if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled && in bcmasp_suspend_to_wol()
1342 intf->parent->eee_fixup) in bcmasp_suspend_to_wol()
1343 intf->parent->eee_fixup(intf, true); in bcmasp_suspend_to_wol()
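The two umac_wl() writes above split the 6-byte SecureOn password across a 16-bit and a 32-bit register, big-endian. A sketch of that split; the register offsets and the write helper are placeholders:

#include <asm/unaligned.h>
#include <linux/io.h>

#define UMC_PSW_MS 0x00    /* placeholder offsets */
#define UMC_PSW_LS 0x04

static void example_umac_wl(void __iomem *umac, u32 val, u32 off);

static void example_write_sopass(void __iomem *umac, const u8 sopass[6])
{
        /* Bytes 0-1 into the "MS" register, bytes 2-5 into "LS". */
        example_umac_wl(umac, get_unaligned_be16(&sopass[0]), UMC_PSW_MS);
        example_umac_wl(umac, get_unaligned_be32(&sopass[2]), UMC_PSW_LS);
}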
1350 struct device *kdev = &intf->parent->pdev->dev; in bcmasp_interface_suspend()
1351 struct net_device *dev = intf->ndev; in bcmasp_interface_suspend()
1360 if (!intf->wolopts) { in bcmasp_interface_suspend()
1361 if (intf->internal_phy) in bcmasp_interface_suspend()
1366 /* If Wake-on-LAN is disabled, we can safely in bcmasp_interface_suspend()
1372 if (device_may_wakeup(kdev) && intf->wolopts) in bcmasp_interface_suspend()
1375 clk_disable_unprepare(intf->parent->clk); in bcmasp_interface_suspend()
1384 if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled && in bcmasp_resume_from_wol()
1385 intf->parent->eee_fixup) in bcmasp_resume_from_wol()
1386 intf->parent->eee_fixup(intf, false); in bcmasp_resume_from_wol()
1392 if (intf->parent->wol_irq > 0) { in bcmasp_resume_from_wol()
1393 wakeup_intr2_core_wl(intf->parent, 0xffffffff, in bcmasp_resume_from_wol()
1400 struct net_device *dev = intf->ndev; in bcmasp_interface_resume()
1406 ret = clk_prepare_enable(intf->parent->clk); in bcmasp_interface_resume()
1421 clk_disable_unprepare(intf->parent->clk); in bcmasp_interface_resume()