Lines Matching +full:systemport +full:- +full:v1
1 // SPDX-License-Identifier: GPL-2.0-only
30 /* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact same layout, but shifted up by 4 bytes */
35 if (priv->is_lite && off >= RDMA_STATUS) in rdma_readl()
37 return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off); in rdma_readl()
42 if (priv->is_lite && off >= RDMA_STATUS) in rdma_writel()
44 writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off); in rdma_writel()
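
The guarded offsets at file lines 35 and 42 imply the elided branch bodies nudge the register offset before the MMIO access. A minimal, runnable sketch of such a windowed accessor; the 4-byte shift and both constants are assumptions for illustration, not the driver's definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define RDMA_STATUS          0x24    /* assumed threshold */
    #define SYS_PORT_RDMA_OFFSET 0x2000  /* assumed window base */

    /* Compute the effective offset: Lite parts are assumed to shift
     * registers at or past RDMA_STATUS up by 4 bytes.
     */
    static uint32_t lite_rdma_off(int is_lite, uint32_t off)
    {
        if (is_lite && off >= RDMA_STATUS)
            off += 4;
        return SYS_PORT_RDMA_OFFSET + off;
    }

    int main(void)
    {
        /* the same logical register lands 4 bytes apart on Lite */
        printf("0x%x vs 0x%x\n", (unsigned)lite_rdma_off(0, 0x28),
               (unsigned)lite_rdma_off(1, 0x28));
        return 0;
    }
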
49 if (!priv->is_lite) { in tdma_control_bit()
59 /* L2-interrupt masking/unmasking helpers; they automatically save the applied
60 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot paths.
66 priv->irq##which##_mask &= ~(mask); \
73 priv->irq##which##_mask |= (mask); \
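
File lines 66 and 73 suggest a token-pasting macro that stamps out per-controller mask_clear/mask_set helpers while mirroring the hardware mask in priv->irqN_mask. A runnable sketch under that assumption, with the register write stubbed so the software copy is observable:

    #include <stdint.h>
    #include <stdio.h>

    struct demo_priv {
        uint32_t irq0_mask;
        uint32_t irq1_mask;
    };

    /* stand-in for the INTRL2 CPU_MASK_SET/CLEAR register writes */
    static void intrl2_write(int which, uint32_t val, int set)
    {
        printf("INTRL2_%d CPU_MASK_%s <- 0x%08x\n", which,
               set ? "SET" : "CLEAR", (unsigned)val);
    }

    #define DEMO_INTR_L2(which)                                         \
    static void intrl2_##which##_mask_clear(struct demo_priv *p,        \
                                            uint32_t mask)              \
    {                                                                   \
        p->irq##which##_mask &= ~(mask);  /* keep the software copy */  \
        intrl2_write(which, mask, 0);                                   \
    }                                                                   \
    static void intrl2_##which##_mask_set(struct demo_priv *p,          \
                                          uint32_t mask)                \
    {                                                                   \
        p->irq##which##_mask |= (mask);                                 \
        intrl2_write(which, mask, 1);                                   \
    }

    DEMO_INTR_L2(0)
    DEMO_INTR_L2(1)

    int main(void)
    {
        struct demo_priv p = { 0, 0 };

        intrl2_0_mask_set(&p, 0x3);
        intrl2_0_mask_clear(&p, 0x1);
        intrl2_1_mask_set(&p, 0x1);
        intrl2_1_mask_clear(&p, 0x1);
        /* no CPU_MASK_STATUS read needed: prints 0x2 and 0x0 */
        printf("soft copies: irq0=0x%x irq1=0x%x\n",
               (unsigned)p.irq0_mask, (unsigned)p.irq1_mask);
        return 0;
    }
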
80 * nanoseconds), so keep the 64-bit check explicit here to save
81 * one register write per packet on 32-bit platforms.
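
A self-contained illustration of the HI/LO split the comment is about: on 32-bit platforms the upper word of a DMA address is always zero, so skipping the HI write saves one MMIO access per packet. The 8-bit HI mask is an assumption, consistent with the 40-bit DMA mask requested at file line 2456:

    #include <stdint.h>
    #include <stdio.h>

    #define DESC_ADDR_HI_MASK 0xff  /* assumed: 40-bit addresses, 8 HI bits */

    int main(void)
    {
        uint64_t addr = 0x12345678abULL;  /* example 40-bit DMA address */
        uint32_t hi = (uint32_t)(addr >> 32) & DESC_ADDR_HI_MASK;
        uint32_t lo = (uint32_t)addr;

        /* a 32-bit build would write only 'lo'; 'hi' is known-zero there */
        printf("hi=0x%02x lo=0x%08x\n", (unsigned)hi, (unsigned)lo);
        return 0;
    }
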
101 priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); in bcm_sysport_set_rx_csum()
107 if (priv->rx_chk_en) in bcm_sysport_set_rx_csum()
113 * a valid CHK bit to be set in the per-packet status word in bcm_sysport_set_rx_csum()
115 if (priv->rx_chk_en && priv->crc_fwd) in bcm_sysport_set_rx_csum()
121 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom in bcm_sysport_set_rx_csum()
141 priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in bcm_sysport_set_tx_csum()
144 if (priv->tsb_en) in bcm_sysport_set_tx_csum()
169 ret = clk_prepare_enable(priv->clk); in bcm_sysport_set_features()
174 if (!priv->is_lite) in bcm_sysport_set_features()
175 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); in bcm_sysport_set_features()
177 priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) & in bcm_sysport_set_features()
183 clk_disable_unprepare(priv->clk); in bcm_sysport_set_features()
281 /* Per TX-queue statistics are dynamically appended */
289 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); in bcm_sysport_get_drvinfo()
290 strscpy(info->bus_info, "platform", sizeof(info->bus_info)); in bcm_sysport_get_drvinfo()
297 return priv->msg_enable; in bcm_sysport_get_msglvl()
304 priv->msg_enable = enable; in bcm_sysport_set_msglvl()
332 if (priv->is_lite && in bcm_sysport_get_sset_count()
333 !bcm_sysport_lite_stat_valid(s->type)) in bcm_sysport_get_sset_count()
337 /* Include per-queue statistics */ in bcm_sysport_get_sset_count()
338 return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT; in bcm_sysport_get_sset_count()
340 return -EOPNOTSUPP; in bcm_sysport_get_sset_count()
355 if (priv->is_lite && in bcm_sysport_get_strings()
356 !bcm_sysport_lite_stat_valid(s->type)) in bcm_sysport_get_strings()
359 ethtool_puts(&data, s->stat_string); in bcm_sysport_get_strings()
362 for (i = 0; i < dev->num_tx_queues; i++) { in bcm_sysport_get_strings()
383 switch (s->type) { in bcm_sysport_update_mib_counters()
391 if (priv->is_lite) in bcm_sysport_update_mib_counters()
394 if (s->type != BCM_SYSPORT_STAT_MIB_RX) in bcm_sysport_update_mib_counters()
399 val = rxchk_readl(priv, s->reg_offset); in bcm_sysport_update_mib_counters()
401 rxchk_writel(priv, 0, s->reg_offset); in bcm_sysport_update_mib_counters()
404 val = rbuf_readl(priv, s->reg_offset); in bcm_sysport_update_mib_counters()
406 rbuf_writel(priv, 0, s->reg_offset); in bcm_sysport_update_mib_counters()
409 if (!priv->is_lite) in bcm_sysport_update_mib_counters()
412 val = rdma_readl(priv, s->reg_offset); in bcm_sysport_update_mib_counters()
414 rdma_writel(priv, 0, s->reg_offset); in bcm_sysport_update_mib_counters()
418 j += s->stat_sizeof; in bcm_sysport_update_mib_counters()
419 p = (char *)priv + s->stat_offset; in bcm_sysport_update_mib_counters()
423 netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n"); in bcm_sysport_update_mib_counters()
434 for (q = 0; q < priv->netdev->num_tx_queues; q++) { in bcm_sysport_update_tx_stats()
435 ring = &priv->tx_rings[q]; in bcm_sysport_update_tx_stats()
437 start = u64_stats_fetch_begin(&priv->syncp); in bcm_sysport_update_tx_stats()
438 bytes = ring->bytes; in bcm_sysport_update_tx_stats()
439 packets = ring->packets; in bcm_sysport_update_tx_stats()
440 } while (u64_stats_fetch_retry(&priv->syncp, start)); in bcm_sysport_update_tx_stats()
451 struct bcm_sysport_stats64 *stats64 = &priv->stats64; in bcm_sysport_get_stats()
452 struct u64_stats_sync *syncp = &priv->syncp; in bcm_sysport_get_stats()
461 stats64->tx_bytes = tx_bytes; in bcm_sysport_get_stats()
462 stats64->tx_packets = tx_packets; in bcm_sysport_get_stats()
470 if (s->type == BCM_SYSPORT_STAT_NETDEV) in bcm_sysport_get_stats()
471 p = (char *)&dev->stats; in bcm_sysport_get_stats()
472 else if (s->type == BCM_SYSPORT_STAT_NETDEV64) in bcm_sysport_get_stats()
477 if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type)) in bcm_sysport_get_stats()
479 p += s->stat_offset; in bcm_sysport_get_stats()
481 if (s->stat_sizeof == sizeof(u64) && in bcm_sysport_get_stats()
482 s->type == BCM_SYSPORT_STAT_NETDEV64) { in bcm_sysport_get_stats()
492 /* On SYSTEMPORT Lite the statistics have holes, so j would be off; recompute it from the exposed count in bcm_sysport_get_stats()
497 j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) - in bcm_sysport_get_stats()
498 dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT; in bcm_sysport_get_stats()
500 for (i = 0; i < dev->num_tx_queues; i++) { in bcm_sysport_get_stats()
501 ring = &priv->tx_rings[i]; in bcm_sysport_get_stats()
502 data[j] = ring->packets; in bcm_sysport_get_stats()
504 data[j] = ring->bytes; in bcm_sysport_get_stats()
514 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; in bcm_sysport_get_wol()
515 wol->wolopts = priv->wolopts; in bcm_sysport_get_wol()
517 if (!(priv->wolopts & WAKE_MAGICSECURE)) in bcm_sysport_get_wol()
520 memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); in bcm_sysport_get_wol()
527 struct device *kdev = &priv->pdev->dev; in bcm_sysport_set_wol()
531 return -ENOTSUPP; in bcm_sysport_set_wol()
533 if (wol->wolopts & ~supported) in bcm_sysport_set_wol()
534 return -EINVAL; in bcm_sysport_set_wol()
536 if (wol->wolopts & WAKE_MAGICSECURE) in bcm_sysport_set_wol()
537 memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); in bcm_sysport_set_wol()
540 if (wol->wolopts) { in bcm_sysport_set_wol()
542 if (priv->wol_irq_disabled) in bcm_sysport_set_wol()
543 enable_irq_wake(priv->wol_irq); in bcm_sysport_set_wol()
544 priv->wol_irq_disabled = 0; in bcm_sysport_set_wol()
548 if (!priv->wol_irq_disabled) in bcm_sysport_set_wol()
549 disable_irq_wake(priv->wol_irq); in bcm_sysport_set_wol()
550 priv->wol_irq_disabled = 1; in bcm_sysport_set_wol()
553 priv->wolopts = wol->wolopts; in bcm_sysport_set_wol()
574 struct bcm_sysport_priv *priv = ring->priv; in bcm_sysport_set_tx_coalesce()
577 reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index)); in bcm_sysport_set_tx_coalesce()
580 reg |= ec->tx_max_coalesced_frames; in bcm_sysport_set_tx_coalesce()
581 reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) << in bcm_sysport_set_tx_coalesce()
583 tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index)); in bcm_sysport_set_tx_coalesce()
596 ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000; in bcm_sysport_get_coalesce()
597 ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK; in bcm_sysport_get_coalesce()
601 ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000; in bcm_sysport_get_coalesce()
602 ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK; in bcm_sysport_get_coalesce()
603 ec->use_adaptive_rx_coalesce = priv->dim.use_dim; in bcm_sysport_get_coalesce()
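
The conversions at file lines 581 and 596 imply one interrupt-timeout tick is 8.192 microseconds. A worked round-trip showing the rounding behaviour:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int usecs = 100;
        /* set path: DIV_ROUND_UP(100 * 1000, 8192) = 13 ticks */
        unsigned int ticks = DIV_ROUND_UP(usecs * 1000, 8192);
        /* get path: 13 * 8192 / 1000 reads back as 106 us */
        unsigned int back = ticks * 8192 / 1000;

        printf("%u us -> %u ticks -> %u us\n", usecs, ticks, back);
        return 0;
    }
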
622 if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK || in bcm_sysport_set_coalesce()
623 ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 || in bcm_sysport_set_coalesce()
624 ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK || in bcm_sysport_set_coalesce()
625 ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1) in bcm_sysport_set_coalesce()
626 return -EINVAL; in bcm_sysport_set_coalesce()
628 if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) || in bcm_sysport_set_coalesce()
629 (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)) in bcm_sysport_set_coalesce()
630 return -EINVAL; in bcm_sysport_set_coalesce()
632 for (i = 0; i < dev->num_tx_queues; i++) in bcm_sysport_set_coalesce()
633 bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec); in bcm_sysport_set_coalesce()
635 priv->rx_coalesce_usecs = ec->rx_coalesce_usecs; in bcm_sysport_set_coalesce()
636 priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; in bcm_sysport_set_coalesce()
637 usecs = priv->rx_coalesce_usecs; in bcm_sysport_set_coalesce()
638 pkts = priv->rx_max_coalesced_frames; in bcm_sysport_set_coalesce()
640 if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) { in bcm_sysport_set_coalesce()
641 moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode); in bcm_sysport_set_coalesce()
646 priv->dim.use_dim = ec->use_adaptive_rx_coalesce; in bcm_sysport_set_coalesce()
656 dev_consume_skb_any(cb->skb); in bcm_sysport_free_cb()
657 cb->skb = NULL; in bcm_sysport_free_cb()
664 struct device *kdev = &priv->pdev->dev; in bcm_sysport_rx_refill()
665 struct net_device *ndev = priv->netdev; in bcm_sysport_rx_refill()
670 skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH, in bcm_sysport_rx_refill()
673 priv->mib.alloc_rx_buff_failed++; in bcm_sysport_rx_refill()
678 mapping = dma_map_single(kdev, skb->data, in bcm_sysport_rx_refill()
681 priv->mib.rx_dma_failed++; in bcm_sysport_rx_refill()
688 rx_skb = cb->skb; in bcm_sysport_rx_refill()
694 cb->skb = skb; in bcm_sysport_rx_refill()
696 dma_desc_set_addr(priv, cb->bd_addr, mapping); in bcm_sysport_rx_refill()
710 for (i = 0; i < priv->num_rx_bds; i++) { in bcm_sysport_alloc_rx_bufs()
711 cb = &priv->rx_cbs[i]; in bcm_sysport_alloc_rx_bufs()
714 if (!cb->skb) in bcm_sysport_alloc_rx_bufs()
715 return -ENOMEM; in bcm_sysport_alloc_rx_bufs()
725 struct bcm_sysport_stats64 *stats64 = &priv->stats64; in bcm_sysport_desc_rx()
726 struct net_device *ndev = priv->netdev; in bcm_sysport_desc_rx()
738 /* Determine how much we should process since the last call; SYSTEMPORT Lite in bcm_sysport_desc_rx()
739 * groups the producer and consumer indexes into the same 32-bit register in bcm_sysport_desc_rx()
742 if (!priv->is_lite) in bcm_sysport_desc_rx()
748 to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK; in bcm_sysport_desc_rx()
752 p_index, priv->rx_c_index, to_process); in bcm_sysport_desc_rx()
755 cb = &priv->rx_cbs[priv->rx_read_ptr]; in bcm_sysport_desc_rx()
766 ndev->stats.rx_dropped++; in bcm_sysport_desc_rx()
767 ndev->stats.rx_errors++; in bcm_sysport_desc_rx()
772 rsb = (struct bcm_rsb *)skb->data; in bcm_sysport_desc_rx()
773 len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK; in bcm_sysport_desc_rx()
774 status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & in bcm_sysport_desc_rx()
779 p_index, priv->rx_c_index, priv->rx_read_ptr, in bcm_sysport_desc_rx()
784 ndev->stats.rx_length_errors++; in bcm_sysport_desc_rx()
785 ndev->stats.rx_errors++; in bcm_sysport_desc_rx()
792 ndev->stats.rx_dropped++; in bcm_sysport_desc_rx()
793 ndev->stats.rx_errors++; in bcm_sysport_desc_rx()
801 ndev->stats.rx_over_errors++; in bcm_sysport_desc_rx()
802 ndev->stats.rx_dropped++; in bcm_sysport_desc_rx()
803 ndev->stats.rx_errors++; in bcm_sysport_desc_rx()
812 skb->ip_summed = CHECKSUM_UNNECESSARY; in bcm_sysport_desc_rx()
814 /* Hardware pre-pends packets with 2 bytes before the Ethernet header in bcm_sysport_desc_rx()
819 len -= (sizeof(*rsb) + 2); in bcm_sysport_desc_rx()
823 if (priv->crc_fwd) { in bcm_sysport_desc_rx()
824 skb_trim(skb, len - ETH_FCS_LEN); in bcm_sysport_desc_rx()
825 len -= ETH_FCS_LEN; in bcm_sysport_desc_rx()
828 skb->protocol = eth_type_trans(skb, ndev); in bcm_sysport_desc_rx()
829 ndev->stats.rx_packets++; in bcm_sysport_desc_rx()
830 ndev->stats.rx_bytes += len; in bcm_sysport_desc_rx()
831 u64_stats_update_begin(&priv->syncp); in bcm_sysport_desc_rx()
832 stats64->rx_packets++; in bcm_sysport_desc_rx()
833 stats64->rx_bytes += len; in bcm_sysport_desc_rx()
834 u64_stats_update_end(&priv->syncp); in bcm_sysport_desc_rx()
836 napi_gro_receive(&priv->napi, skb); in bcm_sysport_desc_rx()
839 priv->rx_read_ptr++; in bcm_sysport_desc_rx()
841 if (priv->rx_read_ptr == priv->num_rx_bds) in bcm_sysport_desc_rx()
842 priv->rx_read_ptr = 0; in bcm_sysport_desc_rx()
845 priv->dim.packets = processed; in bcm_sysport_desc_rx()
846 priv->dim.bytes = processed_bytes; in bcm_sysport_desc_rx()
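
The distance computation at file line 748, (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK, stays correct when the producer index wraps; a tiny demonstration, assuming the mask is 0xffff:

    #include <stdio.h>

    #define RDMA_CONS_INDEX_MASK 0xffff  /* assumed 16-bit index */

    int main(void)
    {
        unsigned int p_index = 0x0005;   /* producer wrapped past 0xffff */
        unsigned int c_index = 0xfffe;   /* consumer not yet wrapped */

        /* unsigned subtraction plus the mask yields the true distance */
        unsigned int to_process = (p_index - c_index) & RDMA_CONS_INDEX_MASK;
        printf("to_process = %u\n", to_process);  /* 7 */
        return 0;
    }
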
856 struct bcm_sysport_priv *priv = ring->priv; in bcm_sysport_tx_reclaim_one()
857 struct device *kdev = &priv->pdev->dev; in bcm_sysport_tx_reclaim_one()
859 if (cb->skb) { in bcm_sysport_tx_reclaim_one()
860 *bytes_compl += cb->skb->len; in bcm_sysport_tx_reclaim_one()
880 struct net_device *ndev = priv->netdev; in __bcm_sysport_tx_reclaim()
888 if (!ring->priv->is_lite) in __bcm_sysport_tx_reclaim()
889 intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR); in __bcm_sysport_tx_reclaim()
891 intrl2_0_writel(ring->priv, BIT(ring->index + in __bcm_sysport_tx_reclaim()
895 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); in __bcm_sysport_tx_reclaim()
897 txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK; in __bcm_sysport_tx_reclaim()
901 ring->index, ring->c_index, c_index, txbds_ready); in __bcm_sysport_tx_reclaim()
904 cb = &ring->cbs[ring->clean_index]; in __bcm_sysport_tx_reclaim()
907 ring->desc_count++; in __bcm_sysport_tx_reclaim()
910 if (likely(ring->clean_index < ring->size - 1)) in __bcm_sysport_tx_reclaim()
911 ring->clean_index++; in __bcm_sysport_tx_reclaim()
913 ring->clean_index = 0; in __bcm_sysport_tx_reclaim()
916 u64_stats_update_begin(&priv->syncp); in __bcm_sysport_tx_reclaim()
917 ring->packets += pkts_compl; in __bcm_sysport_tx_reclaim()
918 ring->bytes += bytes_compl; in __bcm_sysport_tx_reclaim()
919 u64_stats_update_end(&priv->syncp); in __bcm_sysport_tx_reclaim()
921 ring->c_index = c_index; in __bcm_sysport_tx_reclaim()
925 ring->index, ring->c_index, pkts_compl, bytes_compl); in __bcm_sysport_tx_reclaim()
930 /* Locked version of the per-ring TX reclaim routine */
938 txq = netdev_get_tx_queue(priv->netdev, ring->index); in bcm_sysport_tx_reclaim()
940 spin_lock_irqsave(&ring->lock, flags); in bcm_sysport_tx_reclaim()
945 spin_unlock_irqrestore(&ring->lock, flags); in bcm_sysport_tx_reclaim()
950 /* Locked version of the per-ring TX reclaim that does not wake the queue */
956 spin_lock_irqsave(&ring->lock, flags); in bcm_sysport_tx_clean()
958 spin_unlock_irqrestore(&ring->lock, flags); in bcm_sysport_tx_clean()
967 work_done = bcm_sysport_tx_reclaim(ring->priv, ring); in bcm_sysport_tx_poll()
971 /* re-enable TX interrupt */ in bcm_sysport_tx_poll()
972 if (!ring->priv->is_lite) in bcm_sysport_tx_poll()
973 intrl2_1_mask_clear(ring->priv, BIT(ring->index)); in bcm_sysport_tx_poll()
975 intrl2_0_mask_clear(ring->priv, BIT(ring->index + in bcm_sysport_tx_poll()
988 for (q = 0; q < priv->netdev->num_tx_queues; q++) in bcm_sysport_tx_reclaim_all()
989 bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]); in bcm_sysport_tx_reclaim_all()
1001 priv->rx_c_index += work_done; in bcm_sysport_poll()
1002 priv->rx_c_index &= RDMA_CONS_INDEX_MASK; in bcm_sysport_poll()
1004 /* SYSTEMPORT Lite groups the producer/consumer index; the producer sits in the low 16 bits, the consumer in the upper 16 in bcm_sysport_poll()
1008 if (!priv->is_lite) in bcm_sysport_poll()
1009 rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); in bcm_sysport_poll()
1011 rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX); in bcm_sysport_poll()
1015 /* re-enable RX interrupts */ in bcm_sysport_poll()
1019 if (priv->dim.use_dim) { in bcm_sysport_poll()
1020 dim_update_sample(priv->dim.event_ctr, priv->dim.packets, in bcm_sysport_poll()
1021 priv->dim.bytes, &dim_sample); in bcm_sysport_poll()
1022 net_dim(&priv->dim.dim, &dim_sample); in bcm_sysport_poll()
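
File lines 1009 and 1011 show the consumer index written verbatim on full SYSTEMPORT but shifted into the upper half-word on Lite, where a single 32-bit register packs both indexes. A small demo of that layout (field positions inferred from the shift):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t reg = 0x00000123;     /* HW-owned producer, bits 15:0 */
        uint32_t p_index = reg & 0xffff;
        uint32_t rx_c_index = 0x0120;

        /* SW writes only the consumer half; HW keeps the producer half */
        uint32_t wr = rx_c_index << 16;
        printf("producer=%u consumer-write=0x%08x\n",
               (unsigned)p_index, (unsigned)wr);
        return 0;
    }
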
1039 if (priv->is_lite) in mpd_enable_set()
1066 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { in bcm_sysport_resume_from_wol()
1067 rxchk_writel(priv, priv->filters_loc[index] << in bcm_sysport_resume_from_wol()
1077 netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n"); in bcm_sysport_resume_from_wol()
1082 netdev_info(priv->netdev, in bcm_sysport_resume_from_wol()
1083 "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg); in bcm_sysport_resume_from_wol()
1086 netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n"); in bcm_sysport_resume_from_wol()
1096 struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode, in bcm_sysport_dim_work()
1097 dim->profile_ix); in bcm_sysport_dim_work()
1100 dim->state = DIM_START_MEASURE; in bcm_sysport_dim_work()
1111 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & in bcm_sysport_rx_isr()
1113 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); in bcm_sysport_rx_isr()
1115 if (unlikely(priv->irq0_stat == 0)) { in bcm_sysport_rx_isr()
1116 netdev_warn(priv->netdev, "spurious RX interrupt\n"); in bcm_sysport_rx_isr()
1120 if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) { in bcm_sysport_rx_isr()
1121 priv->dim.event_ctr++; in bcm_sysport_rx_isr()
1122 if (likely(napi_schedule_prep(&priv->napi))) { in bcm_sysport_rx_isr()
1125 __napi_schedule_irqoff(&priv->napi); in bcm_sysport_rx_isr()
1132 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL) in bcm_sysport_rx_isr()
1135 if (!priv->is_lite) in bcm_sysport_rx_isr()
1138 for (ring = 0; ring < dev->num_tx_queues; ring++) { in bcm_sysport_rx_isr()
1140 if (!(priv->irq0_stat & ring_bit)) in bcm_sysport_rx_isr()
1143 txr = &priv->tx_rings[ring]; in bcm_sysport_rx_isr()
1145 if (likely(napi_schedule_prep(&txr->napi))) { in bcm_sysport_rx_isr()
1147 __napi_schedule(&txr->napi); in bcm_sysport_rx_isr()
1162 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) & in bcm_sysport_tx_isr()
1166 if (unlikely(priv->irq1_stat == 0)) { in bcm_sysport_tx_isr()
1167 netdev_warn(priv->netdev, "spurious TX interrupt\n"); in bcm_sysport_tx_isr()
1171 for (ring = 0; ring < dev->num_tx_queues; ring++) { in bcm_sysport_tx_isr()
1172 if (!(priv->irq1_stat & BIT(ring))) in bcm_sysport_tx_isr()
1175 txr = &priv->tx_rings[ring]; in bcm_sysport_tx_isr()
1177 if (likely(napi_schedule_prep(&txr->napi))) { in bcm_sysport_tx_isr()
1179 __napi_schedule_irqoff(&txr->napi); in bcm_sysport_tx_isr()
1190 pm_wakeup_event(&priv->pdev->dev, 0); in bcm_sysport_wol_isr()
1200 disable_irq(priv->irq0); in bcm_sysport_poll_controller()
1201 bcm_sysport_rx_isr(priv->irq0, priv); in bcm_sysport_poll_controller()
1202 enable_irq(priv->irq0); in bcm_sysport_poll_controller()
1204 if (!priv->is_lite) { in bcm_sysport_poll_controller()
1205 disable_irq(priv->irq1); in bcm_sysport_poll_controller()
1206 bcm_sysport_tx_isr(priv->irq1, priv); in bcm_sysport_poll_controller()
1207 enable_irq(priv->irq1); in bcm_sysport_poll_controller()
1223 /* Re-allocate SKB if needed */ in bcm_sysport_insert_tsb()
1228 priv->mib.tx_realloc_tsb_failed++; in bcm_sysport_insert_tsb()
1229 dev->stats.tx_errors++; in bcm_sysport_insert_tsb()
1230 dev->stats.tx_dropped++; in bcm_sysport_insert_tsb()
1235 priv->mib.tx_realloc_tsb++; in bcm_sysport_insert_tsb()
1239 /* Zero-out TSB by default */ in bcm_sysport_insert_tsb()
1243 tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK; in bcm_sysport_insert_tsb()
1244 tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT; in bcm_sysport_insert_tsb()
1247 if (skb->ip_summed == CHECKSUM_PARTIAL) { in bcm_sysport_insert_tsb()
1248 ip_ver = skb->protocol; in bcm_sysport_insert_tsb()
1251 ip_proto = ip_hdr(skb)->protocol; in bcm_sysport_insert_tsb()
1254 ip_proto = ipv6_hdr(skb)->nexthdr; in bcm_sysport_insert_tsb()
1261 csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb); in bcm_sysport_insert_tsb()
1265 csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK; in bcm_sysport_insert_tsb()
1277 tsb->l4_ptr_dest_map = csum_info; in bcm_sysport_insert_tsb()
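
A worked example of the transmit status block (TSB) checksum fields for TCP over IPv4 with no VLAN: the L4 header starts 34 bytes in (14-byte Ethernet plus 20-byte IP header) once the 8-byte TSB pushed earlier is subtracted back out, and the TCP checksum field sits 16 bytes into the L4 header. The mask and shift values are assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define L4_CSUM_PTR_MASK 0x1ff  /* assumed */
    #define L4_PTR_SHIFT     9      /* assumed */

    int main(void)
    {
        uint32_t csum_start = 34;   /* L4 header offset (TCP over IPv4) */
        uint32_t csum_off = 16;     /* TCP checksum field within L4 header */
        uint32_t csum_info;

        csum_info = (csum_start + csum_off) & L4_CSUM_PTR_MASK;
        csum_info |= csum_start << L4_PTR_SHIFT;  /* assumed second field */
        printf("csum_info = 0x%04x\n", (unsigned)csum_info);  /* 0x4432 */
        return 0;
    }
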
1287 struct device *kdev = &priv->pdev->dev; in bcm_sysport_xmit()
1300 ring = &priv->tx_rings[queue]; in bcm_sysport_xmit()
1303 spin_lock_irqsave(&ring->lock, flags); in bcm_sysport_xmit()
1304 if (unlikely(ring->desc_count == 0)) { in bcm_sysport_xmit()
1312 if (priv->tsb_en) { in bcm_sysport_xmit()
1320 skb_len = skb->len; in bcm_sysport_xmit()
1322 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); in bcm_sysport_xmit()
1324 priv->mib.tx_dma_failed++; in bcm_sysport_xmit()
1326 skb->data, skb_len); in bcm_sysport_xmit()
1333 cb = &ring->cbs[ring->curr_desc]; in bcm_sysport_xmit()
1334 cb->skb = skb; in bcm_sysport_xmit()
1343 if (skb->ip_summed == CHECKSUM_PARTIAL) in bcm_sysport_xmit()
1348 ring->curr_desc++; in bcm_sysport_xmit()
1349 if (ring->curr_desc == ring->size) in bcm_sysport_xmit()
1350 ring->curr_desc = 0; in bcm_sysport_xmit()
1351 ring->desc_count--; in bcm_sysport_xmit()
1354 spin_lock_irqsave(&priv->desc_lock, desc_flags); in bcm_sysport_xmit()
1355 tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index)); in bcm_sysport_xmit()
1356 tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index)); in bcm_sysport_xmit()
1357 spin_unlock_irqrestore(&priv->desc_lock, desc_flags); in bcm_sysport_xmit()
1360 if (ring->desc_count == 0) in bcm_sysport_xmit()
1364 ring->index, ring->desc_count, ring->curr_desc); in bcm_sysport_xmit()
1368 spin_unlock_irqrestore(&ring->lock, flags); in bcm_sysport_xmit()
1377 dev->stats.tx_errors++; in bcm_sysport_tx_timeout()
1386 struct phy_device *phydev = dev->phydev; in bcm_sysport_adj_link()
1390 if (priv->old_link != phydev->link) { in bcm_sysport_adj_link()
1392 priv->old_link = phydev->link; in bcm_sysport_adj_link()
1395 if (priv->old_duplex != phydev->duplex) { in bcm_sysport_adj_link()
1397 priv->old_duplex = phydev->duplex; in bcm_sysport_adj_link()
1400 if (priv->is_lite) in bcm_sysport_adj_link()
1403 switch (phydev->speed) { in bcm_sysport_adj_link()
1421 if (phydev->duplex == DUPLEX_HALF) in bcm_sysport_adj_link()
1424 if (priv->old_pause != phydev->pause) { in bcm_sysport_adj_link()
1426 priv->old_pause = phydev->pause; in bcm_sysport_adj_link()
1429 if (!phydev->pause) in bcm_sysport_adj_link()
1435 if (phydev->link) { in bcm_sysport_adj_link()
1451 struct bcm_sysport_net_dim *dim = &priv->dim; in bcm_sysport_init_dim()
1453 INIT_WORK(&dim->dim.work, cb); in bcm_sysport_init_dim()
1454 dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in bcm_sysport_init_dim()
1455 dim->event_ctr = 0; in bcm_sysport_init_dim()
1456 dim->packets = 0; in bcm_sysport_init_dim()
1457 dim->bytes = 0; in bcm_sysport_init_dim()
1462 struct bcm_sysport_net_dim *dim = &priv->dim; in bcm_sysport_init_rx_coalesce()
1466 usecs = priv->rx_coalesce_usecs; in bcm_sysport_init_rx_coalesce()
1467 pkts = priv->rx_max_coalesced_frames; in bcm_sysport_init_rx_coalesce()
1469 /* If DIM was enabled, re-apply default parameters */ in bcm_sysport_init_rx_coalesce()
1470 if (dim->use_dim) { in bcm_sysport_init_rx_coalesce()
1471 moder = net_dim_get_def_rx_moderation(dim->dim.mode); in bcm_sysport_init_rx_coalesce()
1482 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; in bcm_sysport_init_tx_ring()
1489 ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL); in bcm_sysport_init_tx_ring()
1490 if (!ring->cbs) { in bcm_sysport_init_tx_ring()
1491 netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); in bcm_sysport_init_tx_ring()
1492 return -ENOMEM; in bcm_sysport_init_tx_ring()
1496 spin_lock_init(&ring->lock); in bcm_sysport_init_tx_ring()
1497 ring->priv = priv; in bcm_sysport_init_tx_ring()
1498 netif_napi_add_tx(priv->netdev, &ring->napi, bcm_sysport_tx_poll); in bcm_sysport_init_tx_ring()
1499 ring->index = index; in bcm_sysport_init_tx_ring()
1500 ring->size = size; in bcm_sysport_init_tx_ring()
1501 ring->clean_index = 0; in bcm_sysport_init_tx_ring()
1502 ring->alloc_size = ring->size; in bcm_sysport_init_tx_ring()
1503 ring->desc_count = ring->size; in bcm_sysport_init_tx_ring()
1504 ring->curr_desc = 0; in bcm_sysport_init_tx_ring()
1515 if (ring->inspect) { in bcm_sysport_init_tx_ring()
1516 reg |= ring->switch_queue & RING_QID_MASK; in bcm_sysport_init_tx_ring()
1517 reg |= ring->switch_port << RING_PORT_ID_SHIFT; in bcm_sysport_init_tx_ring()
1523 /* Adjust the packet size calculations if SYSTEMPORT is responsible for HW VLAN tag insertion in bcm_sysport_init_tx_ring()
1526 if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) in bcm_sysport_init_tx_ring()
1539 if (priv->is_lite) in bcm_sysport_init_tx_ring()
1551 tdma_writel(priv, ring->size | in bcm_sysport_init_tx_ring()
1560 napi_enable(&ring->napi); in bcm_sysport_init_tx_ring()
1562 netif_dbg(priv, hw, priv->netdev, in bcm_sysport_init_tx_ring()
1564 ring->size, ring->switch_queue, in bcm_sysport_init_tx_ring()
1565 ring->switch_port); in bcm_sysport_init_tx_ring()
1573 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; in bcm_sysport_fini_tx_ring()
1579 netdev_warn(priv->netdev, "TDMA not stopped!\n"); in bcm_sysport_fini_tx_ring()
1581 /* ring->cbs is the last part of bcm_sysport_init_tx_ring() that can fail, so a missing cbs means the ring was never initialized in bcm_sysport_fini_tx_ring()
1585 if (!ring->cbs) in bcm_sysport_fini_tx_ring()
1588 napi_disable(&ring->napi); in bcm_sysport_fini_tx_ring()
1589 netif_napi_del(&ring->napi); in bcm_sysport_fini_tx_ring()
1593 kfree(ring->cbs); in bcm_sysport_fini_tx_ring()
1594 ring->cbs = NULL; in bcm_sysport_fini_tx_ring()
1595 ring->size = 0; in bcm_sysport_fini_tx_ring()
1596 ring->alloc_size = 0; in bcm_sysport_fini_tx_ring()
1598 netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n"); in bcm_sysport_fini_tx_ring()
1621 } while (timeout-- > 0); in rdma_enable_set()
1623 netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n"); in rdma_enable_set()
1625 return -ETIMEDOUT; in rdma_enable_set()
1649 } while (timeout-- > 0); in tdma_enable_set()
1651 netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n"); in tdma_enable_set()
1653 return -ETIMEDOUT; in tdma_enable_set()
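
rdma_enable_set() and tdma_enable_set() share the bounded-poll shape visible at file lines 1621 and 1649: re-read a status register until the desired state appears or the budget runs out. A runnable sketch of the pattern with the status read stubbed; the iteration budget is an assumption:

    #include <stdio.h>

    /* pretend the hardware reports 'disabled' after the third poll */
    static int dma_disabled_stub(void)
    {
        static int polls = 3;
        return --polls <= 0;
    }

    static int wait_dma_disabled(void)
    {
        unsigned int timeout = 1000;  /* assumed budget */

        do {
            if (dma_disabled_stub())
                return 0;
            /* the driver sleeps ~1ms per iteration here */
        } while (timeout-- > 0);

        return -1;  /* -ETIMEDOUT in the driver */
    }

    int main(void)
    {
        printf("wait_dma_disabled() = %d\n", wait_dma_disabled());
        return 0;
    }
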
1664 priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC; in bcm_sysport_init_rx_ring()
1665 priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; in bcm_sysport_init_rx_ring()
1666 priv->rx_c_index = 0; in bcm_sysport_init_rx_ring()
1667 priv->rx_read_ptr = 0; in bcm_sysport_init_rx_ring()
1668 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb), in bcm_sysport_init_rx_ring()
1670 if (!priv->rx_cbs) { in bcm_sysport_init_rx_ring()
1671 netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); in bcm_sysport_init_rx_ring()
1672 return -ENOMEM; in bcm_sysport_init_rx_ring()
1675 for (i = 0; i < priv->num_rx_bds; i++) { in bcm_sysport_init_rx_ring()
1676 cb = priv->rx_cbs + i; in bcm_sysport_init_rx_ring()
1677 cb->bd_addr = priv->rx_bds + i * DESC_SIZE; in bcm_sysport_init_rx_ring()
1682 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n"); in bcm_sysport_init_rx_ring()
1695 rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT | in bcm_sysport_init_rx_ring()
1701 rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO); in bcm_sysport_init_rx_ring()
1703 netif_dbg(priv, hw, priv->netdev, in bcm_sysport_init_rx_ring()
1705 priv->num_rx_bds, priv->rx_bds); in bcm_sysport_init_rx_ring()
1719 netdev_warn(priv->netdev, "RDMA not stopped!\n"); in bcm_sysport_fini_rx_ring()
1721 for (i = 0; i < priv->num_rx_bds; i++) { in bcm_sysport_fini_rx_ring()
1722 cb = &priv->rx_cbs[i]; in bcm_sysport_fini_rx_ring()
1724 dma_unmap_single(&priv->pdev->dev, in bcm_sysport_fini_rx_ring()
1730 kfree(priv->rx_cbs); in bcm_sysport_fini_rx_ring()
1731 priv->rx_cbs = NULL; in bcm_sysport_fini_rx_ring()
1733 netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n"); in bcm_sysport_fini_rx_ring()
1741 if (priv->is_lite) in bcm_sysport_set_rx_mode()
1745 if (dev->flags & IFF_PROMISC) in bcm_sysport_set_rx_mode()
1752 if (dev->flags & IFF_ALLMULTI) in bcm_sysport_set_rx_mode()
1761 if (!priv->is_lite) { in umac_enable_set()
1777 /* UniMAC stops on a packet boundary; wait for a full-sized packet to be processed in umac_enable_set()
1788 if (priv->is_lite) in umac_reset()
1807 if (!priv->is_lite) { in umac_set_hw_addr()
1830 if (!is_valid_ether_addr(addr->sa_data)) in bcm_sysport_change_mac()
1831 return -EINVAL; in bcm_sysport_change_mac()
1833 eth_hw_addr_set(dev, addr->sa_data); in bcm_sysport_change_mac()
1841 umac_set_hw_addr(priv, dev->dev_addr); in bcm_sysport_change_mac()
1850 struct bcm_sysport_stats64 *stats64 = &priv->stats64; in bcm_sysport_get_stats64()
1853 netdev_stats_to_stats64(stats, &dev->stats); in bcm_sysport_get_stats64()
1855 bcm_sysport_update_tx_stats(priv, &stats->tx_bytes, in bcm_sysport_get_stats64()
1856 &stats->tx_packets); in bcm_sysport_get_stats64()
1859 start = u64_stats_fetch_begin(&priv->syncp); in bcm_sysport_get_stats64()
1860 stats->rx_packets = stats64->rx_packets; in bcm_sysport_get_stats64()
1861 stats->rx_bytes = stats64->rx_bytes; in bcm_sysport_get_stats64()
1862 } while (u64_stats_fetch_retry(&priv->syncp, start)); in bcm_sysport_get_stats64()
1872 napi_enable(&priv->napi); in bcm_sysport_netif_start()
1877 phy_start(dev->phydev); in bcm_sysport_netif_start()
1880 if (!priv->is_lite) in bcm_sysport_netif_start()
1892 /* Set a correct RSB format on SYSTEMPORT Lite */ in rbuf_init()
1893 if (priv->is_lite) in rbuf_init()
1908 if (!priv->is_lite) { in bcm_sysport_mask_all_intrs()
1920 if (netdev_uses_dsa(priv->netdev)) { in gib_set_pad_extension()
1936 ret = clk_prepare_enable(priv->clk); in bcm_sysport_open()
1955 if (!priv->is_lite) in bcm_sysport_open()
1963 bcm_sysport_set_features(dev, dev->features); in bcm_sysport_open()
1966 umac_set_hw_addr(priv, dev->dev_addr); in bcm_sysport_open()
1968 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, in bcm_sysport_open()
1969 0, priv->phy_interface); in bcm_sysport_open()
1972 ret = -ENODEV; in bcm_sysport_open()
1977 phydev->mac_managed_pm = true; in bcm_sysport_open()
1980 priv->old_duplex = -1; in bcm_sysport_open()
1981 priv->old_link = -1; in bcm_sysport_open()
1982 priv->old_pause = -1; in bcm_sysport_open()
1987 ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev); in bcm_sysport_open()
1993 if (!priv->is_lite) { in bcm_sysport_open()
1994 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, in bcm_sysport_open()
1995 dev->name, dev); in bcm_sysport_open()
2003 spin_lock_init(&priv->desc_lock); in bcm_sysport_open()
2004 for (i = 0; i < dev->num_tx_queues; i++) { in bcm_sysport_open()
2013 /* Initialize linked-list */ in bcm_sysport_open()
2047 for (i = 0; i < dev->num_tx_queues; i++) in bcm_sysport_open()
2049 if (!priv->is_lite) in bcm_sysport_open()
2050 free_irq(priv->irq1, dev); in bcm_sysport_open()
2052 free_irq(priv->irq0, dev); in bcm_sysport_open()
2056 clk_disable_unprepare(priv->clk); in bcm_sysport_open()
2066 napi_disable(&priv->napi); in bcm_sysport_netif_stop()
2067 cancel_work_sync(&priv->dim.dim.work); in bcm_sysport_netif_stop()
2068 phy_stop(dev->phydev); in bcm_sysport_netif_stop()
2104 for (i = 0; i < dev->num_tx_queues; i++) in bcm_sysport_stop()
2108 free_irq(priv->irq0, dev); in bcm_sysport_stop()
2109 if (!priv->is_lite) in bcm_sysport_stop()
2110 free_irq(priv->irq1, dev); in bcm_sysport_stop()
2113 phy_disconnect(dev->phydev); in bcm_sysport_stop()
2115 clk_disable_unprepare(priv->clk); in bcm_sysport_stop()
2126 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { in bcm_sysport_rule_find()
2134 return -EINVAL; in bcm_sysport_rule_find()
2143 index = bcm_sysport_rule_find(priv, nfc->fs.location); in bcm_sysport_rule_get()
2145 return -EOPNOTSUPP; in bcm_sysport_rule_get()
2147 nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE; in bcm_sysport_rule_get()
2161 if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK) in bcm_sysport_rule_set()
2162 return -E2BIG; in bcm_sysport_rule_set()
2164 /* We cannot support flows that are not destined for a wake-up */ in bcm_sysport_rule_set()
2165 if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE) in bcm_sysport_rule_set()
2166 return -EOPNOTSUPP; in bcm_sysport_rule_set()
2168 index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX); in bcm_sysport_rule_set()
2171 return -ENOSPC; in bcm_sysport_rule_set()
2178 reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT; in bcm_sysport_rule_set()
2182 priv->filters_loc[index] = nfc->fs.location; in bcm_sysport_rule_set()
2183 set_bit(index, priv->filters); in bcm_sysport_rule_set()
2196 return -EOPNOTSUPP; in bcm_sysport_rule_del()
2201 clear_bit(index, priv->filters); in bcm_sysport_rule_del()
2202 priv->filters_loc[index] = 0; in bcm_sysport_rule_del()
2211 int ret = -EOPNOTSUPP; in bcm_sysport_get_rxnfc()
2213 switch (nfc->cmd) { in bcm_sysport_get_rxnfc()
2228 int ret = -EOPNOTSUPP; in bcm_sysport_set_rxnfc()
2230 switch (nfc->cmd) { in bcm_sysport_set_rxnfc()
2235 ret = bcm_sysport_rule_del(priv, nfc->fs.location); in bcm_sysport_set_rxnfc()
2279 tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues]; in bcm_sysport_select_queue()
2284 return tx_ring->index; in bcm_sysport_select_queue()
2314 if (dp->ds->index) in bcm_sysport_map_queues()
2317 port = dp->index; in bcm_sysport_map_queues()
2319 /* On SYSTEMPORT Lite we have half as many queues, so we cannot do a in bcm_sysport_map_queues()
2321 * per-port (slave_dev) network devices queue, we achieve just that. in bcm_sysport_map_queues()
2325 if (priv->is_lite) in bcm_sysport_map_queues()
2327 slave_dev->num_tx_queues / 2); in bcm_sysport_map_queues()
2329 num_tx_queues = slave_dev->real_num_tx_queues; in bcm_sysport_map_queues()
2331 if (priv->per_port_num_tx_queues && in bcm_sysport_map_queues()
2332 priv->per_port_num_tx_queues != num_tx_queues) in bcm_sysport_map_queues()
2333 netdev_warn(slave_dev, "asymmetric number of per-port queues\n"); in bcm_sysport_map_queues()
2335 priv->per_port_num_tx_queues = num_tx_queues; in bcm_sysport_map_queues()
2337 for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues; in bcm_sysport_map_queues()
2339 ring = &priv->tx_rings[q]; in bcm_sysport_map_queues()
2341 if (ring->inspect) in bcm_sysport_map_queues()
2347 ring->switch_queue = qp; in bcm_sysport_map_queues()
2348 ring->switch_port = port; in bcm_sysport_map_queues()
2349 ring->inspect = true; in bcm_sysport_map_queues()
2350 priv->ring_map[qp + port * num_tx_queues] = ring; in bcm_sysport_map_queues()
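
File line 2350 flattens a (port, queue) pair into a one-dimensional ring_map slot. The arithmetic, worked out:

    #include <stdio.h>

    int main(void)
    {
        unsigned int per_port_num_tx_queues = 4;  /* example value */
        unsigned int port = 2, qp = 1;

        /* row-major layout: one contiguous run of slots per switch port */
        unsigned int slot = qp + port * per_port_num_tx_queues;
        printf("port %u, queue %u -> ring_map[%u]\n", port, qp, slot); /* 9 */
        return 0;
    }
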
2366 port = dp->index; in bcm_sysport_unmap_queues()
2368 num_tx_queues = slave_dev->real_num_tx_queues; in bcm_sysport_unmap_queues()
2370 for (q = 0; q < dev->num_tx_queues; q++) { in bcm_sysport_unmap_queues()
2371 ring = &priv->tx_rings[q]; in bcm_sysport_unmap_queues()
2373 if (ring->switch_port != port) in bcm_sysport_unmap_queues()
2376 if (!ring->inspect) in bcm_sysport_unmap_queues()
2379 ring->inspect = false; in bcm_sysport_unmap_queues()
2380 qp = ring->switch_queue; in bcm_sysport_unmap_queues()
2381 priv->ring_map[qp + port * num_tx_queues] = NULL; in bcm_sysport_unmap_queues()
2396 if (priv->netdev != dev) in bcm_sysport_netdevice_event()
2401 if (dev->netdev_ops != &bcm_sysport_netdev_ops) in bcm_sysport_netdevice_event()
2404 if (!dsa_user_dev_check(info->upper_dev)) in bcm_sysport_netdevice_event()
2407 if (info->linking) in bcm_sysport_netdevice_event()
2408 ret = bcm_sysport_map_queues(dev, info->upper_dev); in bcm_sysport_netdevice_event()
2410 ret = bcm_sysport_unmap_queues(dev, info->upper_dev); in bcm_sysport_netdevice_event()
2420 [SYSTEMPORT] = {
2431 { .compatible = "brcm,systemportlite-v1.00",
2433 { .compatible = "brcm,systemport-v1.00",
2434 .data = &bcm_sysport_params[SYSTEMPORT] },
2435 { .compatible = "brcm,systemport",
2436 .data = &bcm_sysport_params[SYSTEMPORT] },
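
The fragments above come from an of_device_id table pairing devicetree compatible strings (the very strings this search matched on) with per-variant parameters. A standalone sketch of that shape; the Lite pairing for the first entry and every parameter value are assumptions:

    #include <stddef.h>
    #include <stdio.h>

    struct hw_params { int is_lite; unsigned int num_rx_desc_words; };

    /* values are illustrative only */
    static const struct hw_params params[] = {
        { .is_lite = 0, .num_rx_desc_words = 1024 },  /* SYSTEMPORT */
        { .is_lite = 1, .num_rx_desc_words = 256 },   /* SYSTEMPORT Lite */
    };

    struct of_id { const char *compatible; const struct hw_params *data; };

    static const struct of_id match_table[] = {
        { "brcm,systemportlite-v1.00", &params[1] },
        { "brcm,systemport-v1.00",     &params[0] },
        { "brcm,systemport",           &params[0] },
        { NULL, NULL }  /* sentinel, as in of_device_id tables */
    };

    int main(void)
    {
        for (const struct of_id *m = match_table; m->compatible; m++)
            printf("%-28s -> is_lite=%d\n", m->compatible, m->data->is_lite);
        return 0;
    }
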
2451 dn = pdev->dev.of_node; in bcm_sysport_probe()
2453 if (!of_id || !of_id->data) in bcm_sysport_probe()
2454 return -EINVAL; in bcm_sysport_probe()
2456 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); in bcm_sysport_probe()
2458 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in bcm_sysport_probe()
2460 dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret); in bcm_sysport_probe()
2465 params = of_id->data; in bcm_sysport_probe()
2468 if (of_property_read_u32(dn, "systemport,num-txq", &txq)) in bcm_sysport_probe()
2470 if (of_property_read_u32(dn, "systemport,num-rxq", &rxq)) in bcm_sysport_probe()
2475 return -EINVAL; in bcm_sysport_probe()
2479 return -ENOMEM; in bcm_sysport_probe()
2484 priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport"); in bcm_sysport_probe()
2485 if (IS_ERR(priv->clk)) { in bcm_sysport_probe()
2486 ret = PTR_ERR(priv->clk); in bcm_sysport_probe()
2491 priv->tx_rings = devm_kcalloc(&pdev->dev, txq, in bcm_sysport_probe()
2494 if (!priv->tx_rings) { in bcm_sysport_probe()
2495 ret = -ENOMEM; in bcm_sysport_probe()
2499 priv->is_lite = params->is_lite; in bcm_sysport_probe()
2500 priv->num_rx_desc_words = params->num_rx_desc_words; in bcm_sysport_probe()
2502 priv->irq0 = platform_get_irq(pdev, 0); in bcm_sysport_probe()
2503 if (!priv->is_lite) { in bcm_sysport_probe()
2504 priv->irq1 = platform_get_irq(pdev, 1); in bcm_sysport_probe()
2505 priv->wol_irq = platform_get_irq_optional(pdev, 2); in bcm_sysport_probe()
2507 priv->wol_irq = platform_get_irq_optional(pdev, 1); in bcm_sysport_probe()
2509 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { in bcm_sysport_probe()
2510 ret = -EINVAL; in bcm_sysport_probe()
2514 priv->base = devm_platform_ioremap_resource(pdev, 0); in bcm_sysport_probe()
2515 if (IS_ERR(priv->base)) { in bcm_sysport_probe()
2516 ret = PTR_ERR(priv->base); in bcm_sysport_probe()
2520 priv->netdev = dev; in bcm_sysport_probe()
2521 priv->pdev = pdev; in bcm_sysport_probe()
2523 ret = of_get_phy_mode(dn, &priv->phy_interface); in bcm_sysport_probe()
2526 priv->phy_interface = PHY_INTERFACE_MODE_GMII; in bcm_sysport_probe()
2534 dev_err(&pdev->dev, "failed to register fixed PHY\n"); in bcm_sysport_probe()
2538 priv->phy_dn = dn; in bcm_sysport_probe()
2544 dev_warn(&pdev->dev, "using random Ethernet MAC\n"); in bcm_sysport_probe()
2548 SET_NETDEV_DEV(dev, &pdev->dev); in bcm_sysport_probe()
2549 dev_set_drvdata(&pdev->dev, dev); in bcm_sysport_probe()
2550 dev->ethtool_ops = &bcm_sysport_ethtool_ops; in bcm_sysport_probe()
2551 dev->netdev_ops = &bcm_sysport_netdev_ops; in bcm_sysport_probe()
2552 netif_napi_add(dev, &priv->napi, bcm_sysport_poll); in bcm_sysport_probe()
2554 dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA | in bcm_sysport_probe()
2557 dev->hw_features |= dev->features; in bcm_sysport_probe()
2558 dev->vlan_features |= dev->features; in bcm_sysport_probe()
2559 dev->max_mtu = UMAC_MAX_MTU_SIZE; in bcm_sysport_probe()
2562 priv->wol_irq_disabled = 1; in bcm_sysport_probe()
2563 ret = devm_request_irq(&pdev->dev, priv->wol_irq, in bcm_sysport_probe()
2564 bcm_sysport_wol_isr, 0, dev->name, priv); in bcm_sysport_probe()
2566 device_set_wakeup_capable(&pdev->dev, 1); in bcm_sysport_probe()
2568 priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol"); in bcm_sysport_probe()
2569 if (IS_ERR(priv->wol_clk)) { in bcm_sysport_probe()
2570 ret = PTR_ERR(priv->wol_clk); in bcm_sysport_probe()
2576 dev->needed_headroom += sizeof(struct bcm_tsb); in bcm_sysport_probe()
2581 priv->rx_max_coalesced_frames = 1; in bcm_sysport_probe()
2582 u64_stats_init(&priv->syncp); in bcm_sysport_probe()
2584 priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event; in bcm_sysport_probe()
2586 ret = register_netdevice_notifier(&priv->netdev_notifier); in bcm_sysport_probe()
2588 dev_err(&pdev->dev, "failed to register DSA notifier\n"); in bcm_sysport_probe()
2594 dev_err(&pdev->dev, "failed to register net_device\n"); in bcm_sysport_probe()
2598 ret = clk_prepare_enable(priv->clk); in bcm_sysport_probe()
2600 dev_err(&pdev->dev, "could not enable priv clock\n"); in bcm_sysport_probe()
2604 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; in bcm_sysport_probe()
2605 dev_info(&pdev->dev, in bcm_sysport_probe()
2606 "Broadcom SYSTEMPORT%s " REV_FMT in bcm_sysport_probe()
2608 priv->is_lite ? " Lite" : "", in bcm_sysport_probe()
2609 (priv->rev >> 8) & 0xff, priv->rev & 0xff, in bcm_sysport_probe()
2610 priv->irq0, priv->irq1, txq, rxq); in bcm_sysport_probe()
2612 clk_disable_unprepare(priv->clk); in bcm_sysport_probe()
2619 unregister_netdevice_notifier(&priv->netdev_notifier); in bcm_sysport_probe()
2630 struct net_device *dev = dev_get_drvdata(&pdev->dev); in bcm_sysport_remove()
2632 struct device_node *dn = pdev->dev.of_node; in bcm_sysport_remove()
2637 unregister_netdevice_notifier(&priv->netdev_notifier); in bcm_sysport_remove()
2642 dev_set_drvdata(&pdev->dev, NULL); in bcm_sysport_remove()
2647 struct net_device *ndev = priv->netdev; in bcm_sysport_suspend_to_wol()
2653 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) in bcm_sysport_suspend_to_wol()
2656 if (priv->wolopts & WAKE_MAGICSECURE) { in bcm_sysport_suspend_to_wol()
2658 umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), in bcm_sysport_suspend_to_wol()
2660 umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), in bcm_sysport_suspend_to_wol()
2666 if (priv->wolopts & WAKE_FILTER) { in bcm_sysport_suspend_to_wol()
2669 if (priv->is_lite) in bcm_sysport_suspend_to_wol()
2679 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { in bcm_sysport_suspend_to_wol()
2694 } while (timeout-- > 0); in bcm_sysport_suspend_to_wol()
2700 return -ETIMEDOUT; in bcm_sysport_suspend_to_wol()
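
File lines 2658-2660 split the 6-byte SecureOn password across two registers: a big-endian u16 from sopass[0..1] and a big-endian u32 from sopass[2..5] (the destination register names are elided in this listing). A demo of the packing:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t be16(const uint8_t *p)
    {
        return (uint16_t)((p[0] << 8) | p[1]);
    }

    static uint32_t be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | p[3];
    }

    int main(void)
    {
        uint8_t sopass[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };

        printf("password MS (u16) = 0x%04x\n", (unsigned)be16(&sopass[0]));
        printf("password LS (u32) = 0x%08x\n", (unsigned)be32(&sopass[2]));
        return 0;
    }
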
2726 phy_suspend(dev->phydev); in bcm_sysport_suspend()
2738 if (priv->rx_chk_en) { in bcm_sysport_suspend()
2745 if (!priv->wolopts) in bcm_sysport_suspend()
2762 for (i = 0; i < dev->num_tx_queues; i++) in bcm_sysport_suspend()
2766 /* Get prepared for Wake-on-LAN */ in bcm_sysport_suspend()
2767 if (device_may_wakeup(d) && priv->wolopts) { in bcm_sysport_suspend()
2768 clk_prepare_enable(priv->wol_clk); in bcm_sysport_suspend()
2772 clk_disable_unprepare(priv->clk); in bcm_sysport_suspend()
2787 ret = clk_prepare_enable(priv->clk); in bcm_sysport_resume()
2793 if (priv->wolopts) in bcm_sysport_resume()
2794 clk_disable_unprepare(priv->wol_clk); in bcm_sysport_resume()
2807 for (i = 0; i < dev->num_tx_queues; i++) { in bcm_sysport_resume()
2816 /* Initialize linked-list */ in bcm_sysport_resume()
2836 bcm_sysport_set_features(dev, dev->features); in bcm_sysport_resume()
2841 if (!priv->is_lite) in bcm_sysport_resume()
2847 umac_set_hw_addr(priv, dev->dev_addr); in bcm_sysport_resume()
2862 phy_resume(dev->phydev); in bcm_sysport_resume()
2873 for (i = 0; i < dev->num_tx_queues; i++) in bcm_sysport_resume()
2875 clk_disable_unprepare(priv->clk); in bcm_sysport_resume()
2886 .name = "brcm-systemport",
2895 MODULE_ALIAS("platform:brcm-systemport");