Lines Matching full:bp
73 #define TX_RING_GAP(BP) \ argument
74 (B44_TX_RING_SIZE - (BP)->tx_pending)
75 #define TX_BUFFS_AVAIL(BP) \ argument
76 (((BP)->tx_cons <= (BP)->tx_prod) ? \
77 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
78 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
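The TX_RING_GAP()/TX_BUFFS_AVAIL() macros above turn the producer/consumer indices into a count of free TX descriptors, reserving a gap of (ring size - tx_pending) slots so a completely full ring is never mistaken for an empty one. A minimal standalone model of the same arithmetic, with illustrative constants rather than the driver's real b44.h values:

#include <assert.h>

/* Illustrative values only; the real constants live in b44.h. */
#define TX_RING_SIZE	512
#define TX_PENDING	511
#define TX_RING_GAP	(TX_RING_SIZE - TX_PENDING)

/* Free descriptors for given consumer/producer indices,
 * mirroring the TX_BUFFS_AVAIL() macro above. */
static int tx_buffs_avail(unsigned int cons, unsigned int prod)
{
	if (cons <= prod)
		return cons + TX_PENDING - prod;
	return cons - prod - TX_RING_GAP;
}

int main(void)
{
	assert(tx_buffs_avail(0, 0) == TX_PENDING);	/* empty ring */
	assert(tx_buffs_avail(100, 90) == 9);		/* producer wrapped, 502 in flight */
	assert(tx_buffs_avail(10, 9) == 0);		/* ring full: time to stop the queue */
	return 0;
}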
164 static inline unsigned long br32(const struct b44 *bp, unsigned long reg) in br32() argument
166 return ssb_read32(bp->sdev, reg); in br32()
169 static inline void bw32(const struct b44 *bp, in bw32() argument
172 ssb_write32(bp->sdev, reg, val); in bw32()
175 static int b44_wait_bit(struct b44 *bp, unsigned long reg, in b44_wait_bit() argument
181 u32 val = br32(bp, reg); in b44_wait_bit()
191 netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n", in b44_wait_bit()
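b44_wait_bit() above spins on a register until a bit reaches the requested state or the timeout expires, logging a netdev error on failure. A hedged userspace model of that poll loop, with the MMIO read stubbed out (in the driver it is br32()/ssb_read32()):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for br32(); always reports the awaited bit as set. */
static uint32_t read_reg_stub(unsigned long reg)
{
	(void)reg;
	return 0x1;
}

/* Poll 'reg' until 'bit' is set (clear == 0) or cleared (clear == 1),
 * giving up after 'timeout' attempts roughly 10us apart. */
static int wait_bit(unsigned long reg, uint32_t bit,
		    unsigned long timeout, int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		uint32_t val = read_reg_stub(reg);

		if (clear ? !(val & bit) : (val & bit))
			return 0;
		usleep(10);
	}
	fprintf(stderr, "timeout waiting for bit %08x of register %lx to %s\n",
		(unsigned int)bit, reg, clear ? "clear" : "set");
	return -1;
}

int main(void)
{
	return wait_bit(0x0, 0x1, 100, 0) ? 1 : 0;
}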
199 static inline void __b44_cam_write(struct b44 *bp, in __b44_cam_write() argument
208 bw32(bp, B44_CAM_DATA_LO, val); in __b44_cam_write()
212 bw32(bp, B44_CAM_DATA_HI, val); in __b44_cam_write()
213 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE | in __b44_cam_write()
215 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); in __b44_cam_write()
218 static inline void __b44_disable_ints(struct b44 *bp) in __b44_disable_ints() argument
220 bw32(bp, B44_IMASK, 0); in __b44_disable_ints()
223 static void b44_disable_ints(struct b44 *bp) in b44_disable_ints() argument
225 __b44_disable_ints(bp); in b44_disable_ints()
228 br32(bp, B44_IMASK); in b44_disable_ints()
231 static void b44_enable_ints(struct b44 *bp) in b44_enable_ints() argument
233 bw32(bp, B44_IMASK, bp->imask); in b44_enable_ints()
236 static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val) in __b44_readphy() argument
240 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); in __b44_readphy()
241 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | in __b44_readphy()
246 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); in __b44_readphy()
247 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA; in __b44_readphy()
252 static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val) in __b44_writephy() argument
254 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); in __b44_writephy()
255 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | in __b44_writephy()
261 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); in __b44_writephy()
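__b44_readphy() and __b44_writephy() above share one handshake: acknowledge EMAC_INT_MII, write a complete MDIO frame into B44_MDIO_DATA, then wait for EMAC_INT_MII to come back; a read additionally masks the result out of the data register with MDIO_DATA_DATA. The frame builder below is a generic clause-22 MDIO sketch whose bit positions are assumptions for illustration, not the b44.h macros:

#include <stdint.h>
#include <stdio.h>

/* Assumed clause-22 MDIO field layout for this sketch only. */
#define SB_START	(1u << 30)	/* start-of-frame */
#define OP_WRITE	(1u << 28)
#define OP_READ		(2u << 28)
#define PHY_SHIFT	23		/* PHY (port) address */
#define REG_SHIFT	18		/* register address */
#define TA_VALID	(2u << 16)	/* turnaround bits */

static uint32_t mdio_read_frame(unsigned int phy, unsigned int reg)
{
	return SB_START | OP_READ |
	       (phy << PHY_SHIFT) | (reg << REG_SHIFT) | TA_VALID;
}

static uint32_t mdio_write_frame(unsigned int phy, unsigned int reg, uint16_t val)
{
	return SB_START | OP_WRITE |
	       (phy << PHY_SHIFT) | (reg << REG_SHIFT) | TA_VALID | val;
}

int main(void)
{
	printf("read  frame: %08x\n", (unsigned int)mdio_read_frame(0x1e, 0x00));
	printf("write frame: %08x\n", (unsigned int)mdio_write_frame(0x1e, 0x00, 0x8000));
	return 0;
}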
264 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val) in b44_readphy() argument
266 if (bp->flags & B44_FLAG_EXTERNAL_PHY) in b44_readphy()
269 return __b44_readphy(bp, bp->phy_addr, reg, val); in b44_readphy()
272 static inline int b44_writephy(struct b44 *bp, int reg, u32 val) in b44_writephy() argument
274 if (bp->flags & B44_FLAG_EXTERNAL_PHY) in b44_writephy()
277 return __b44_writephy(bp, bp->phy_addr, reg, val); in b44_writephy()
284 struct b44 *bp = netdev_priv(dev); in b44_mdio_read_mii() local
285 int rc = __b44_readphy(bp, phy_id, location, &val); in b44_mdio_read_mii()
294 struct b44 *bp = netdev_priv(dev); in b44_mdio_write_mii() local
295 __b44_writephy(bp, phy_id, location, val); in b44_mdio_write_mii()
301 struct b44 *bp = bus->priv; in b44_mdio_read_phylib() local
302 int rc = __b44_readphy(bp, phy_id, location, &val); in b44_mdio_read_phylib()
311 struct b44 *bp = bus->priv; in b44_mdio_write_phylib() local
312 return __b44_writephy(bp, phy_id, location, val); in b44_mdio_write_phylib()
315 static int b44_phy_reset(struct b44 *bp) in b44_phy_reset() argument
320 if (bp->flags & B44_FLAG_EXTERNAL_PHY) in b44_phy_reset()
322 err = b44_writephy(bp, MII_BMCR, BMCR_RESET); in b44_phy_reset()
326 err = b44_readphy(bp, MII_BMCR, &val); in b44_phy_reset()
329 netdev_err(bp->dev, "PHY Reset would not complete\n"); in b44_phy_reset()
337 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags) in __b44_set_flow_ctrl() argument
341 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE); in __b44_set_flow_ctrl()
342 bp->flags |= pause_flags; in __b44_set_flow_ctrl()
344 val = br32(bp, B44_RXCONFIG); in __b44_set_flow_ctrl()
349 bw32(bp, B44_RXCONFIG, val); in __b44_set_flow_ctrl()
351 val = br32(bp, B44_MAC_FLOW); in __b44_set_flow_ctrl()
357 bw32(bp, B44_MAC_FLOW, val); in __b44_set_flow_ctrl()
360 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) in b44_set_flow_ctrl() argument
376 __b44_set_flow_ctrl(bp, pause_enab); in b44_set_flow_ctrl()
381 static void b44_wap54g10_workaround(struct b44 *bp) in b44_wap54g10_workaround() argument
395 err = __b44_readphy(bp, 0, MII_BMCR, &val); in b44_wap54g10_workaround()
401 err = __b44_writephy(bp, 0, MII_BMCR, val); in b44_wap54g10_workaround()
410 static inline void b44_wap54g10_workaround(struct b44 *bp) in b44_wap54g10_workaround() argument
415 static int b44_setup_phy(struct b44 *bp) in b44_setup_phy() argument
420 b44_wap54g10_workaround(bp); in b44_setup_phy()
422 if (bp->flags & B44_FLAG_EXTERNAL_PHY) in b44_setup_phy()
424 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0) in b44_setup_phy()
426 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL, in b44_setup_phy()
429 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0) in b44_setup_phy()
431 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL, in b44_setup_phy()
435 if (!(bp->flags & B44_FLAG_FORCE_LINK)) { in b44_setup_phy()
438 if (bp->flags & B44_FLAG_ADV_10HALF) in b44_setup_phy()
440 if (bp->flags & B44_FLAG_ADV_10FULL) in b44_setup_phy()
442 if (bp->flags & B44_FLAG_ADV_100HALF) in b44_setup_phy()
444 if (bp->flags & B44_FLAG_ADV_100FULL) in b44_setup_phy()
447 if (bp->flags & B44_FLAG_PAUSE_AUTO) in b44_setup_phy()
450 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0) in b44_setup_phy()
452 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE | in b44_setup_phy()
458 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0) in b44_setup_phy()
461 if (bp->flags & B44_FLAG_100_BASE_T) in b44_setup_phy()
463 if (bp->flags & B44_FLAG_FULL_DUPLEX) in b44_setup_phy()
465 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0) in b44_setup_phy()
472 b44_set_flow_ctrl(bp, 0, 0); in b44_setup_phy()
479 static void b44_stats_update(struct b44 *bp) in b44_stats_update() argument
484 val = &bp->hw_stats.tx_good_octets; in b44_stats_update()
485 u64_stats_update_begin(&bp->hw_stats.syncp); in b44_stats_update()
488 *val++ += br32(bp, reg); in b44_stats_update()
492 *val++ += br32(bp, reg); in b44_stats_update()
495 u64_stats_update_end(&bp->hw_stats.syncp); in b44_stats_update()
498 static void b44_link_report(struct b44 *bp) in b44_link_report() argument
500 if (!netif_carrier_ok(bp->dev)) { in b44_link_report()
501 netdev_info(bp->dev, "Link is down\n"); in b44_link_report()
503 netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n", in b44_link_report()
504 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10, in b44_link_report()
505 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half"); in b44_link_report()
507 netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n", in b44_link_report()
508 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off", in b44_link_report()
509 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off"); in b44_link_report()
513 static void b44_check_phy(struct b44 *bp) in b44_check_phy() argument
517 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { in b44_check_phy()
518 bp->flags |= B44_FLAG_100_BASE_T; in b44_check_phy()
519 if (!netif_carrier_ok(bp->dev)) { in b44_check_phy()
520 u32 val = br32(bp, B44_TX_CTRL); in b44_check_phy()
521 if (bp->flags & B44_FLAG_FULL_DUPLEX) in b44_check_phy()
525 bw32(bp, B44_TX_CTRL, val); in b44_check_phy()
526 netif_carrier_on(bp->dev); in b44_check_phy()
527 b44_link_report(bp); in b44_check_phy()
532 if (!b44_readphy(bp, MII_BMSR, &bmsr) && in b44_check_phy()
533 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) && in b44_check_phy()
536 bp->flags |= B44_FLAG_100_BASE_T; in b44_check_phy()
538 bp->flags &= ~B44_FLAG_100_BASE_T; in b44_check_phy()
540 bp->flags |= B44_FLAG_FULL_DUPLEX; in b44_check_phy()
542 bp->flags &= ~B44_FLAG_FULL_DUPLEX; in b44_check_phy()
544 if (!netif_carrier_ok(bp->dev) && in b44_check_phy()
546 u32 val = br32(bp, B44_TX_CTRL); in b44_check_phy()
549 if (bp->flags & B44_FLAG_FULL_DUPLEX) in b44_check_phy()
553 bw32(bp, B44_TX_CTRL, val); in b44_check_phy()
555 if (!(bp->flags & B44_FLAG_FORCE_LINK) && in b44_check_phy()
556 !b44_readphy(bp, MII_ADVERTISE, &local_adv) && in b44_check_phy()
557 !b44_readphy(bp, MII_LPA, &remote_adv)) in b44_check_phy()
558 b44_set_flow_ctrl(bp, local_adv, remote_adv); in b44_check_phy()
561 netif_carrier_on(bp->dev); in b44_check_phy()
562 b44_link_report(bp); in b44_check_phy()
563 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) { in b44_check_phy()
565 netif_carrier_off(bp->dev); in b44_check_phy()
566 b44_link_report(bp); in b44_check_phy()
570 netdev_warn(bp->dev, "Remote fault detected in PHY\n"); in b44_check_phy()
572 netdev_warn(bp->dev, "Jabber detected in PHY\n"); in b44_check_phy()
578 struct b44 *bp = timer_container_of(bp, t, timer); in b44_timer() local
580 spin_lock_irq(&bp->lock); in b44_timer()
582 b44_check_phy(bp); in b44_timer()
584 b44_stats_update(bp); in b44_timer()
586 spin_unlock_irq(&bp->lock); in b44_timer()
588 mod_timer(&bp->timer, round_jiffies(jiffies + HZ)); in b44_timer()
591 static void b44_tx(struct b44 *bp) in b44_tx() argument
596 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK; in b44_tx()
600 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) { in b44_tx()
601 struct ring_info *rp = &bp->tx_buffers[cons]; in b44_tx()
606 dma_unmap_single(bp->sdev->dma_dev, in b44_tx()
618 netdev_completed_queue(bp->dev, pkts_compl, bytes_compl); in b44_tx()
619 bp->tx_cons = cons; in b44_tx()
620 if (netif_queue_stopped(bp->dev) && in b44_tx()
621 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH) in b44_tx()
622 netif_wake_queue(bp->dev); in b44_tx()
624 bw32(bp, B44_GPTIMER, 0); in b44_tx()
632 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) in b44_alloc_rx_skb() argument
644 src_map = &bp->rx_buffers[src_idx]; in b44_alloc_rx_skb()
646 map = &bp->rx_buffers[dest_idx]; in b44_alloc_rx_skb()
647 skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ); in b44_alloc_rx_skb()
651 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, in b44_alloc_rx_skb()
657 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || in b44_alloc_rx_skb()
660 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) in b44_alloc_rx_skb()
661 dma_unmap_single(bp->sdev->dma_dev, mapping, in b44_alloc_rx_skb()
667 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, in b44_alloc_rx_skb()
670 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || in b44_alloc_rx_skb()
672 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) in b44_alloc_rx_skb()
673 dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE); in b44_alloc_rx_skb()
677 bp->force_copybreak = 1; in b44_alloc_rx_skb()
695 dp = &bp->rx_ring[dest_idx]; in b44_alloc_rx_skb()
697 dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset); in b44_alloc_rx_skb()
699 if (bp->flags & B44_FLAG_RX_RING_HACK) in b44_alloc_rx_skb()
700 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, in b44_alloc_rx_skb()
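b44_alloc_rx_skb() above, like the TX path and both ring allocators further down, keeps testing "dma_mapping_error(...) || mapping + len > DMA_BIT_MASK(30)": the core can only address the low 30 bits, so a mapping that lands above that window is unmapped and retried with a replacement buffer (the TX path uses an explicit bounce_skb). A small standalone model of just the window check:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* The b44 DMA engine can only reach 30 bits of bus address space. */
#define DMA_BIT_MASK_30	(((uint64_t)1 << 30) - 1)

/* True if a mapping of 'len' bytes at bus address 'addr' is usable,
 * mirroring the "mapping + len > DMA_BIT_MASK(30)" test above. */
static bool mapping_in_window(uint64_t addr, uint64_t len)
{
	return addr + len <= DMA_BIT_MASK_30;
}

int main(void)
{
	assert(mapping_in_window(0x10000, 1536));		/* well below the window */
	assert(!mapping_in_window(DMA_BIT_MASK_30 - 100, 1536));/* straddles the limit: remap */
	assert(!mapping_in_window((uint64_t)1 << 30, 1536));	/* entirely above it: remap */
	return 0;
}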
707 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) in b44_recycle_rx() argument
716 dest_desc = &bp->rx_ring[dest_idx]; in b44_recycle_rx()
717 dest_map = &bp->rx_buffers[dest_idx]; in b44_recycle_rx()
718 src_desc = &bp->rx_ring[src_idx]; in b44_recycle_rx()
719 src_map = &bp->rx_buffers[src_idx]; in b44_recycle_rx()
727 if (bp->flags & B44_FLAG_RX_RING_HACK) in b44_recycle_rx()
728 b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma, in b44_recycle_rx()
743 if (bp->flags & B44_FLAG_RX_RING_HACK) in b44_recycle_rx()
744 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, in b44_recycle_rx()
748 dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping, in b44_recycle_rx()
753 static int b44_rx(struct b44 *bp, int budget) in b44_rx() argument
759 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK; in b44_rx()
761 cons = bp->rx_cons; in b44_rx()
764 struct ring_info *rp = &bp->rx_buffers[cons]; in b44_rx()
770 dma_sync_single_for_cpu(bp->sdev->dma_dev, map, in b44_rx()
778 b44_recycle_rx(bp, cons, bp->rx_prod); in b44_rx()
780 bp->dev->stats.rx_dropped++; in b44_rx()
799 if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) { in b44_rx()
801 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); in b44_rx()
804 dma_unmap_single(bp->sdev->dma_dev, map, in b44_rx()
812 b44_recycle_rx(bp, cons, bp->rx_prod); in b44_rx()
813 copy_skb = napi_alloc_skb(&bp->napi, len); in b44_rx()
824 skb->protocol = eth_type_trans(skb, bp->dev); in b44_rx()
829 bp->rx_prod = (bp->rx_prod + 1) & in b44_rx()
834 bp->rx_cons = cons; in b44_rx()
835 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc)); in b44_rx()
842 struct b44 *bp = container_of(napi, struct b44, napi); in b44_poll() local
846 spin_lock_irqsave(&bp->lock, flags); in b44_poll()
848 if (bp->istat & (ISTAT_TX | ISTAT_TO)) { in b44_poll()
849 /* spin_lock(&bp->tx_lock); */ in b44_poll()
850 b44_tx(bp); in b44_poll()
851 /* spin_unlock(&bp->tx_lock); */ in b44_poll()
853 if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */ in b44_poll()
854 bp->istat &= ~ISTAT_RFO; in b44_poll()
855 b44_disable_ints(bp); in b44_poll()
856 ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */ in b44_poll()
857 b44_init_rings(bp); in b44_poll()
858 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); in b44_poll()
859 netif_wake_queue(bp->dev); in b44_poll()
862 spin_unlock_irqrestore(&bp->lock, flags); in b44_poll()
865 if (bp->istat & ISTAT_RX) in b44_poll()
866 work_done += b44_rx(bp, budget); in b44_poll()
868 if (bp->istat & ISTAT_ERRORS) { in b44_poll()
869 spin_lock_irqsave(&bp->lock, flags); in b44_poll()
870 b44_halt(bp); in b44_poll()
871 b44_init_rings(bp); in b44_poll()
872 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); in b44_poll()
873 netif_wake_queue(bp->dev); in b44_poll()
874 spin_unlock_irqrestore(&bp->lock, flags); in b44_poll()
880 b44_enable_ints(bp); in b44_poll()
889 struct b44 *bp = netdev_priv(dev); in b44_interrupt() local
893 spin_lock(&bp->lock); in b44_interrupt()
895 istat = br32(bp, B44_ISTAT); in b44_interrupt()
896 imask = br32(bp, B44_IMASK); in b44_interrupt()
911 if (napi_schedule_prep(&bp->napi)) { in b44_interrupt()
915 bp->istat = istat; in b44_interrupt()
916 __b44_disable_ints(bp); in b44_interrupt()
917 __napi_schedule(&bp->napi); in b44_interrupt()
921 bw32(bp, B44_ISTAT, istat); in b44_interrupt()
922 br32(bp, B44_ISTAT); in b44_interrupt()
924 spin_unlock(&bp->lock); in b44_interrupt()
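b44_interrupt() and b44_poll() above split the work the usual NAPI way: the hard IRQ reads ISTAT, masks it with IMASK, stashes the result in bp->istat, disables further interrupts and schedules the poller; the poll side then drains TX/RX and re-enables interrupts when it is done. A compact userspace model of that control flow, with all hardware access stubbed out:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy device state standing in for bp plus the ISTAT/IMASK registers. */
struct nic {
	uint32_t istat;		/* latched interrupt causes */
	uint32_t imask;		/* enabled causes */
	bool	 ints_enabled;
	bool	 poll_scheduled;
};

/* Hard-IRQ half: latch the causes we care about, mask the device,
 * and hand the rest of the work to the poller. */
static void irq_handler(struct nic *nic, uint32_t hw_istat)
{
	uint32_t istat = hw_istat & nic->imask;

	if (!istat)
		return;			/* not ours / spurious */
	nic->istat = istat;
	nic->ints_enabled = false;	/* __b44_disable_ints() analogue */
	nic->poll_scheduled = true;	/* __napi_schedule() analogue */
}

/* Poll half: process the latched causes, then re-arm interrupts. */
static int poll(struct nic *nic, int budget)
{
	int work_done = 0;

	if (nic->istat & 0x1)		/* pretend bit 0 means "RX done" */
		work_done += (budget < 4) ? budget : 4;
	nic->istat = 0;
	nic->poll_scheduled = false;
	if (work_done < budget)
		nic->ints_enabled = true;	/* b44_enable_ints() analogue */
	return work_done;
}

int main(void)
{
	struct nic nic = { .imask = 0x1, .ints_enabled = true };
	int work;

	irq_handler(&nic, 0x1);
	printf("poll scheduled: %d\n", nic.poll_scheduled);
	work = poll(&nic, 64);
	printf("work done: %d, interrupts re-enabled: %d\n", work, nic.ints_enabled);
	return 0;
}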
930 struct b44 *bp = netdev_priv(dev); in b44_tx_timeout() local
934 spin_lock_irq(&bp->lock); in b44_tx_timeout()
936 b44_halt(bp); in b44_tx_timeout()
937 b44_init_rings(bp); in b44_tx_timeout()
938 b44_init_hw(bp, B44_FULL_RESET); in b44_tx_timeout()
940 spin_unlock_irq(&bp->lock); in b44_tx_timeout()
942 b44_enable_ints(bp); in b44_tx_timeout()
949 struct b44 *bp = netdev_priv(dev); in b44_start_xmit() local
956 spin_lock_irqsave(&bp->lock, flags); in b44_start_xmit()
959 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) { in b44_start_xmit()
965 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE); in b44_start_xmit()
966 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { in b44_start_xmit()
970 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) in b44_start_xmit()
971 dma_unmap_single(bp->sdev->dma_dev, mapping, len, in b44_start_xmit()
978 mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data, in b44_start_xmit()
980 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { in b44_start_xmit()
981 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) in b44_start_xmit()
982 dma_unmap_single(bp->sdev->dma_dev, mapping, in b44_start_xmit()
993 entry = bp->tx_prod; in b44_start_xmit()
994 bp->tx_buffers[entry].skb = skb; in b44_start_xmit()
995 bp->tx_buffers[entry].mapping = mapping; in b44_start_xmit()
1002 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl); in b44_start_xmit()
1003 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset); in b44_start_xmit()
1005 if (bp->flags & B44_FLAG_TX_RING_HACK) in b44_start_xmit()
1006 b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma, in b44_start_xmit()
1007 entry * sizeof(bp->tx_ring[0]), in b44_start_xmit()
1012 bp->tx_prod = entry; in b44_start_xmit()
1016 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); in b44_start_xmit()
1017 if (bp->flags & B44_FLAG_BUGGY_TXPTR) in b44_start_xmit()
1018 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); in b44_start_xmit()
1019 if (bp->flags & B44_FLAG_REORDER_BUG) in b44_start_xmit()
1020 br32(bp, B44_DMATX_PTR); in b44_start_xmit()
1024 if (TX_BUFFS_AVAIL(bp) < 1) in b44_start_xmit()
1028 spin_unlock_irqrestore(&bp->lock, flags); in b44_start_xmit()
1039 struct b44 *bp = netdev_priv(dev); in b44_change_mtu() local
1049 spin_lock_irq(&bp->lock); in b44_change_mtu()
1050 b44_halt(bp); in b44_change_mtu()
1052 b44_init_rings(bp); in b44_change_mtu()
1053 b44_init_hw(bp, B44_FULL_RESET); in b44_change_mtu()
1054 spin_unlock_irq(&bp->lock); in b44_change_mtu()
1056 b44_enable_ints(bp); in b44_change_mtu()
1065 * end up in the driver. bp->lock is not held and we are not
1068 static void b44_free_rings(struct b44 *bp) in b44_free_rings() argument
1074 rp = &bp->rx_buffers[i]; in b44_free_rings()
1078 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ, in b44_free_rings()
1086 rp = &bp->tx_buffers[i]; in b44_free_rings()
1090 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len, in b44_free_rings()
1103 static void b44_init_rings(struct b44 *bp) in b44_init_rings() argument
1107 b44_free_rings(bp); in b44_init_rings()
1109 memset(bp->rx_ring, 0, B44_RX_RING_BYTES); in b44_init_rings()
1110 memset(bp->tx_ring, 0, B44_TX_RING_BYTES); in b44_init_rings()
1112 if (bp->flags & B44_FLAG_RX_RING_HACK) in b44_init_rings()
1113 dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma, in b44_init_rings()
1116 if (bp->flags & B44_FLAG_TX_RING_HACK) in b44_init_rings()
1117 dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma, in b44_init_rings()
1120 for (i = 0; i < bp->rx_pending; i++) { in b44_init_rings()
1121 if (b44_alloc_rx_skb(bp, -1, i) < 0) in b44_init_rings()
1130 static void b44_free_consistent(struct b44 *bp) in b44_free_consistent() argument
1132 kfree(bp->rx_buffers); in b44_free_consistent()
1133 bp->rx_buffers = NULL; in b44_free_consistent()
1134 kfree(bp->tx_buffers); in b44_free_consistent()
1135 bp->tx_buffers = NULL; in b44_free_consistent()
1136 if (bp->rx_ring) { in b44_free_consistent()
1137 if (bp->flags & B44_FLAG_RX_RING_HACK) { in b44_free_consistent()
1138 dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma, in b44_free_consistent()
1140 kfree(bp->rx_ring); in b44_free_consistent()
1142 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, in b44_free_consistent()
1143 bp->rx_ring, bp->rx_ring_dma); in b44_free_consistent()
1144 bp->rx_ring = NULL; in b44_free_consistent()
1145 bp->flags &= ~B44_FLAG_RX_RING_HACK; in b44_free_consistent()
1147 if (bp->tx_ring) { in b44_free_consistent()
1148 if (bp->flags & B44_FLAG_TX_RING_HACK) { in b44_free_consistent()
1149 dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma, in b44_free_consistent()
1151 kfree(bp->tx_ring); in b44_free_consistent()
1153 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, in b44_free_consistent()
1154 bp->tx_ring, bp->tx_ring_dma); in b44_free_consistent()
1155 bp->tx_ring = NULL; in b44_free_consistent()
1156 bp->flags &= ~B44_FLAG_TX_RING_HACK; in b44_free_consistent()
1164 static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) in b44_alloc_consistent() argument
1169 bp->rx_buffers = kzalloc(size, gfp); in b44_alloc_consistent()
1170 if (!bp->rx_buffers) in b44_alloc_consistent()
1174 bp->tx_buffers = kzalloc(size, gfp); in b44_alloc_consistent()
1175 if (!bp->tx_buffers) in b44_alloc_consistent()
1179 bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, in b44_alloc_consistent()
1180 &bp->rx_ring_dma, gfp); in b44_alloc_consistent()
1181 if (!bp->rx_ring) { in b44_alloc_consistent()
1192 rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring, in b44_alloc_consistent()
1196 if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) || in b44_alloc_consistent()
1202 bp->rx_ring = rx_ring; in b44_alloc_consistent()
1203 bp->rx_ring_dma = rx_ring_dma; in b44_alloc_consistent()
1204 bp->flags |= B44_FLAG_RX_RING_HACK; in b44_alloc_consistent()
1207 bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, in b44_alloc_consistent()
1208 &bp->tx_ring_dma, gfp); in b44_alloc_consistent()
1209 if (!bp->tx_ring) { in b44_alloc_consistent()
1220 tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring, in b44_alloc_consistent()
1224 if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) || in b44_alloc_consistent()
1230 bp->tx_ring = tx_ring; in b44_alloc_consistent()
1231 bp->tx_ring_dma = tx_ring_dma; in b44_alloc_consistent()
1232 bp->flags |= B44_FLAG_TX_RING_HACK; in b44_alloc_consistent()
1238 b44_free_consistent(bp); in b44_alloc_consistent()
1242 /* bp->lock is held. */
1243 static void b44_clear_stats(struct b44 *bp) in b44_clear_stats() argument
1247 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); in b44_clear_stats()
1249 br32(bp, reg); in b44_clear_stats()
1251 br32(bp, reg); in b44_clear_stats()
1254 /* bp->lock is held. */
1255 static void b44_chip_reset(struct b44 *bp, int reset_kind) in b44_chip_reset() argument
1257 struct ssb_device *sdev = bp->sdev; in b44_chip_reset()
1260 was_enabled = ssb_device_is_enabled(bp->sdev); in b44_chip_reset()
1262 ssb_device_enable(bp->sdev, 0); in b44_chip_reset()
1266 bw32(bp, B44_RCV_LAZY, 0); in b44_chip_reset()
1267 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE); in b44_chip_reset()
1268 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1); in b44_chip_reset()
1269 bw32(bp, B44_DMATX_CTRL, 0); in b44_chip_reset()
1270 bp->tx_prod = bp->tx_cons = 0; in b44_chip_reset()
1271 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) { in b44_chip_reset()
1272 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE, in b44_chip_reset()
1275 bw32(bp, B44_DMARX_CTRL, 0); in b44_chip_reset()
1276 bp->rx_prod = bp->rx_cons = 0; in b44_chip_reset()
1279 b44_clear_stats(bp); in b44_chip_reset()
1290 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | in b44_chip_reset()
1296 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | in b44_chip_reset()
1305 br32(bp, B44_MDIO_CTRL); in b44_chip_reset()
1307 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) { in b44_chip_reset()
1308 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL); in b44_chip_reset()
1309 br32(bp, B44_ENET_CTRL); in b44_chip_reset()
1310 bp->flags |= B44_FLAG_EXTERNAL_PHY; in b44_chip_reset()
1312 u32 val = br32(bp, B44_DEVCTRL); in b44_chip_reset()
1315 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR)); in b44_chip_reset()
1316 br32(bp, B44_DEVCTRL); in b44_chip_reset()
1319 bp->flags &= ~B44_FLAG_EXTERNAL_PHY; in b44_chip_reset()
1323 /* bp->lock is held. */
1324 static void b44_halt(struct b44 *bp) in b44_halt() argument
1326 b44_disable_ints(bp); in b44_halt()
1328 b44_phy_reset(bp); in b44_halt()
1330 netdev_info(bp->dev, "powering down PHY\n"); in b44_halt()
1331 bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN); in b44_halt()
1334 if (bp->flags & B44_FLAG_EXTERNAL_PHY) in b44_halt()
1335 b44_chip_reset(bp, B44_CHIP_RESET_FULL); in b44_halt()
1337 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); in b44_halt()
1340 /* bp->lock is held. */
1341 static void __b44_set_mac_addr(struct b44 *bp) in __b44_set_mac_addr() argument
1343 bw32(bp, B44_CAM_CTRL, 0); in __b44_set_mac_addr()
1344 if (!(bp->dev->flags & IFF_PROMISC)) { in __b44_set_mac_addr()
1347 __b44_cam_write(bp, bp->dev->dev_addr, 0); in __b44_set_mac_addr()
1348 val = br32(bp, B44_CAM_CTRL); in __b44_set_mac_addr()
1349 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); in __b44_set_mac_addr()
1355 struct b44 *bp = netdev_priv(dev); in b44_set_mac_addr() local
1367 spin_lock_irq(&bp->lock); in b44_set_mac_addr()
1369 val = br32(bp, B44_RXCONFIG); in b44_set_mac_addr()
1371 __b44_set_mac_addr(bp); in b44_set_mac_addr()
1373 spin_unlock_irq(&bp->lock); in b44_set_mac_addr()
1379 * packet processing. Invoked with bp->lock held.
1382 static void b44_init_hw(struct b44 *bp, int reset_kind) in b44_init_hw() argument
1386 b44_chip_reset(bp, B44_CHIP_RESET_FULL); in b44_init_hw()
1388 b44_phy_reset(bp); in b44_init_hw()
1389 b44_setup_phy(bp); in b44_init_hw()
1393 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL); in b44_init_hw()
1394 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT)); in b44_init_hw()
1397 __b44_set_rx_mode(bp->dev); in b44_init_hw()
1400 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); in b44_init_hw()
1401 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); in b44_init_hw()
1403 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */ in b44_init_hw()
1405 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | in b44_init_hw()
1408 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE); in b44_init_hw()
1409 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset); in b44_init_hw()
1410 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | in b44_init_hw()
1412 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset); in b44_init_hw()
1414 bw32(bp, B44_DMARX_PTR, bp->rx_pending); in b44_init_hw()
1415 bp->rx_prod = bp->rx_pending; in b44_init_hw()
1417 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); in b44_init_hw()
1420 val = br32(bp, B44_ENET_CTRL); in b44_init_hw()
1421 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE)); in b44_init_hw()
1423 netdev_reset_queue(bp->dev); in b44_init_hw()
1428 struct b44 *bp = netdev_priv(dev); in b44_open() local
1431 err = b44_alloc_consistent(bp, GFP_KERNEL); in b44_open()
1435 napi_enable(&bp->napi); in b44_open()
1437 b44_init_rings(bp); in b44_open()
1438 b44_init_hw(bp, B44_FULL_RESET); in b44_open()
1440 b44_check_phy(bp); in b44_open()
1444 napi_disable(&bp->napi); in b44_open()
1445 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); in b44_open()
1446 b44_free_rings(bp); in b44_open()
1447 b44_free_consistent(bp); in b44_open()
1451 timer_setup(&bp->timer, b44_timer, 0); in b44_open()
1452 bp->timer.expires = jiffies + HZ; in b44_open()
1453 add_timer(&bp->timer); in b44_open()
1455 b44_enable_ints(bp); in b44_open()
1457 if (bp->flags & B44_FLAG_EXTERNAL_PHY) in b44_open()
1478 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset) in bwfilter_table() argument
1484 bw32(bp, B44_FILT_ADDR, table_offset + i); in bwfilter_table()
1485 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]); in bwfilter_table()
1522 static void b44_setup_pseudo_magicp(struct b44 *bp) in b44_setup_pseudo_magicp() argument
1536 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, in b44_setup_pseudo_magicp()
1539 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE); in b44_setup_pseudo_magicp()
1540 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE); in b44_setup_pseudo_magicp()
1545 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, in b44_setup_pseudo_magicp()
1548 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, in b44_setup_pseudo_magicp()
1550 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, in b44_setup_pseudo_magicp()
1556 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, in b44_setup_pseudo_magicp()
1559 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, in b44_setup_pseudo_magicp()
1561 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, in b44_setup_pseudo_magicp()
1568 bw32(bp, B44_WKUP_LEN, val); in b44_setup_pseudo_magicp()
1571 val = br32(bp, B44_DEVCTRL); in b44_setup_pseudo_magicp()
1572 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE); in b44_setup_pseudo_magicp()
1577 static void b44_setup_wol_pci(struct b44 *bp) in b44_setup_wol_pci() argument
1581 if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) { in b44_setup_wol_pci()
1582 bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE); in b44_setup_wol_pci()
1583 pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val); in b44_setup_wol_pci()
1584 pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE); in b44_setup_wol_pci()
1588 static inline void b44_setup_wol_pci(struct b44 *bp) { } in b44_setup_wol_pci() argument
1591 static void b44_setup_wol(struct b44 *bp) in b44_setup_wol() argument
1595 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI); in b44_setup_wol()
1597 if (bp->flags & B44_FLAG_B0_ANDLATER) { in b44_setup_wol()
1599 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE); in b44_setup_wol()
1601 val = bp->dev->dev_addr[2] << 24 | in b44_setup_wol()
1602 bp->dev->dev_addr[3] << 16 | in b44_setup_wol()
1603 bp->dev->dev_addr[4] << 8 | in b44_setup_wol()
1604 bp->dev->dev_addr[5]; in b44_setup_wol()
1605 bw32(bp, B44_ADDR_LO, val); in b44_setup_wol()
1607 val = bp->dev->dev_addr[0] << 8 | in b44_setup_wol()
1608 bp->dev->dev_addr[1]; in b44_setup_wol()
1609 bw32(bp, B44_ADDR_HI, val); in b44_setup_wol()
1611 val = br32(bp, B44_DEVCTRL); in b44_setup_wol()
1612 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE); in b44_setup_wol()
1615 b44_setup_pseudo_magicp(bp); in b44_setup_wol()
1617 b44_setup_wol_pci(bp); in b44_setup_wol()
1622 struct b44 *bp = netdev_priv(dev); in b44_close() local
1626 if (bp->flags & B44_FLAG_EXTERNAL_PHY) in b44_close()
1629 napi_disable(&bp->napi); in b44_close()
1631 timer_delete_sync(&bp->timer); in b44_close()
1633 spin_lock_irq(&bp->lock); in b44_close()
1635 b44_halt(bp); in b44_close()
1636 b44_free_rings(bp); in b44_close()
1639 spin_unlock_irq(&bp->lock); in b44_close()
1643 if (bp->flags & B44_FLAG_WOL_ENABLE) { in b44_close()
1644 b44_init_hw(bp, B44_PARTIAL_RESET); in b44_close()
1645 b44_setup_wol(bp); in b44_close()
1648 b44_free_consistent(bp); in b44_close()
1656 struct b44 *bp = netdev_priv(dev); in b44_get_stats64() local
1657 struct b44_hw_stats *hwstat = &bp->hw_stats; in b44_get_stats64()
1699 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) in __b44_load_mcast() argument
1709 __b44_cam_write(bp, ha->addr, i++ + 1); in __b44_load_mcast()
1716 struct b44 *bp = netdev_priv(dev); in __b44_set_rx_mode() local
1719 val = br32(bp, B44_RXCONFIG); in __b44_set_rx_mode()
1723 bw32(bp, B44_RXCONFIG, val); in __b44_set_rx_mode()
1728 __b44_set_mac_addr(bp); in __b44_set_rx_mode()
1734 i = __b44_load_mcast(bp, dev); in __b44_set_rx_mode()
1737 __b44_cam_write(bp, zero, i); in __b44_set_rx_mode()
1739 bw32(bp, B44_RXCONFIG, val); in __b44_set_rx_mode()
1740 val = br32(bp, B44_CAM_CTRL); in __b44_set_rx_mode()
1741 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); in __b44_set_rx_mode()
1747 struct b44 *bp = netdev_priv(dev); in b44_set_rx_mode() local
1749 spin_lock_irq(&bp->lock); in b44_set_rx_mode()
1751 spin_unlock_irq(&bp->lock); in b44_set_rx_mode()
1756 struct b44 *bp = netdev_priv(dev); in b44_get_msglevel() local
1757 return bp->msg_enable; in b44_get_msglevel()
1762 struct b44 *bp = netdev_priv(dev); in b44_set_msglevel() local
1763 bp->msg_enable = value; in b44_set_msglevel()
1768 struct b44 *bp = netdev_priv(dev); in b44_get_drvinfo() local
1769 struct ssb_bus *bus = bp->sdev->bus; in b44_get_drvinfo()
1788 struct b44 *bp = netdev_priv(dev); in b44_nway_reset() local
1792 spin_lock_irq(&bp->lock); in b44_nway_reset()
1793 b44_readphy(bp, MII_BMCR, &bmcr); in b44_nway_reset()
1794 b44_readphy(bp, MII_BMCR, &bmcr); in b44_nway_reset()
1797 r = b44_writephy(bp, MII_BMCR, in b44_nway_reset()
1799 spin_unlock_irq(&bp->lock); in b44_nway_reset()
1807 struct b44 *bp = netdev_priv(dev); in b44_get_link_ksettings() local
1810 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { in b44_get_link_ksettings()
1825 if (bp->flags & B44_FLAG_ADV_10HALF) in b44_get_link_ksettings()
1827 if (bp->flags & B44_FLAG_ADV_10FULL) in b44_get_link_ksettings()
1829 if (bp->flags & B44_FLAG_ADV_100HALF) in b44_get_link_ksettings()
1831 if (bp->flags & B44_FLAG_ADV_100FULL) in b44_get_link_ksettings()
1834 cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ? in b44_get_link_ksettings()
1836 cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ? in b44_get_link_ksettings()
1839 cmd->base.phy_address = bp->phy_addr; in b44_get_link_ksettings()
1840 cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ? in b44_get_link_ksettings()
1861 struct b44 *bp = netdev_priv(dev); in b44_set_link_ksettings() local
1866 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { in b44_set_link_ksettings()
1868 spin_lock_irq(&bp->lock); in b44_set_link_ksettings()
1870 b44_setup_phy(bp); in b44_set_link_ksettings()
1874 spin_unlock_irq(&bp->lock); in b44_set_link_ksettings()
1897 spin_lock_irq(&bp->lock); in b44_set_link_ksettings()
1900 bp->flags &= ~(B44_FLAG_FORCE_LINK | in b44_set_link_ksettings()
1908 bp->flags |= (B44_FLAG_ADV_10HALF | in b44_set_link_ksettings()
1914 bp->flags |= B44_FLAG_ADV_10HALF; in b44_set_link_ksettings()
1916 bp->flags |= B44_FLAG_ADV_10FULL; in b44_set_link_ksettings()
1918 bp->flags |= B44_FLAG_ADV_100HALF; in b44_set_link_ksettings()
1920 bp->flags |= B44_FLAG_ADV_100FULL; in b44_set_link_ksettings()
1923 bp->flags |= B44_FLAG_FORCE_LINK; in b44_set_link_ksettings()
1924 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX); in b44_set_link_ksettings()
1926 bp->flags |= B44_FLAG_100_BASE_T; in b44_set_link_ksettings()
1928 bp->flags |= B44_FLAG_FULL_DUPLEX; in b44_set_link_ksettings()
1932 b44_setup_phy(bp); in b44_set_link_ksettings()
1934 spin_unlock_irq(&bp->lock); in b44_set_link_ksettings()
1944 struct b44 *bp = netdev_priv(dev); in b44_get_ringparam() local
1947 ering->rx_pending = bp->rx_pending; in b44_get_ringparam()
1957 struct b44 *bp = netdev_priv(dev); in b44_set_ringparam() local
1965 spin_lock_irq(&bp->lock); in b44_set_ringparam()
1967 bp->rx_pending = ering->rx_pending; in b44_set_ringparam()
1968 bp->tx_pending = ering->tx_pending; in b44_set_ringparam()
1970 b44_halt(bp); in b44_set_ringparam()
1971 b44_init_rings(bp); in b44_set_ringparam()
1972 b44_init_hw(bp, B44_FULL_RESET); in b44_set_ringparam()
1973 netif_wake_queue(bp->dev); in b44_set_ringparam()
1974 spin_unlock_irq(&bp->lock); in b44_set_ringparam()
1976 b44_enable_ints(bp); in b44_set_ringparam()
1984 struct b44 *bp = netdev_priv(dev); in b44_get_pauseparam() local
1987 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0; in b44_get_pauseparam()
1989 (bp->flags & B44_FLAG_RX_PAUSE) != 0; in b44_get_pauseparam()
1991 (bp->flags & B44_FLAG_TX_PAUSE) != 0; in b44_get_pauseparam()
1997 struct b44 *bp = netdev_priv(dev); in b44_set_pauseparam() local
1999 spin_lock_irq(&bp->lock); in b44_set_pauseparam()
2001 bp->flags |= B44_FLAG_PAUSE_AUTO; in b44_set_pauseparam()
2003 bp->flags &= ~B44_FLAG_PAUSE_AUTO; in b44_set_pauseparam()
2005 bp->flags |= B44_FLAG_RX_PAUSE; in b44_set_pauseparam()
2007 bp->flags &= ~B44_FLAG_RX_PAUSE; in b44_set_pauseparam()
2009 bp->flags |= B44_FLAG_TX_PAUSE; in b44_set_pauseparam()
2011 bp->flags &= ~B44_FLAG_TX_PAUSE; in b44_set_pauseparam()
2013 if (bp->flags & B44_FLAG_PAUSE_AUTO) { in b44_set_pauseparam()
2014 b44_halt(bp); in b44_set_pauseparam()
2015 b44_init_rings(bp); in b44_set_pauseparam()
2016 b44_init_hw(bp, B44_FULL_RESET); in b44_set_pauseparam()
2018 __b44_set_flow_ctrl(bp, bp->flags); in b44_set_pauseparam()
2021 spin_unlock_irq(&bp->lock); in b44_set_pauseparam()
2023 b44_enable_ints(bp); in b44_set_pauseparam()
2050 struct b44 *bp = netdev_priv(dev); in b44_get_ethtool_stats() local
2051 struct b44_hw_stats *hwstat = &bp->hw_stats; in b44_get_ethtool_stats()
2056 spin_lock_irq(&bp->lock); in b44_get_ethtool_stats()
2057 b44_stats_update(bp); in b44_get_ethtool_stats()
2058 spin_unlock_irq(&bp->lock); in b44_get_ethtool_stats()
2073 struct b44 *bp = netdev_priv(dev); in b44_get_wol() local
2076 if (bp->flags & B44_FLAG_WOL_ENABLE) in b44_get_wol()
2085 struct b44 *bp = netdev_priv(dev); in b44_set_wol() local
2087 spin_lock_irq(&bp->lock); in b44_set_wol()
2089 bp->flags |= B44_FLAG_WOL_ENABLE; in b44_set_wol()
2091 bp->flags &= ~B44_FLAG_WOL_ENABLE; in b44_set_wol()
2092 spin_unlock_irq(&bp->lock); in b44_set_wol()
2094 device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC); in b44_set_wol()
2119 struct b44 *bp = netdev_priv(dev); in b44_ioctl() local
2125 spin_lock_irq(&bp->lock); in b44_ioctl()
2126 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { in b44_ioctl()
2130 err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL); in b44_ioctl()
2132 spin_unlock_irq(&bp->lock); in b44_ioctl()
2137 static int b44_get_invariants(struct b44 *bp) in b44_get_invariants() argument
2139 struct ssb_device *sdev = bp->sdev; in b44_get_invariants()
2143 bp->dma_offset = ssb_dma_translation(sdev); in b44_get_invariants()
2148 bp->phy_addr = sdev->bus->sprom.et1phyaddr; in b44_get_invariants()
2151 bp->phy_addr = sdev->bus->sprom.et0phyaddr; in b44_get_invariants()
2156 bp->phy_addr &= 0x1F; in b44_get_invariants()
2158 eth_hw_addr_set(bp->dev, addr); in b44_get_invariants()
2160 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) { in b44_get_invariants()
2165 bp->imask = IMASK_DEF; in b44_get_invariants()
2168 bp->flags |= B44_FLAG_BUGGY_TXPTR; in b44_get_invariants()
2171 if (bp->sdev->id.revision >= 7) in b44_get_invariants()
2172 bp->flags |= B44_FLAG_B0_ANDLATER; in b44_get_invariants()
2195 struct b44 *bp = netdev_priv(dev); in b44_adjust_link() local
2201 if (bp->old_link != phydev->link) { in b44_adjust_link()
2203 bp->old_link = phydev->link; in b44_adjust_link()
2209 (bp->flags & B44_FLAG_FULL_DUPLEX)) { in b44_adjust_link()
2211 bp->flags &= ~B44_FLAG_FULL_DUPLEX; in b44_adjust_link()
2213 !(bp->flags & B44_FLAG_FULL_DUPLEX)) { in b44_adjust_link()
2215 bp->flags |= B44_FLAG_FULL_DUPLEX; in b44_adjust_link()
2220 u32 val = br32(bp, B44_TX_CTRL); in b44_adjust_link()
2221 if (bp->flags & B44_FLAG_FULL_DUPLEX) in b44_adjust_link()
2225 bw32(bp, B44_TX_CTRL, val); in b44_adjust_link()
2230 static int b44_register_phy_one(struct b44 *bp) in b44_register_phy_one() argument
2234 struct ssb_device *sdev = bp->sdev; in b44_register_phy_one()
2247 mii_bus->priv = bp; in b44_register_phy_one()
2252 mii_bus->phy_mask = ~(1 << bp->phy_addr); in b44_register_phy_one()
2255 bp->mii_bus = mii_bus; in b44_register_phy_one()
2263 if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) && in b44_register_phy_one()
2268 bp->phy_addr); in b44_register_phy_one()
2270 bp->phy_addr = 0; in b44_register_phy_one()
2272 bp->phy_addr); in b44_register_phy_one()
2275 bp->phy_addr); in b44_register_phy_one()
2278 phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link, in b44_register_phy_one()
2282 bp->phy_addr); in b44_register_phy_one()
2295 bp->old_link = 0; in b44_register_phy_one()
2296 bp->phy_addr = phydev->mdio.addr; in b44_register_phy_one()
2312 static void b44_unregister_phy_one(struct b44 *bp) in b44_unregister_phy_one() argument
2314 struct net_device *dev = bp->dev; in b44_unregister_phy_one()
2315 struct mii_bus *mii_bus = bp->mii_bus; in b44_unregister_phy_one()
2326 struct b44 *bp; in b44_init_one() local
2331 dev = alloc_etherdev(sizeof(*bp)); in b44_init_one()
2342 bp = netdev_priv(dev); in b44_init_one()
2343 bp->sdev = sdev; in b44_init_one()
2344 bp->dev = dev; in b44_init_one()
2345 bp->force_copybreak = 0; in b44_init_one()
2347 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); in b44_init_one()
2349 spin_lock_init(&bp->lock); in b44_init_one()
2350 u64_stats_init(&bp->hw_stats.syncp); in b44_init_one()
2352 bp->rx_pending = B44_DEF_RX_RING_PENDING; in b44_init_one()
2353 bp->tx_pending = B44_DEF_TX_RING_PENDING; in b44_init_one()
2356 netif_napi_add(dev, &bp->napi, b44_poll); in b44_init_one()
2377 err = b44_get_invariants(bp); in b44_init_one()
2384 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) { in b44_init_one()
2390 bp->mii_if.dev = dev; in b44_init_one()
2391 bp->mii_if.mdio_read = b44_mdio_read_mii; in b44_init_one()
2392 bp->mii_if.mdio_write = b44_mdio_write_mii; in b44_init_one()
2393 bp->mii_if.phy_id = bp->phy_addr; in b44_init_one()
2394 bp->mii_if.phy_id_mask = 0x1f; in b44_init_one()
2395 bp->mii_if.reg_num_mask = 0x1f; in b44_init_one()
2398 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL | in b44_init_one()
2402 bp->flags |= B44_FLAG_PAUSE_AUTO; in b44_init_one()
2417 b44_chip_reset(bp, B44_CHIP_RESET_FULL); in b44_init_one()
2420 err = b44_phy_reset(bp); in b44_init_one()
2426 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { in b44_init_one()
2427 err = b44_register_phy_one(bp); in b44_init_one()
2445 netif_napi_del(&bp->napi); in b44_init_one()
2455 struct b44 *bp = netdev_priv(dev); in b44_remove_one() local
2458 if (bp->flags & B44_FLAG_EXTERNAL_PHY) in b44_remove_one()
2459 b44_unregister_phy_one(bp); in b44_remove_one()
2462 netif_napi_del(&bp->napi); in b44_remove_one()
2471 struct b44 *bp = netdev_priv(dev); in b44_suspend() local
2476 timer_delete_sync(&bp->timer); in b44_suspend()
2478 spin_lock_irq(&bp->lock); in b44_suspend()
2480 b44_halt(bp); in b44_suspend()
2481 netif_carrier_off(bp->dev); in b44_suspend()
2482 netif_device_detach(bp->dev); in b44_suspend()
2483 b44_free_rings(bp); in b44_suspend()
2485 spin_unlock_irq(&bp->lock); in b44_suspend()
2488 if (bp->flags & B44_FLAG_WOL_ENABLE) { in b44_suspend()
2489 b44_init_hw(bp, B44_PARTIAL_RESET); in b44_suspend()
2490 b44_setup_wol(bp); in b44_suspend()
2500 struct b44 *bp = netdev_priv(dev); in b44_resume() local
2513 spin_lock_irq(&bp->lock); in b44_resume()
2514 b44_init_rings(bp); in b44_resume()
2515 b44_init_hw(bp, B44_FULL_RESET); in b44_resume()
2516 spin_unlock_irq(&bp->lock); in b44_resume()
2526 spin_lock_irq(&bp->lock); in b44_resume()
2527 b44_halt(bp); in b44_resume()
2528 b44_free_rings(bp); in b44_resume()
2529 spin_unlock_irq(&bp->lock); in b44_resume()
2533 netif_device_attach(bp->dev); in b44_resume()
2535 b44_enable_ints(bp); in b44_resume()
2538 mod_timer(&bp->timer, jiffies + 1); in b44_resume()