Lines matching "cp" in drivers/net/ethernet/sun/cassini.c (Sun Cassini Gigabit Ethernet driver). Each hit is shown as its source line number, the line itself, and the enclosing function; lines not containing "cp" are omitted.
111 * also, we need to make cp->lock finer-grained.
161 #define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
230 static void cas_set_link_modes(struct cas *cp);
232 static inline void cas_lock_tx(struct cas *cp) in cas_lock_tx() argument
237 spin_lock_nested(&cp->tx_lock[i], i); in cas_lock_tx()
248 #define cas_lock_all_save(cp, flags) \ argument
250 struct cas *xxxcp = (cp); \
255 static inline void cas_unlock_tx(struct cas *cp) in cas_unlock_tx() argument
260 spin_unlock(&cp->tx_lock[i - 1]); in cas_unlock_tx()
263 #define cas_unlock_all_restore(cp, flags) \ argument
265 struct cas *xxxcp = (cp); \
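cas_lock_tx() takes every per-ring TX lock in ascending index order (spin_lock_nested() with the index as the lockdep subclass) and cas_unlock_tx() releases them in descending order; because all paths acquire in the same order, no two holders can form a deadlock cycle. A minimal userspace sketch of the same discipline, using POSIX mutexes and a hypothetical N_RINGS of 4:

#include <pthread.h>

#define N_RINGS 4

static pthread_mutex_t ring_lock[N_RINGS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

/* Acquire all ring locks in ascending order; every caller uses the
 * same order, so a deadlock cycle cannot form. */
static void rings_lock_all(void)
{
	for (int i = 0; i < N_RINGS; i++)
		pthread_mutex_lock(&ring_lock[i]);
}

/* Release in reverse order, mirroring cas_unlock_tx(). */
static void rings_unlock_all(void)
{
	for (int i = N_RINGS; i > 0; i--)
		pthread_mutex_unlock(&ring_lock[i - 1]);
}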
270 static void cas_disable_irq(struct cas *cp, const int ring) in cas_disable_irq() argument
274 writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK); in cas_disable_irq()
279 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_disable_irq()
292 cp->regs + REG_PLUS_INTRN_MASK(ring)); in cas_disable_irq()
296 writel(INTRN_MASK_CLEAR_ALL, cp->regs + in cas_disable_irq()
303 static inline void cas_mask_intr(struct cas *cp) in cas_mask_intr() argument
308 cas_disable_irq(cp, i); in cas_mask_intr()
311 static void cas_enable_irq(struct cas *cp, const int ring) in cas_enable_irq() argument
314 writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK); in cas_enable_irq()
318 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_enable_irq()
330 writel(INTRN_MASK_RX_EN, cp->regs + in cas_enable_irq()
340 static inline void cas_unmask_intr(struct cas *cp) in cas_unmask_intr() argument
345 cas_enable_irq(cp, i); in cas_unmask_intr()
348 static inline void cas_entropy_gather(struct cas *cp) in cas_entropy_gather() argument
351 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) in cas_entropy_gather()
354 batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV), in cas_entropy_gather()
355 readl(cp->regs + REG_ENTROPY_IV), in cas_entropy_gather()
360 static inline void cas_entropy_reset(struct cas *cp) in cas_entropy_reset() argument
363 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) in cas_entropy_reset()
367 cp->regs + REG_BIM_LOCAL_DEV_EN); in cas_entropy_reset()
368 writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET); in cas_entropy_reset()
369 writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG); in cas_entropy_reset()
372 if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0) in cas_entropy_reset()
373 cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV; in cas_entropy_reset()
380 static u16 cas_phy_read(struct cas *cp, int reg) in cas_phy_read() argument
386 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); in cas_phy_read()
389 writel(cmd, cp->regs + REG_MIF_FRAME); in cas_phy_read()
394 cmd = readl(cp->regs + REG_MIF_FRAME); in cas_phy_read()
401 static int cas_phy_write(struct cas *cp, int reg, u16 val) in cas_phy_write() argument
407 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); in cas_phy_write()
411 writel(cmd, cp->regs + REG_MIF_FRAME); in cas_phy_write()
416 cmd = readl(cp->regs + REG_MIF_FRAME); in cas_phy_write()
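cas_phy_read() and cas_phy_write() build an IEEE 802.3 Clause 22 management frame in REG_MIF_FRAME (the CAS_BASE() calls above place the PHY and register addresses) and then reread the register until the hardware reports completion. A sketch of the standard frame image those fields form; this encoder is an illustration, not the driver's macro:

#include <stdint.h>

/* Clause 22 MDIO frame as a 32-bit register image:
 *   [31:30] ST = 01 (start)   [29:28] OP = 10 read / 01 write
 *   [27:23] PHY address       [22:18] register address
 *   [17:16] TA = 10           [15:0]  data (result field on reads)
 */
static uint32_t mdio_frame(int is_read, uint8_t phy, uint8_t reg,
			   uint16_t data)
{
	uint32_t cmd = 0x1u << 30;		/* ST */

	cmd |= (is_read ? 0x2u : 0x1u) << 28;	/* OP */
	cmd |= (uint32_t)(phy & 0x1f) << 23;
	cmd |= (uint32_t)(reg & 0x1f) << 18;
	cmd |= 0x2u << 16;			/* TA */
	cmd |= data;				/* ignored by the PHY on reads */
	return cmd;
}

The polling loops elided above reread REG_MIF_FRAME until the low turnaround bit reads back set; on a read, the bottom 16 bits then hold the PHY register value.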
423 static void cas_phy_powerup(struct cas *cp) in cas_phy_powerup() argument
425 u16 ctl = cas_phy_read(cp, MII_BMCR); in cas_phy_powerup()
430 cas_phy_write(cp, MII_BMCR, ctl); in cas_phy_powerup()
433 static void cas_phy_powerdown(struct cas *cp) in cas_phy_powerdown() argument
435 u16 ctl = cas_phy_read(cp, MII_BMCR); in cas_phy_powerdown()
440 cas_phy_write(cp, MII_BMCR, ctl); in cas_phy_powerdown()
443 /* cp->lock held. note: the last put_page will free the buffer */
444 static int cas_page_free(struct cas *cp, cas_page_t *page) in cas_page_free() argument
446 dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size, in cas_page_free()
448 __free_pages(page->buffer, cp->page_order); in cas_page_free()
464 static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags) in cas_page_alloc() argument
474 page->buffer = alloc_pages(flags, cp->page_order); in cas_page_alloc()
477 page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0, in cas_page_alloc()
478 cp->page_size, DMA_FROM_DEVICE); in cas_page_alloc()
487 static void cas_spare_init(struct cas *cp) in cas_spare_init() argument
489 spin_lock(&cp->rx_inuse_lock); in cas_spare_init()
490 INIT_LIST_HEAD(&cp->rx_inuse_list); in cas_spare_init()
491 spin_unlock(&cp->rx_inuse_lock); in cas_spare_init()
493 spin_lock(&cp->rx_spare_lock); in cas_spare_init()
494 INIT_LIST_HEAD(&cp->rx_spare_list); in cas_spare_init()
495 cp->rx_spares_needed = RX_SPARE_COUNT; in cas_spare_init()
496 spin_unlock(&cp->rx_spare_lock); in cas_spare_init()
500 static void cas_spare_free(struct cas *cp) in cas_spare_free() argument
506 spin_lock(&cp->rx_spare_lock); in cas_spare_free()
507 list_splice_init(&cp->rx_spare_list, &list); in cas_spare_free()
508 spin_unlock(&cp->rx_spare_lock); in cas_spare_free()
510 cas_page_free(cp, list_entry(elem, cas_page_t, list)); in cas_spare_free()
519 spin_lock(&cp->rx_inuse_lock); in cas_spare_free()
520 list_splice_init(&cp->rx_inuse_list, &list); in cas_spare_free()
521 spin_unlock(&cp->rx_inuse_lock); in cas_spare_free()
523 spin_lock(&cp->rx_spare_lock); in cas_spare_free()
524 list_splice_init(&cp->rx_inuse_list, &list); in cas_spare_free()
525 spin_unlock(&cp->rx_spare_lock); in cas_spare_free()
528 cas_page_free(cp, list_entry(elem, cas_page_t, list)); in cas_spare_free()
533 static void cas_spare_recover(struct cas *cp, const gfp_t flags) in cas_spare_recover() argument
544 spin_lock(&cp->rx_inuse_lock); in cas_spare_recover()
545 list_splice_init(&cp->rx_inuse_list, &list); in cas_spare_recover()
546 spin_unlock(&cp->rx_inuse_lock); in cas_spare_recover()
567 spin_lock(&cp->rx_spare_lock); in cas_spare_recover()
568 if (cp->rx_spares_needed > 0) { in cas_spare_recover()
569 list_add(elem, &cp->rx_spare_list); in cas_spare_recover()
570 cp->rx_spares_needed--; in cas_spare_recover()
571 spin_unlock(&cp->rx_spare_lock); in cas_spare_recover()
573 spin_unlock(&cp->rx_spare_lock); in cas_spare_recover()
574 cas_page_free(cp, page); in cas_spare_recover()
580 spin_lock(&cp->rx_inuse_lock); in cas_spare_recover()
581 list_splice(&list, &cp->rx_inuse_list); in cas_spare_recover()
582 spin_unlock(&cp->rx_inuse_lock); in cas_spare_recover()
585 spin_lock(&cp->rx_spare_lock); in cas_spare_recover()
586 needed = cp->rx_spares_needed; in cas_spare_recover()
587 spin_unlock(&cp->rx_spare_lock); in cas_spare_recover()
595 cas_page_t *spare = cas_page_alloc(cp, flags); in cas_spare_recover()
602 spin_lock(&cp->rx_spare_lock); in cas_spare_recover()
603 list_splice(&list, &cp->rx_spare_list); in cas_spare_recover()
604 cp->rx_spares_needed -= i; in cas_spare_recover()
605 spin_unlock(&cp->rx_spare_lock); in cas_spare_recover()
609 static cas_page_t *cas_page_dequeue(struct cas *cp) in cas_page_dequeue() argument
614 spin_lock(&cp->rx_spare_lock); in cas_page_dequeue()
615 if (list_empty(&cp->rx_spare_list)) { in cas_page_dequeue()
617 spin_unlock(&cp->rx_spare_lock); in cas_page_dequeue()
618 cas_spare_recover(cp, GFP_ATOMIC); in cas_page_dequeue()
619 spin_lock(&cp->rx_spare_lock); in cas_page_dequeue()
620 if (list_empty(&cp->rx_spare_list)) { in cas_page_dequeue()
621 netif_err(cp, rx_err, cp->dev, in cas_page_dequeue()
623 spin_unlock(&cp->rx_spare_lock); in cas_page_dequeue()
628 entry = cp->rx_spare_list.next; in cas_page_dequeue()
630 recover = ++cp->rx_spares_needed; in cas_page_dequeue()
631 spin_unlock(&cp->rx_spare_lock); in cas_page_dequeue()
636 atomic_inc(&cp->reset_task_pending); in cas_page_dequeue()
637 atomic_inc(&cp->reset_task_pending_spare); in cas_page_dequeue()
638 schedule_work(&cp->reset_task); in cas_page_dequeue()
640 atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE); in cas_page_dequeue()
641 schedule_work(&cp->reset_task); in cas_page_dequeue()
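cas_page_dequeue() pops a pre-mapped receive page off rx_spare_list under rx_spare_lock, notes one more page owed to the refill path (rx_spares_needed), calls cas_spare_recover(GFP_ATOMIC) when the list is empty, and schedules the reset task as a last resort. A userspace sketch of that free-list discipline, with a mutex standing in for the spinlock and a hypothetical buf_t:

#include <pthread.h>
#include <stddef.h>

typedef struct buf {
	struct buf *next;
	void *data;
} buf_t;

static pthread_mutex_t spare_lock = PTHREAD_MUTEX_INITIALIZER;
static buf_t *spare_list;	/* singly linked free list */
static int spares_needed;	/* buffers the refill path owes us */

/* Pop a spare; on success, record that the background refill must
 * replace it. Returns NULL when the pool is exhausted (the driver
 * would try an atomic refill and then schedule a reset instead). */
static buf_t *buf_dequeue(void)
{
	buf_t *b;

	pthread_mutex_lock(&spare_lock);
	b = spare_list;
	if (b) {
		spare_list = b->next;
		spares_needed++;
	}
	pthread_mutex_unlock(&spare_lock);
	return b;
}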
648 static void cas_mif_poll(struct cas *cp, const int enable) in cas_mif_poll() argument
652 cfg = readl(cp->regs + REG_MIF_CFG); in cas_mif_poll()
655 if (cp->phy_type & CAS_PHY_MII_MDIO1) in cas_mif_poll()
662 cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr); in cas_mif_poll()
665 cp->regs + REG_MIF_MASK); in cas_mif_poll()
666 writel(cfg, cp->regs + REG_MIF_CFG); in cas_mif_poll()
669 /* Must be invoked under cp->lock */
670 static void cas_begin_auto_negotiation(struct cas *cp, in cas_begin_auto_negotiation() argument
677 int oldstate = cp->lstate; in cas_begin_auto_negotiation()
683 lcntl = cp->link_cntl; in cas_begin_auto_negotiation()
685 cp->link_cntl = BMCR_ANENABLE; in cas_begin_auto_negotiation()
688 cp->link_cntl = 0; in cas_begin_auto_negotiation()
690 cp->link_cntl |= BMCR_SPEED100; in cas_begin_auto_negotiation()
692 cp->link_cntl |= CAS_BMCR_SPEED1000; in cas_begin_auto_negotiation()
694 cp->link_cntl |= BMCR_FULLDPLX; in cas_begin_auto_negotiation()
697 changed = (lcntl != cp->link_cntl); in cas_begin_auto_negotiation()
700 if (cp->lstate == link_up) { in cas_begin_auto_negotiation()
701 netdev_info(cp->dev, "PCS link down\n"); in cas_begin_auto_negotiation()
704 netdev_info(cp->dev, "link configuration changed\n"); in cas_begin_auto_negotiation()
707 cp->lstate = link_down; in cas_begin_auto_negotiation()
708 cp->link_transition = LINK_TRANSITION_LINK_DOWN; in cas_begin_auto_negotiation()
709 if (!cp->hw_running) in cas_begin_auto_negotiation()
718 netif_carrier_off(cp->dev); in cas_begin_auto_negotiation()
725 atomic_inc(&cp->reset_task_pending); in cas_begin_auto_negotiation()
726 atomic_inc(&cp->reset_task_pending_all); in cas_begin_auto_negotiation()
727 schedule_work(&cp->reset_task); in cas_begin_auto_negotiation()
728 cp->timer_ticks = 0; in cas_begin_auto_negotiation()
729 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); in cas_begin_auto_negotiation()
733 if (cp->phy_type & CAS_PHY_SERDES) { in cas_begin_auto_negotiation()
734 u32 val = readl(cp->regs + REG_PCS_MII_CTRL); in cas_begin_auto_negotiation()
736 if (cp->link_cntl & BMCR_ANENABLE) { in cas_begin_auto_negotiation()
738 cp->lstate = link_aneg; in cas_begin_auto_negotiation()
740 if (cp->link_cntl & BMCR_FULLDPLX) in cas_begin_auto_negotiation()
743 cp->lstate = link_force_ok; in cas_begin_auto_negotiation()
745 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_begin_auto_negotiation()
746 writel(val, cp->regs + REG_PCS_MII_CTRL); in cas_begin_auto_negotiation()
749 cas_mif_poll(cp, 0); in cas_begin_auto_negotiation()
750 ctl = cas_phy_read(cp, MII_BMCR); in cas_begin_auto_negotiation()
753 ctl |= cp->link_cntl; in cas_begin_auto_negotiation()
756 cp->lstate = link_aneg; in cas_begin_auto_negotiation()
758 cp->lstate = link_force_ok; in cas_begin_auto_negotiation()
760 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_begin_auto_negotiation()
761 cas_phy_write(cp, MII_BMCR, ctl); in cas_begin_auto_negotiation()
762 cas_mif_poll(cp, 1); in cas_begin_auto_negotiation()
765 cp->timer_ticks = 0; in cas_begin_auto_negotiation()
766 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); in cas_begin_auto_negotiation()
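cas_begin_auto_negotiation() reduces the requested link settings to BMCR bit values in cp->link_cntl: autoneg collapses to BMCR_ANENABLE alone, otherwise speed and duplex bits are OR'd in for forced mode. A sketch of that mapping (constants as defined in linux/mii.h; CAS_BMCR_SPEED1000 in the driver is the standard BMCR 1000 Mb/s bit):

/* BMCR bit values, as in linux/mii.h */
#define BMCR_SPEED1000	0x0040	/* MSB of speed select */
#define BMCR_FULLDPLX	0x0100
#define BMCR_ANENABLE	0x1000
#define BMCR_SPEED100	0x2000

static unsigned short link_cntl_from_settings(int autoneg, int speed,
					      int full_duplex)
{
	unsigned short cntl = 0;

	if (autoneg)
		return BMCR_ANENABLE;	/* forced-mode bits are ignored */
	if (speed == 100)
		cntl |= BMCR_SPEED100;
	else if (speed == 1000)
		cntl |= BMCR_SPEED1000;
	if (full_duplex)
		cntl |= BMCR_FULLDPLX;
	return cntl;
}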
769 /* Must be invoked under cp->lock. */
770 static int cas_reset_mii_phy(struct cas *cp) in cas_reset_mii_phy() argument
775 cas_phy_write(cp, MII_BMCR, BMCR_RESET); in cas_reset_mii_phy()
778 val = cas_phy_read(cp, MII_BMCR); in cas_reset_mii_phy()
786 static void cas_saturn_firmware_init(struct cas *cp) in cas_saturn_firmware_init() argument
792 if (PHY_NS_DP83065 != cp->phy_id) in cas_saturn_firmware_init()
795 err = request_firmware(&fw, fw_name, &cp->pdev->dev); in cas_saturn_firmware_init()
806  	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0]; in cas_saturn_firmware_init()
807 cp->fw_size = fw->size - 2; in cas_saturn_firmware_init()
808 cp->fw_data = vmalloc(cp->fw_size); in cas_saturn_firmware_init()
809 if (!cp->fw_data) in cas_saturn_firmware_init()
811 memcpy(cp->fw_data, &fw->data[2], cp->fw_size); in cas_saturn_firmware_init()
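cas_saturn_firmware_init() treats the firmware image as a two-byte little-endian load address followed by the raw bytes to be written into the DP83065's internal memory. A standalone parser for that layout, as implied by the three lines above:

#include <stdint.h>
#include <stddef.h>

struct saturn_fw {
	uint16_t load_addr;	/* PHY-internal start address */
	const uint8_t *data;	/* bytes written one at a time over MDIO */
	size_t size;
};

/* Returns 0 on success, -1 if the blob cannot hold the 2-byte header. */
static int saturn_fw_parse(const uint8_t *blob, size_t len,
			   struct saturn_fw *out)
{
	if (len < 2)
		return -1;
	out->load_addr = (uint16_t)((blob[1] << 8) | blob[0]);
	out->data = blob + 2;
	out->size = len - 2;
	return 0;
}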
816 static void cas_saturn_firmware_load(struct cas *cp) in cas_saturn_firmware_load() argument
820 if (!cp->fw_data) in cas_saturn_firmware_load()
823 cas_phy_powerdown(cp); in cas_saturn_firmware_load()
826 cas_phy_write(cp, DP83065_MII_MEM, 0x0); in cas_saturn_firmware_load()
829 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9); in cas_saturn_firmware_load()
830 cas_phy_write(cp, DP83065_MII_REGD, 0xbd); in cas_saturn_firmware_load()
831 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa); in cas_saturn_firmware_load()
832 cas_phy_write(cp, DP83065_MII_REGD, 0x82); in cas_saturn_firmware_load()
833 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb); in cas_saturn_firmware_load()
834 cas_phy_write(cp, DP83065_MII_REGD, 0x0); in cas_saturn_firmware_load()
835 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc); in cas_saturn_firmware_load()
836 cas_phy_write(cp, DP83065_MII_REGD, 0x39); in cas_saturn_firmware_load()
839 cas_phy_write(cp, DP83065_MII_MEM, 0x1); in cas_saturn_firmware_load()
840 cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr); in cas_saturn_firmware_load()
841 for (i = 0; i < cp->fw_size; i++) in cas_saturn_firmware_load()
842 cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]); in cas_saturn_firmware_load()
845 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8); in cas_saturn_firmware_load()
846 cas_phy_write(cp, DP83065_MII_REGD, 0x1); in cas_saturn_firmware_load()
851 static void cas_phy_init(struct cas *cp) in cas_phy_init() argument
856 if (CAS_PHY_MII(cp->phy_type)) { in cas_phy_init()
858 cp->regs + REG_PCS_DATAPATH_MODE); in cas_phy_init()
860 cas_mif_poll(cp, 0); in cas_phy_init()
861 cas_reset_mii_phy(cp); /* take out of isolate mode */ in cas_phy_init()
863 if (PHY_LUCENT_B0 == cp->phy_id) { in cas_phy_init()
865 cas_phy_write(cp, LUCENT_MII_REG, 0x8000); in cas_phy_init()
866 cas_phy_write(cp, MII_BMCR, 0x00f1); in cas_phy_init()
867 cas_phy_write(cp, LUCENT_MII_REG, 0x0); in cas_phy_init()
869 } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) { in cas_phy_init()
871 cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20); in cas_phy_init()
872 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012); in cas_phy_init()
873 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804); in cas_phy_init()
874 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013); in cas_phy_init()
875 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204); in cas_phy_init()
876 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); in cas_phy_init()
877 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132); in cas_phy_init()
878 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); in cas_phy_init()
879 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232); in cas_phy_init()
880 cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F); in cas_phy_init()
881 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20); in cas_phy_init()
883 } else if (PHY_BROADCOM_5411 == cp->phy_id) { in cas_phy_init()
884 val = cas_phy_read(cp, BROADCOM_MII_REG4); in cas_phy_init()
885 val = cas_phy_read(cp, BROADCOM_MII_REG4); in cas_phy_init()
888 cas_phy_write(cp, BROADCOM_MII_REG4, in cas_phy_init()
892 } else if (cp->cas_flags & CAS_FLAG_SATURN) { in cas_phy_init()
893 writel((cp->phy_type & CAS_PHY_MII_MDIO0) ? in cas_phy_init()
895 cp->regs + REG_SATURN_PCFG); in cas_phy_init()
901 if (PHY_NS_DP83065 == cp->phy_id) { in cas_phy_init()
902 cas_saturn_firmware_load(cp); in cas_phy_init()
904 cas_phy_powerup(cp); in cas_phy_init()
908 val = cas_phy_read(cp, MII_BMCR); in cas_phy_init()
910 cas_phy_write(cp, MII_BMCR, val); in cas_phy_init()
913 cas_phy_write(cp, MII_ADVERTISE, in cas_phy_init()
914 cas_phy_read(cp, MII_ADVERTISE) | in cas_phy_init()
920 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { in cas_phy_init()
924 val = cas_phy_read(cp, CAS_MII_1000_CTRL); in cas_phy_init()
927 cas_phy_write(cp, CAS_MII_1000_CTRL, val); in cas_phy_init()
936 cp->regs + REG_PCS_DATAPATH_MODE); in cas_phy_init()
939 if (cp->cas_flags & CAS_FLAG_SATURN) in cas_phy_init()
940 writel(0, cp->regs + REG_SATURN_PCFG); in cas_phy_init()
943 val = readl(cp->regs + REG_PCS_MII_CTRL); in cas_phy_init()
945 writel(val, cp->regs + REG_PCS_MII_CTRL); in cas_phy_init()
950 if ((readl(cp->regs + REG_PCS_MII_CTRL) & in cas_phy_init()
955 netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n", in cas_phy_init()
956 readl(cp->regs + REG_PCS_STATE_MACHINE)); in cas_phy_init()
961 writel(0x0, cp->regs + REG_PCS_CFG); in cas_phy_init()
964 val = readl(cp->regs + REG_PCS_MII_ADVERT); in cas_phy_init()
968 writel(val, cp->regs + REG_PCS_MII_ADVERT); in cas_phy_init()
971 writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG); in cas_phy_init()
975 cp->regs + REG_PCS_SERDES_CTRL); in cas_phy_init()
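The PCS bring-up above uses a pattern that recurs throughout the driver (MAC reset, RX reset, global SW reset): write a self-clearing reset bit, then poll the same register a bounded number of times and warn if it never clears. The idiom in isolation, as a kernel-style sketch (hypothetical helper; needs <linux/io.h>, <linux/delay.h>, <linux/errno.h>):

/* Poll @reg until @bit self-clears. Returns 0 on success or
 * -ETIMEDOUT if the hardware never acknowledges the reset. */
static int poll_reset_clear(void __iomem *reg, u32 bit, int tries)
{
	while (tries-- > 0) {
		if (!(readl(reg) & bit))
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}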
980 static int cas_pcs_link_check(struct cas *cp) in cas_pcs_link_check() argument
989 stat = readl(cp->regs + REG_PCS_MII_STATUS); in cas_pcs_link_check()
991 stat = readl(cp->regs + REG_PCS_MII_STATUS); in cas_pcs_link_check()
999 netif_info(cp, link, cp->dev, "PCS RemoteFault\n"); in cas_pcs_link_check()
1004 state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE); in cas_pcs_link_check()
1012 if (cp->lstate != link_up) { in cas_pcs_link_check()
1013 if (cp->opened) { in cas_pcs_link_check()
1014 cp->lstate = link_up; in cas_pcs_link_check()
1015 cp->link_transition = LINK_TRANSITION_LINK_UP; in cas_pcs_link_check()
1017 cas_set_link_modes(cp); in cas_pcs_link_check()
1018 netif_carrier_on(cp->dev); in cas_pcs_link_check()
1021 } else if (cp->lstate == link_up) { in cas_pcs_link_check()
1022 cp->lstate = link_down; in cas_pcs_link_check()
1024 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && in cas_pcs_link_check()
1025 !cp->link_transition_jiffies_valid) { in cas_pcs_link_check()
1039 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; in cas_pcs_link_check()
1040 cp->link_transition_jiffies = jiffies; in cas_pcs_link_check()
1041 cp->link_transition_jiffies_valid = 1; in cas_pcs_link_check()
1043 cp->link_transition = LINK_TRANSITION_ON_FAILURE; in cas_pcs_link_check()
1045 netif_carrier_off(cp->dev); in cas_pcs_link_check()
1046 if (cp->opened) in cas_pcs_link_check()
1047 netif_info(cp, link, cp->dev, "PCS link down\n"); in cas_pcs_link_check()
1057 if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) { in cas_pcs_link_check()
1059 stat = readl(cp->regs + REG_PCS_SERDES_STATE); in cas_pcs_link_check()
1063 } else if (cp->lstate == link_down) { in cas_pcs_link_check()
1065 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && in cas_pcs_link_check()
1066 !cp->link_transition_jiffies_valid) { in cas_pcs_link_check()
1073 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; in cas_pcs_link_check()
1074 cp->link_transition_jiffies = jiffies; in cas_pcs_link_check()
1075 cp->link_transition_jiffies_valid = 1; in cas_pcs_link_check()
1077 cp->link_transition = LINK_TRANSITION_STILL_FAILED; in cas_pcs_link_check()
1085 struct cas *cp, u32 status) in cas_pcs_interrupt() argument
1087 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS); in cas_pcs_interrupt()
1091 return cas_pcs_link_check(cp); in cas_pcs_interrupt()
1095 struct cas *cp, u32 status) in cas_txmac_interrupt() argument
1097 u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS); in cas_txmac_interrupt()
1102 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_txmac_interrupt()
1112 spin_lock(&cp->stat_lock[0]); in cas_txmac_interrupt()
1115 cp->net_stats[0].tx_fifo_errors++; in cas_txmac_interrupt()
1120 cp->net_stats[0].tx_errors++; in cas_txmac_interrupt()
1127 cp->net_stats[0].collisions += 0x10000; in cas_txmac_interrupt()
1130 cp->net_stats[0].tx_aborted_errors += 0x10000; in cas_txmac_interrupt()
1131 cp->net_stats[0].collisions += 0x10000; in cas_txmac_interrupt()
1135 cp->net_stats[0].tx_aborted_errors += 0x10000; in cas_txmac_interrupt()
1136 cp->net_stats[0].collisions += 0x10000; in cas_txmac_interrupt()
1138 spin_unlock(&cp->stat_lock[0]); in cas_txmac_interrupt()
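The += 0x10000 adjustments are not typos: the MAC keeps 16-bit event counters, and the status bits handled here mean one of them wrapped, so the driver credits a full 2^16 events to the wide software counter under the per-ring stat lock. The general idiom, sketched (a generalization of what the driver does; the live 16-bit residue is read and cleared separately on the stats path):

#include <stdint.h>

/* Fold a 16-bit hardware counter into a wide software total.
 * @overflowed is the "counter wrapped" bit from the interrupt status. */
static void fold_hw_counter(uint64_t *sw_total, uint16_t hw_now,
			    uint16_t *hw_last, int overflowed)
{
	if (overflowed)
		*sw_total += 0x10000;			/* one full wrap */
	*sw_total += (uint16_t)(hw_now - *hw_last);	/* residue since last read */
	*hw_last = hw_now;
}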
1146 static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware) in cas_load_firmware() argument
1154 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR); in cas_load_firmware()
1158 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI); in cas_load_firmware()
1167 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID); in cas_load_firmware()
1173 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW); in cas_load_firmware()
1179 static void cas_init_rx_dma(struct cas *cp) in cas_init_rx_dma() argument
1181 u64 desc_dma = cp->block_dvma; in cas_init_rx_dma()
1190 (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */ in cas_init_rx_dma()
1192 writel(val, cp->regs + REG_RX_CFG); in cas_init_rx_dma()
1194 val = (unsigned long) cp->init_rxds[0] - in cas_init_rx_dma()
1195 (unsigned long) cp->init_block; in cas_init_rx_dma()
1196 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI); in cas_init_rx_dma()
1197 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW); in cas_init_rx_dma()
1198 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); in cas_init_rx_dma()
1200 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_init_rx_dma()
1204 val = (unsigned long) cp->init_rxds[1] - in cas_init_rx_dma()
1205 (unsigned long) cp->init_block; in cas_init_rx_dma()
1206 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI); in cas_init_rx_dma()
1207 writel((desc_dma + val) & 0xffffffff, cp->regs + in cas_init_rx_dma()
1209 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs + in cas_init_rx_dma()
1214 val = (unsigned long) cp->init_rxcs[0] - in cas_init_rx_dma()
1215 (unsigned long) cp->init_block; in cas_init_rx_dma()
1216 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI); in cas_init_rx_dma()
1217 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW); in cas_init_rx_dma()
1219 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_init_rx_dma()
1222 val = (unsigned long) cp->init_rxcs[i] - in cas_init_rx_dma()
1223 (unsigned long) cp->init_block; in cas_init_rx_dma()
1224 writel((desc_dma + val) >> 32, cp->regs + in cas_init_rx_dma()
1226 writel((desc_dma + val) & 0xffffffff, cp->regs + in cas_init_rx_dma()
1235 readl(cp->regs + REG_INTR_STATUS_ALIAS); in cas_init_rx_dma()
1236 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR); in cas_init_rx_dma()
1240 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM); in cas_init_rx_dma()
1242 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM); in cas_init_rx_dma()
1243 writel(val, cp->regs + REG_RX_PAUSE_THRESH); in cas_init_rx_dma()
1247 writel(i, cp->regs + REG_RX_TABLE_ADDR); in cas_init_rx_dma()
1248 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW); in cas_init_rx_dma()
1249 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID); in cas_init_rx_dma()
1250 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI); in cas_init_rx_dma()
1254 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR); in cas_init_rx_dma()
1255 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR); in cas_init_rx_dma()
1261 writel(val, cp->regs + REG_RX_BLANK); in cas_init_rx_dma()
1263 writel(0x0, cp->regs + REG_RX_BLANK); in cas_init_rx_dma()
1273 writel(val, cp->regs + REG_RX_AE_THRESH); in cas_init_rx_dma()
1274 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_init_rx_dma()
1276 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH); in cas_init_rx_dma()
1282 writel(0x0, cp->regs + REG_RX_RED); in cas_init_rx_dma()
1286 if (cp->page_size == 0x1000) in cas_init_rx_dma()
1288 else if (cp->page_size == 0x2000) in cas_init_rx_dma()
1290 else if (cp->page_size == 0x4000) in cas_init_rx_dma()
1294 size = cp->dev->mtu + 64; in cas_init_rx_dma()
1295 if (size > cp->page_size) in cas_init_rx_dma()
1296 size = cp->page_size; in cas_init_rx_dma()
1307 cp->mtu_stride = 1 << (i + 10); in cas_init_rx_dma()
1310 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10)); in cas_init_rx_dma()
1312 writel(val, cp->regs + REG_RX_PAGE_SIZE); in cas_init_rx_dma()
1321 writel(val, cp->regs + REG_HP_CFG); in cas_init_rx_dma()
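cas_init_rx_dma() repeatedly programs 64-bit descriptor-ring base addresses (the block DMA handle plus each ring's offset within init_block) into paired 32-bit HI/LOW registers, upper half first. The split as a small kernel-style helper (hypothetical name):

/* Program a 64-bit bus address into a HI/LOW register pair; the
 * driver writes the high half first, as above. */
static void write_dma_addr(u64 addr, void __iomem *hi, void __iomem *lo)
{
	writel((u32)(addr >> 32), hi);
	writel((u32)(addr & 0xffffffff), lo);
}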
1334 static inline cas_page_t *cas_page_spare(struct cas *cp, const int index) in cas_page_spare() argument
1336 cas_page_t *page = cp->rx_pages[1][index]; in cas_page_spare()
1342 new = cas_page_dequeue(cp); in cas_page_spare()
1344 spin_lock(&cp->rx_inuse_lock); in cas_page_spare()
1345 list_add(&page->list, &cp->rx_inuse_list); in cas_page_spare()
1346 spin_unlock(&cp->rx_inuse_lock); in cas_page_spare()
1352 static cas_page_t *cas_page_swap(struct cas *cp, const int ring, in cas_page_swap() argument
1355 cas_page_t **page0 = cp->rx_pages[0]; in cas_page_swap()
1356 cas_page_t **page1 = cp->rx_pages[1]; in cas_page_swap()
1360 cas_page_t *new = cas_page_spare(cp, index); in cas_page_swap()
1370 static void cas_clean_rxds(struct cas *cp) in cas_clean_rxds() argument
1373 struct cas_rx_desc *rxd = cp->init_rxds[0]; in cas_clean_rxds()
1379 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) { in cas_clean_rxds()
1387 cas_page_t *page = cas_page_swap(cp, 0, i); in cas_clean_rxds()
1393 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4; in cas_clean_rxds()
1394 cp->rx_last[0] = 0; in cas_clean_rxds()
1395 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0); in cas_clean_rxds()
1398 static void cas_clean_rxcs(struct cas *cp) in cas_clean_rxcs() argument
1403 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS); in cas_clean_rxcs()
1404 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS); in cas_clean_rxcs()
1406 struct cas_rx_comp *rxc = cp->init_rxcs[i]; in cas_clean_rxcs()
1420 static int cas_rxmac_reset(struct cas *cp)
1422 struct net_device *dev = cp->dev;
1427 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1429 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1439 writel(0, cp->regs + REG_RX_CFG);
1441 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1453 writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1455 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1465 cas_clean_rxds(cp);
1466 cas_clean_rxcs(cp);
1469 cas_init_rx_dma(cp);
1472 val = readl(cp->regs + REG_RX_CFG);
1473 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1474 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1475 val = readl(cp->regs + REG_MAC_RX_CFG);
1476 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1481 static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp, in cas_rxmac_interrupt() argument
1484 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS); in cas_rxmac_interrupt()
1489 netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat); in cas_rxmac_interrupt()
1492 spin_lock(&cp->stat_lock[0]); in cas_rxmac_interrupt()
1494 cp->net_stats[0].rx_frame_errors += 0x10000; in cas_rxmac_interrupt()
1497 cp->net_stats[0].rx_crc_errors += 0x10000; in cas_rxmac_interrupt()
1500 cp->net_stats[0].rx_length_errors += 0x10000; in cas_rxmac_interrupt()
1503 cp->net_stats[0].rx_over_errors++; in cas_rxmac_interrupt()
1504 cp->net_stats[0].rx_fifo_errors++; in cas_rxmac_interrupt()
1510 spin_unlock(&cp->stat_lock[0]); in cas_rxmac_interrupt()
1514 static int cas_mac_interrupt(struct net_device *dev, struct cas *cp, in cas_mac_interrupt() argument
1517 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS); in cas_mac_interrupt()
1522 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_mac_interrupt()
1530 cp->pause_entered++; in cas_mac_interrupt()
1533 cp->pause_last_time_recvd = (stat >> 16); in cas_mac_interrupt()
1539 /* Must be invoked under cp->lock. */
1540 static inline int cas_mdio_link_not_up(struct cas *cp) in cas_mdio_link_not_up() argument
1544 switch (cp->lstate) { in cas_mdio_link_not_up()
1546 netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n"); in cas_mdio_link_not_up()
1547 cas_phy_write(cp, MII_BMCR, cp->link_fcntl); in cas_mdio_link_not_up()
1548 cp->timer_ticks = 5; in cas_mdio_link_not_up()
1549 cp->lstate = link_force_ok; in cas_mdio_link_not_up()
1550 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_mdio_link_not_up()
1554 val = cas_phy_read(cp, MII_BMCR); in cas_mdio_link_not_up()
1561 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? in cas_mdio_link_not_up()
1563 cas_phy_write(cp, MII_BMCR, val); in cas_mdio_link_not_up()
1564 cp->timer_ticks = 5; in cas_mdio_link_not_up()
1565 cp->lstate = link_force_try; in cas_mdio_link_not_up()
1566 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_mdio_link_not_up()
1571 val = cas_phy_read(cp, MII_BMCR); in cas_mdio_link_not_up()
1572 cp->timer_ticks = 5; in cas_mdio_link_not_up()
1576 cas_phy_write(cp, MII_BMCR, val); in cas_mdio_link_not_up()
1586 cas_phy_write(cp, MII_BMCR, val); in cas_mdio_link_not_up()
1597 /* must be invoked with cp->lock held */
1598 static int cas_mii_link_check(struct cas *cp, const u16 bmsr) in cas_mii_link_check() argument
1608 if ((cp->lstate == link_force_try) && in cas_mii_link_check()
1609 (cp->link_cntl & BMCR_ANENABLE)) { in cas_mii_link_check()
1610 cp->lstate = link_force_ret; in cas_mii_link_check()
1611 cp->link_transition = LINK_TRANSITION_LINK_CONFIG; in cas_mii_link_check()
1612 cas_mif_poll(cp, 0); in cas_mii_link_check()
1613 cp->link_fcntl = cas_phy_read(cp, MII_BMCR); in cas_mii_link_check()
1614 cp->timer_ticks = 5; in cas_mii_link_check()
1615 if (cp->opened) in cas_mii_link_check()
1616 netif_info(cp, link, cp->dev, in cas_mii_link_check()
1618 cas_phy_write(cp, MII_BMCR, in cas_mii_link_check()
1619 cp->link_fcntl | BMCR_ANENABLE | in cas_mii_link_check()
1621 cas_mif_poll(cp, 1); in cas_mii_link_check()
1623 } else if (cp->lstate != link_up) { in cas_mii_link_check()
1624 cp->lstate = link_up; in cas_mii_link_check()
1625 cp->link_transition = LINK_TRANSITION_LINK_UP; in cas_mii_link_check()
1627 if (cp->opened) { in cas_mii_link_check()
1628 cas_set_link_modes(cp); in cas_mii_link_check()
1629 netif_carrier_on(cp->dev); in cas_mii_link_check()
1639 if (cp->lstate == link_up) { in cas_mii_link_check()
1640 cp->lstate = link_down; in cas_mii_link_check()
1641 cp->link_transition = LINK_TRANSITION_LINK_DOWN; in cas_mii_link_check()
1643 netif_carrier_off(cp->dev); in cas_mii_link_check()
1644 if (cp->opened) in cas_mii_link_check()
1645 netif_info(cp, link, cp->dev, "Link down\n"); in cas_mii_link_check()
1648 } else if (++cp->timer_ticks > 10) in cas_mii_link_check()
1649 cas_mdio_link_not_up(cp); in cas_mii_link_check()
1654 static int cas_mif_interrupt(struct net_device *dev, struct cas *cp, in cas_mif_interrupt() argument
1657 u32 stat = readl(cp->regs + REG_MIF_STATUS); in cas_mif_interrupt()
1665 return cas_mii_link_check(cp, bmsr); in cas_mif_interrupt()
1668 static int cas_pci_interrupt(struct net_device *dev, struct cas *cp, in cas_pci_interrupt() argument
1671 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS); in cas_pci_interrupt()
1677 stat, readl(cp->regs + REG_BIM_DIAG)); in cas_pci_interrupt()
1681 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0)) in cas_pci_interrupt()
1700 pci_errs = pci_status_get_and_clear_errors(cp->pdev); in cas_pci_interrupt()
1726 static int cas_abnormal_irq(struct net_device *dev, struct cas *cp, in cas_abnormal_irq() argument
1731 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, in cas_abnormal_irq()
1733 spin_lock(&cp->stat_lock[0]); in cas_abnormal_irq()
1734 cp->net_stats[0].rx_errors++; in cas_abnormal_irq()
1735 spin_unlock(&cp->stat_lock[0]); in cas_abnormal_irq()
1741 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, in cas_abnormal_irq()
1743 spin_lock(&cp->stat_lock[0]); in cas_abnormal_irq()
1744 cp->net_stats[0].rx_errors++; in cas_abnormal_irq()
1745 spin_unlock(&cp->stat_lock[0]); in cas_abnormal_irq()
1750 if (cas_pcs_interrupt(dev, cp, status)) in cas_abnormal_irq()
1755 if (cas_txmac_interrupt(dev, cp, status)) in cas_abnormal_irq()
1760 if (cas_rxmac_interrupt(dev, cp, status)) in cas_abnormal_irq()
1765 if (cas_mac_interrupt(dev, cp, status)) in cas_abnormal_irq()
1770 if (cas_mif_interrupt(dev, cp, status)) in cas_abnormal_irq()
1775 if (cas_pci_interrupt(dev, cp, status)) in cas_abnormal_irq()
1782 atomic_inc(&cp->reset_task_pending); in cas_abnormal_irq()
1783 atomic_inc(&cp->reset_task_pending_all); in cas_abnormal_irq()
1785 schedule_work(&cp->reset_task); in cas_abnormal_irq()
1787 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); in cas_abnormal_irq()
1789 schedule_work(&cp->reset_task); in cas_abnormal_irq()
1799 static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr, in cas_calc_tabort() argument
1804 if (CAS_TABORT(cp) == 1) in cas_calc_tabort()
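cas_calc_tabort() exists because affected Cassini revisions can raise a PCI target abort when a TX buffer ends too close to a page boundary; CAS_TABORT(cp) == 1 marks unaffected chips, hence the early return. The fix downstream is to bounce the buffer's tail into a pre-mapped "tiny" buffer. A sketch of the boundary test, assuming a 4 KiB boundary and a hypothetical TAIL_LEN (the driver's threshold constant is elided here):

#include <stdint.h>

#define TAIL_LEN 4	/* placeholder for the driver's abort-window length */

/* Return how many trailing bytes of [addr, addr + len) must be copied
 * into a bounce buffer, or 0 if the end is safely clear of a 4 KiB
 * boundary. */
static int calc_tail_bounce(uintptr_t addr, int len)
{
	uintptr_t end = addr + len;
	uintptr_t to_boundary = (4096 - (end & 4095)) & 4095;

	return (to_boundary > TAIL_LEN) ? 0 : TAIL_LEN;
}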
1811 static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) in cas_tx_ringN() argument
1815 struct net_device *dev = cp->dev; in cas_tx_ringN()
1818 spin_lock(&cp->tx_lock[ring]); in cas_tx_ringN()
1819 txds = cp->init_txds[ring]; in cas_tx_ringN()
1820 skbs = cp->tx_skbs[ring]; in cas_tx_ringN()
1821 entry = cp->tx_old[ring]; in cas_tx_ringN()
1838 + cp->tx_tiny_use[ring][entry].nbufs + 1; in cas_tx_ringN()
1842 netif_printk(cp, tx_done, KERN_DEBUG, cp->dev, in cas_tx_ringN()
1846 cp->tx_tiny_use[ring][entry].nbufs = 0; in cas_tx_ringN()
1854 dma_unmap_page(&cp->pdev->dev, daddr, dlen, in cas_tx_ringN()
1859 if (cp->tx_tiny_use[ring][entry].used) { in cas_tx_ringN()
1860 cp->tx_tiny_use[ring][entry].used = 0; in cas_tx_ringN()
1865 spin_lock(&cp->stat_lock[ring]); in cas_tx_ringN()
1866 cp->net_stats[ring].tx_packets++; in cas_tx_ringN()
1867 cp->net_stats[ring].tx_bytes += skb->len; in cas_tx_ringN()
1868 spin_unlock(&cp->stat_lock[ring]); in cas_tx_ringN()
1871 cp->tx_old[ring] = entry; in cas_tx_ringN()
1878 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) in cas_tx_ringN()
1880 spin_unlock(&cp->tx_lock[ring]); in cas_tx_ringN()
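cas_tx_ringN() is a classic completion-side ring walk: everything between the software consumer index (tx_old) and the hardware limit is finished, so each skb in that window is unmapped, freed, and counted, and the queue is woken once enough descriptors are free again. The shape of the loop, reduced to a userspace sketch with a hypothetical ring struct:

#include <stddef.h>

#define RING_SIZE 256	/* power of two, so wrap is a mask */

struct tx_ring {
	void *cookie[RING_SIZE];	/* per-entry buffer, NULL if unused */
	unsigned int old;		/* software consumer index */
	void (*free_cb)(void *);	/* unmap + free one completed buffer */
};

/* Reclaim entries up to (not including) the hardware @limit. */
static void tx_reclaim(struct tx_ring *r, unsigned int limit)
{
	unsigned int entry = r->old;

	while (entry != limit) {
		void *c = r->cookie[entry];

		if (c) {
			r->cookie[entry] = NULL;
			r->free_cb(c);
		}
		entry = (entry + 1) & (RING_SIZE - 1);
	}
	r->old = entry;
}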
1883 static void cas_tx(struct net_device *dev, struct cas *cp, in cas_tx() argument
1888 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); in cas_tx()
1890 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_tx()
1901 limit = readl(cp->regs + REG_TX_COMPN(ring)); in cas_tx()
1903 if (cp->tx_old[ring] != limit) in cas_tx()
1904 cas_tx_ringN(cp, ring, limit); in cas_tx()
1909 static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, in cas_rx_process_pkt() argument
1930 skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size); in cas_rx_process_pkt()
1941 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
1947 i += cp->crc_size; in cas_rx_process_pkt()
1948 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, in cas_rx_process_pkt()
1951 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
1965 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
1968 hlen = min(cp->page_size - off, dlen); in cas_rx_process_pkt()
1970 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, in cas_rx_process_pkt()
1977 i += cp->crc_size; in cas_rx_process_pkt()
1978 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, in cas_rx_process_pkt()
1986 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
1991 RX_USED_ADD(page, cp->mtu_stride); in cas_rx_process_pkt()
2011 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
2012 dma_sync_single_for_cpu(&cp->pdev->dev, in cas_rx_process_pkt()
2014 hlen + cp->crc_size, in cas_rx_process_pkt()
2016 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
2018 hlen + cp->crc_size, in cas_rx_process_pkt()
2028 RX_USED_ADD(page, hlen + cp->crc_size); in cas_rx_process_pkt()
2031 if (cp->crc_size) in cas_rx_process_pkt()
2040 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
2042 hlen = min(cp->page_size - off, dlen); in cas_rx_process_pkt()
2044 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, in cas_rx_process_pkt()
2051 i += cp->crc_size; in cas_rx_process_pkt()
2052 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, in cas_rx_process_pkt()
2055 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
2059 RX_USED_ADD(page, cp->mtu_stride); in cas_rx_process_pkt()
2067 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; in cas_rx_process_pkt()
2068 dma_sync_single_for_cpu(&cp->pdev->dev, in cas_rx_process_pkt()
2070 dlen + cp->crc_size, in cas_rx_process_pkt()
2072 memcpy(p, page_address(page->buffer), dlen + cp->crc_size); in cas_rx_process_pkt()
2073 dma_sync_single_for_device(&cp->pdev->dev, in cas_rx_process_pkt()
2075 dlen + cp->crc_size, in cas_rx_process_pkt()
2077 RX_USED_ADD(page, dlen + cp->crc_size); in cas_rx_process_pkt()
2080 if (cp->crc_size) in cas_rx_process_pkt()
2087 if (cp->crc_size) { in cas_rx_process_pkt()
2089 csum = csum_fold(csum_partial(crcaddr, cp->crc_size, in cas_rx_process_pkt()
2092 skb->protocol = eth_type_trans(skb, cp->dev); in cas_rx_process_pkt()
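Every copy out of a receive page above is bracketed by dma_sync_single_for_cpu() before the CPU reads the buffer and dma_sync_single_for_device() afterwards, because the pages stay mapped DMA_FROM_DEVICE for their whole lifetime rather than being unmapped per packet. The idiom in isolation (kernel-style sketch, hypothetical helper; needs <linux/dma-mapping.h> and <linux/string.h>):

/* Copy @len bytes out of a long-lived streaming DMA mapping. */
static void rx_copy_out(struct device *dev, dma_addr_t base, size_t off,
			void *dst, const void *src, size_t len)
{
	dma_sync_single_for_cpu(dev, base + off, len, DMA_FROM_DEVICE);
	memcpy(dst, src, len);	/* CPU view is now coherent */
	dma_sync_single_for_device(dev, base + off, len, DMA_FROM_DEVICE);
}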
2116 static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, in cas_rx_flow_pkt() argument
2120 struct sk_buff_head *flow = &cp->rx_flows[flowid]; in cas_rx_flow_pkt()
2137 static void cas_post_page(struct cas *cp, const int ring, const int index) in cas_post_page() argument
2142 entry = cp->rx_old[ring]; in cas_post_page()
2144 new = cas_page_swap(cp, ring, index); in cas_post_page()
2145 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); in cas_post_page()
2146 cp->init_rxds[ring][entry].index = in cas_post_page()
2151 cp->rx_old[ring] = entry; in cas_post_page()
2157 writel(entry, cp->regs + REG_RX_KICK); in cas_post_page()
2159 (cp->cas_flags & CAS_FLAG_REG_PLUS)) in cas_post_page()
2160 writel(entry, cp->regs + REG_PLUS_RX_KICK1); in cas_post_page()
2165 static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) in cas_post_rxds_ringN() argument
2169 cas_page_t **page = cp->rx_pages[ring]; in cas_post_rxds_ringN()
2171 entry = cp->rx_old[ring]; in cas_post_rxds_ringN()
2173 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_post_rxds_ringN()
2183 cas_page_t *new = cas_page_dequeue(cp); in cas_post_rxds_ringN()
2188 cp->cas_flags |= CAS_FLAG_RXD_POST(ring); in cas_post_rxds_ringN()
2189 if (!timer_pending(&cp->link_timer)) in cas_post_rxds_ringN()
2190 mod_timer(&cp->link_timer, jiffies + in cas_post_rxds_ringN()
2192 cp->rx_old[ring] = entry; in cas_post_rxds_ringN()
2193 cp->rx_last[ring] = num ? num - released : 0; in cas_post_rxds_ringN()
2196 spin_lock(&cp->rx_inuse_lock); in cas_post_rxds_ringN()
2197 list_add(&page[entry]->list, &cp->rx_inuse_list); in cas_post_rxds_ringN()
2198 spin_unlock(&cp->rx_inuse_lock); in cas_post_rxds_ringN()
2199 cp->init_rxds[ring][entry].buffer = in cas_post_rxds_ringN()
2212 cp->rx_old[ring] = entry; in cas_post_rxds_ringN()
2218 writel(cluster, cp->regs + REG_RX_KICK); in cas_post_rxds_ringN()
2220 (cp->cas_flags & CAS_FLAG_REG_PLUS)) in cas_post_rxds_ringN()
2221 writel(cluster, cp->regs + REG_PLUS_RX_KICK1); in cas_post_rxds_ringN()
2238 static int cas_rx_ringN(struct cas *cp, int ring, int budget) in cas_rx_ringN() argument
2240 struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; in cas_rx_ringN()
2244 netif_printk(cp, intr, KERN_DEBUG, cp->dev, in cas_rx_ringN()
2247 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]); in cas_rx_ringN()
2249 entry = cp->rx_new[ring]; in cas_rx_ringN()
2275 spin_lock(&cp->stat_lock[ring]); in cas_rx_ringN()
2276 cp->net_stats[ring].rx_errors++; in cas_rx_ringN()
2278 cp->net_stats[ring].rx_length_errors++; in cas_rx_ringN()
2280 cp->net_stats[ring].rx_crc_errors++; in cas_rx_ringN()
2281 spin_unlock(&cp->stat_lock[ring]); in cas_rx_ringN()
2285 spin_lock(&cp->stat_lock[ring]); in cas_rx_ringN()
2286 ++cp->net_stats[ring].rx_dropped; in cas_rx_ringN()
2287 spin_unlock(&cp->stat_lock[ring]); in cas_rx_ringN()
2291 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); in cas_rx_ringN()
2304 cas_rx_flow_pkt(cp, words, skb); in cas_rx_ringN()
2307 spin_lock(&cp->stat_lock[ring]); in cas_rx_ringN()
2308 cp->net_stats[ring].rx_packets++; in cas_rx_ringN()
2309 cp->net_stats[ring].rx_bytes += len; in cas_rx_ringN()
2310 spin_unlock(&cp->stat_lock[ring]); in cas_rx_ringN()
2320 cas_post_page(cp, dring, i); in cas_rx_ringN()
2327 cas_post_page(cp, dring, i); in cas_rx_ringN()
2334 cas_post_page(cp, dring, i); in cas_rx_ringN()
2345 cp->rx_new[ring] = entry; in cas_rx_ringN()
2348 netdev_info(cp->dev, "Memory squeeze, deferring packet\n"); in cas_rx_ringN()
2355 struct cas *cp, int ring) in cas_post_rxcs_ringN() argument
2357 struct cas_rx_comp *rxc = cp->init_rxcs[ring]; in cas_post_rxcs_ringN()
2360 last = cp->rx_cur[ring]; in cas_post_rxcs_ringN()
2361 entry = cp->rx_new[ring]; in cas_post_rxcs_ringN()
2362 netif_printk(cp, intr, KERN_DEBUG, dev, in cas_post_rxcs_ringN()
2364 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry); in cas_post_rxcs_ringN()
2371 cp->rx_cur[ring] = last; in cas_post_rxcs_ringN()
2374 writel(last, cp->regs + REG_RX_COMP_TAIL); in cas_post_rxcs_ringN()
2375 else if (cp->cas_flags & CAS_FLAG_REG_PLUS) in cas_post_rxcs_ringN()
2376 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); in cas_post_rxcs_ringN()
2386 struct cas *cp, const u32 status, in cas_handle_irqN() argument
2390 cas_post_rxcs_ringN(dev, cp, ring); in cas_handle_irqN()
2396 struct cas *cp = netdev_priv(dev); in cas_interruptN() local
2398 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3; in cas_interruptN()
2399 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); in cas_interruptN()
2405 spin_lock_irqsave(&cp->lock, flags); in cas_interruptN()
2408 cas_mask_intr(cp); in cas_interruptN()
2409 napi_schedule(&cp->napi); in cas_interruptN()
2411 cas_rx_ringN(cp, ring, 0); in cas_interruptN()
2417 cas_handle_irqN(dev, cp, status, ring); in cas_interruptN()
2418 spin_unlock_irqrestore(&cp->lock, flags); in cas_interruptN()
2425 static inline void cas_handle_irq1(struct cas *cp, const u32 status) in cas_handle_irq1() argument
2430 cas_post_rxds_ringN(cp, 1, 0); in cas_handle_irq1()
2431 spin_lock(&cp->stat_lock[1]); in cas_handle_irq1()
2432 cp->net_stats[1].rx_dropped++; in cas_handle_irq1()
2433 spin_unlock(&cp->stat_lock[1]); in cas_handle_irq1()
2437 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - in cas_handle_irq1()
2441 cas_post_rxcs_ringN(cp, 1); in cas_handle_irq1()
2448 struct cas *cp = netdev_priv(dev); in cas_interrupt1() local
2450 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); in cas_interrupt1()
2456 spin_lock_irqsave(&cp->lock, flags); in cas_interrupt1()
2459 cas_mask_intr(cp); in cas_interrupt1()
2460 napi_schedule(&cp->napi); in cas_interrupt1()
2462 cas_rx_ringN(cp, 1, 0); in cas_interrupt1()
2467 cas_handle_irq1(cp, status); in cas_interrupt1()
2468 spin_unlock_irqrestore(&cp->lock, flags); in cas_interrupt1()
2474 struct cas *cp, const u32 status) in cas_handle_irq() argument
2478 cas_abnormal_irq(dev, cp, status); in cas_handle_irq()
2484 cas_post_rxds_ringN(cp, 0, 0); in cas_handle_irq()
2485 spin_lock(&cp->stat_lock[0]); in cas_handle_irq()
2486 cp->net_stats[0].rx_dropped++; in cas_handle_irq()
2487 spin_unlock(&cp->stat_lock[0]); in cas_handle_irq()
2489 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - in cas_handle_irq()
2494 cas_post_rxcs_ringN(dev, cp, 0); in cas_handle_irq()
2500 struct cas *cp = netdev_priv(dev); in cas_interrupt() local
2502 u32 status = readl(cp->regs + REG_INTR_STATUS); in cas_interrupt()
2507 spin_lock_irqsave(&cp->lock, flags); in cas_interrupt()
2509 cas_tx(dev, cp, status); in cas_interrupt()
2515 cas_mask_intr(cp); in cas_interrupt()
2516 napi_schedule(&cp->napi); in cas_interrupt()
2518 cas_rx_ringN(cp, 0, 0); in cas_interrupt()
2524 cas_handle_irq(dev, cp, status); in cas_interrupt()
2525 spin_unlock_irqrestore(&cp->lock, flags); in cas_interrupt()
2533 struct cas *cp = container_of(napi, struct cas, napi); in cas_poll() local
2534 struct net_device *dev = cp->dev; in cas_poll()
2536 u32 status = readl(cp->regs + REG_INTR_STATUS); in cas_poll()
2539 spin_lock_irqsave(&cp->lock, flags); in cas_poll()
2540 cas_tx(dev, cp, status); in cas_poll()
2541 spin_unlock_irqrestore(&cp->lock, flags); in cas_poll()
2555 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); in cas_poll()
2565 spin_lock_irqsave(&cp->lock, flags); in cas_poll()
2567 cas_handle_irq(dev, cp, status); in cas_poll()
2571 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); in cas_poll()
2573  			cas_handle_irq1(cp, status); in cas_poll()
2579 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); in cas_poll()
2581 cas_handle_irqN(dev, cp, status, 2); in cas_poll()
2587 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); in cas_poll()
2589 cas_handle_irqN(dev, cp, status, 3); in cas_poll()
2592 spin_unlock_irqrestore(&cp->lock, flags); in cas_poll()
2595 cas_unmask_intr(cp); in cas_poll()
2604 struct cas *cp = netdev_priv(dev); in cas_netpoll() local
2606 cas_disable_irq(cp, 0); in cas_netpoll()
2607 cas_interrupt(cp->pdev->irq, dev); in cas_netpoll()
2608 cas_enable_irq(cp, 0); in cas_netpoll()
2630 struct cas *cp = netdev_priv(dev); in cas_tx_timeout() local
2633 if (!cp->hw_running) { in cas_tx_timeout()
2639 readl(cp->regs + REG_MIF_STATE_MACHINE)); in cas_tx_timeout()
2642 readl(cp->regs + REG_MAC_STATE_MACHINE)); in cas_tx_timeout()
2645 readl(cp->regs + REG_TX_CFG), in cas_tx_timeout()
2646 readl(cp->regs + REG_MAC_TX_STATUS), in cas_tx_timeout()
2647 readl(cp->regs + REG_MAC_TX_CFG), in cas_tx_timeout()
2648 readl(cp->regs + REG_TX_FIFO_PKT_CNT), in cas_tx_timeout()
2649 readl(cp->regs + REG_TX_FIFO_WRITE_PTR), in cas_tx_timeout()
2650 readl(cp->regs + REG_TX_FIFO_READ_PTR), in cas_tx_timeout()
2651 readl(cp->regs + REG_TX_SM_1), in cas_tx_timeout()
2652 readl(cp->regs + REG_TX_SM_2)); in cas_tx_timeout()
2655 readl(cp->regs + REG_RX_CFG), in cas_tx_timeout()
2656 readl(cp->regs + REG_MAC_RX_STATUS), in cas_tx_timeout()
2657 readl(cp->regs + REG_MAC_RX_CFG)); in cas_tx_timeout()
2660 readl(cp->regs + REG_HP_STATE_MACHINE), in cas_tx_timeout()
2661 readl(cp->regs + REG_HP_STATUS0), in cas_tx_timeout()
2662 readl(cp->regs + REG_HP_STATUS1), in cas_tx_timeout()
2663 readl(cp->regs + REG_HP_STATUS2)); in cas_tx_timeout()
2666 atomic_inc(&cp->reset_task_pending); in cas_tx_timeout()
2667 atomic_inc(&cp->reset_task_pending_all); in cas_tx_timeout()
2668 schedule_work(&cp->reset_task); in cas_tx_timeout()
2670 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); in cas_tx_timeout()
2671 schedule_work(&cp->reset_task); in cas_tx_timeout()
2684 static void cas_write_txd(struct cas *cp, int ring, int entry, in cas_write_txd() argument
2687 struct cas_tx_desc *txd = cp->init_txds[ring] + entry; in cas_write_txd()
2698 static inline void *tx_tiny_buf(struct cas *cp, const int ring, in tx_tiny_buf() argument
2701 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; in tx_tiny_buf()
2704 static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, in tx_tiny_map() argument
2707 cp->tx_tiny_use[ring][tentry].nbufs++; in tx_tiny_map()
2708 cp->tx_tiny_use[ring][entry].used = 1; in tx_tiny_map()
2709 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; in tx_tiny_map()
2712 static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, in cas_xmit_tx_ringN() argument
2715 struct net_device *dev = cp->dev; in cas_xmit_tx_ringN()
2722 spin_lock_irqsave(&cp->tx_lock[ring], flags); in cas_xmit_tx_ringN()
2725 if (TX_BUFFS_AVAIL(cp, ring) <= in cas_xmit_tx_ringN()
2726 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { in cas_xmit_tx_ringN()
2728 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); in cas_xmit_tx_ringN()
2743 entry = cp->tx_new[ring]; in cas_xmit_tx_ringN()
2744 cp->tx_skbs[ring][entry] = skb; in cas_xmit_tx_ringN()
2748 mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data), in cas_xmit_tx_ringN()
2752 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); in cas_xmit_tx_ringN()
2755 cas_write_txd(cp, ring, entry, mapping, len - tabort, in cas_xmit_tx_ringN()
2760 tx_tiny_buf(cp, ring, entry), tabort); in cas_xmit_tx_ringN()
2761 mapping = tx_tiny_map(cp, ring, entry, tentry); in cas_xmit_tx_ringN()
2762 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, in cas_xmit_tx_ringN()
2765 cas_write_txd(cp, ring, entry, mapping, len, ctrl | in cas_xmit_tx_ringN()
2774 mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len, in cas_xmit_tx_ringN()
2777 tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len); in cas_xmit_tx_ringN()
2780 cas_write_txd(cp, ring, entry, mapping, len - tabort, in cas_xmit_tx_ringN()
2783 memcpy_from_page(tx_tiny_buf(cp, ring, entry), in cas_xmit_tx_ringN()
2787 mapping = tx_tiny_map(cp, ring, entry, tentry); in cas_xmit_tx_ringN()
2791 cas_write_txd(cp, ring, entry, mapping, len, ctrl, in cas_xmit_tx_ringN()
2796 cp->tx_new[ring] = entry; in cas_xmit_tx_ringN()
2797 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) in cas_xmit_tx_ringN()
2800 netif_printk(cp, tx_queued, KERN_DEBUG, dev, in cas_xmit_tx_ringN()
2802 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring)); in cas_xmit_tx_ringN()
2803 writel(entry, cp->regs + REG_TX_KICKN(ring)); in cas_xmit_tx_ringN()
2804 spin_unlock_irqrestore(&cp->tx_lock[ring], flags); in cas_xmit_tx_ringN()
2810 struct cas *cp = netdev_priv(dev); in cas_start_xmit() local
2817 if (skb_padto(skb, cp->min_frame_size)) in cas_start_xmit()
2823 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) in cas_start_xmit()
2828 static void cas_init_tx_dma(struct cas *cp) in cas_init_tx_dma() argument
2830 u64 desc_dma = cp->block_dvma; in cas_init_tx_dma()
2838 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); in cas_init_tx_dma()
2839 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); in cas_init_tx_dma()
2852 off = (unsigned long) cp->init_txds[i] - in cas_init_tx_dma()
2853 (unsigned long) cp->init_block; in cas_init_tx_dma()
2856 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); in cas_init_tx_dma()
2857 writel((desc_dma + off) & 0xffffffff, cp->regs + in cas_init_tx_dma()
2863 writel(val, cp->regs + REG_TX_CFG); in cas_init_tx_dma()
2869 writel(0x800, cp->regs + REG_TX_MAXBURST_0); in cas_init_tx_dma()
2870 writel(0x1600, cp->regs + REG_TX_MAXBURST_1); in cas_init_tx_dma()
2871 writel(0x2400, cp->regs + REG_TX_MAXBURST_2); in cas_init_tx_dma()
2872 writel(0x4800, cp->regs + REG_TX_MAXBURST_3); in cas_init_tx_dma()
2874 writel(0x800, cp->regs + REG_TX_MAXBURST_0); in cas_init_tx_dma()
2875 writel(0x800, cp->regs + REG_TX_MAXBURST_1); in cas_init_tx_dma()
2876 writel(0x800, cp->regs + REG_TX_MAXBURST_2); in cas_init_tx_dma()
2877 writel(0x800, cp->regs + REG_TX_MAXBURST_3); in cas_init_tx_dma()
2881 /* Must be invoked under cp->lock. */
2882 static inline void cas_init_dma(struct cas *cp) in cas_init_dma() argument
2884 cas_init_tx_dma(cp); in cas_init_dma()
2885 cas_init_rx_dma(cp); in cas_init_dma()
2888 static void cas_process_mc_list(struct cas *cp) in cas_process_mc_list() argument
2896 netdev_for_each_mc_addr(ha, cp->dev) { in cas_process_mc_list()
2902 cp->regs + REG_MAC_ADDRN(i*3 + 0)); in cas_process_mc_list()
2904 cp->regs + REG_MAC_ADDRN(i*3 + 1)); in cas_process_mc_list()
2906 cp->regs + REG_MAC_ADDRN(i*3 + 2)); in cas_process_mc_list()
2919 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i)); in cas_process_mc_list()
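cas_process_mc_list() installs the first few multicast addresses as exact matches (three 16-bit MAC_ADDRN registers each) and hashes the remainder into the 256-bit MAC_HASH_TABLEN filter; the elided lines derive the index from a CRC of the address. A sketch of CRC-indexed filtering into 16 x 16-bit words (the exact bit selection below is an assumption based on the usual Sun MAC layout):

#include <stdint.h>

/* Set the filter bit for one address, given the CRC32 of its bytes:
 * the top 8 CRC bits pick one of 256 filter bits. */
static void hash_set(uint16_t table[16], uint32_t crc)
{
	unsigned int idx = crc >> 24;			/* 0..255 */

	table[idx >> 4] |= 1u << (15 - (idx & 0xf));
}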
2922 /* Must be invoked under cp->lock. */
2923 static u32 cas_setup_multicast(struct cas *cp) in cas_setup_multicast() argument
2928 if (cp->dev->flags & IFF_PROMISC) { in cas_setup_multicast()
2931 } else if (cp->dev->flags & IFF_ALLMULTI) { in cas_setup_multicast()
2933 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); in cas_setup_multicast()
2937 cas_process_mc_list(cp); in cas_setup_multicast()
2944 /* must be invoked under cp->stat_lock[N_TX_RINGS] */
2945 static void cas_clear_mac_err(struct cas *cp) in cas_clear_mac_err() argument
2947 writel(0, cp->regs + REG_MAC_COLL_NORMAL); in cas_clear_mac_err()
2948 writel(0, cp->regs + REG_MAC_COLL_FIRST); in cas_clear_mac_err()
2949 writel(0, cp->regs + REG_MAC_COLL_EXCESS); in cas_clear_mac_err()
2950 writel(0, cp->regs + REG_MAC_COLL_LATE); in cas_clear_mac_err()
2951 writel(0, cp->regs + REG_MAC_TIMER_DEFER); in cas_clear_mac_err()
2952 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); in cas_clear_mac_err()
2953 writel(0, cp->regs + REG_MAC_RECV_FRAME); in cas_clear_mac_err()
2954 writel(0, cp->regs + REG_MAC_LEN_ERR); in cas_clear_mac_err()
2955 writel(0, cp->regs + REG_MAC_ALIGN_ERR); in cas_clear_mac_err()
2956 writel(0, cp->regs + REG_MAC_FCS_ERR); in cas_clear_mac_err()
2957 writel(0, cp->regs + REG_MAC_RX_CODE_ERR); in cas_clear_mac_err()
2961 static void cas_mac_reset(struct cas *cp) in cas_mac_reset() argument
2966 writel(0x1, cp->regs + REG_MAC_TX_RESET); in cas_mac_reset()
2967 writel(0x1, cp->regs + REG_MAC_RX_RESET); in cas_mac_reset()
2972 if (readl(cp->regs + REG_MAC_TX_RESET) == 0) in cas_mac_reset()
2980 if (readl(cp->regs + REG_MAC_RX_RESET) == 0) in cas_mac_reset()
2985 if (readl(cp->regs + REG_MAC_TX_RESET) | in cas_mac_reset()
2986 readl(cp->regs + REG_MAC_RX_RESET)) in cas_mac_reset()
2987 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n", in cas_mac_reset()
2988 readl(cp->regs + REG_MAC_TX_RESET), in cas_mac_reset()
2989 readl(cp->regs + REG_MAC_RX_RESET), in cas_mac_reset()
2990 readl(cp->regs + REG_MAC_STATE_MACHINE)); in cas_mac_reset()
2994 /* Must be invoked under cp->lock. */
2995 static void cas_init_mac(struct cas *cp) in cas_init_mac() argument
2997 const unsigned char *e = &cp->dev->dev_addr[0]; in cas_init_mac()
2999 cas_mac_reset(cp); in cas_init_mac()
3002 writel(CAWR_RR_DIS, cp->regs + REG_CAWR); in cas_init_mac()
3008 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) in cas_init_mac()
3009 writel(INF_BURST_EN, cp->regs + REG_INF_BURST); in cas_init_mac()
3012 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); in cas_init_mac()
3014 writel(0x00, cp->regs + REG_MAC_IPG0); in cas_init_mac()
3015 writel(0x08, cp->regs + REG_MAC_IPG1); in cas_init_mac()
3016 writel(0x04, cp->regs + REG_MAC_IPG2); in cas_init_mac()
3019 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); in cas_init_mac()
3022 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); in cas_init_mac()
3031 cp->regs + REG_MAC_FRAMESIZE_MAX); in cas_init_mac()
3037 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) in cas_init_mac()
3038 writel(0x41, cp->regs + REG_MAC_PA_SIZE); in cas_init_mac()
3040 writel(0x07, cp->regs + REG_MAC_PA_SIZE); in cas_init_mac()
3041 writel(0x04, cp->regs + REG_MAC_JAM_SIZE); in cas_init_mac()
3042 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); in cas_init_mac()
3043 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); in cas_init_mac()
3045 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); in cas_init_mac()
3047 writel(0, cp->regs + REG_MAC_ADDR_FILTER0); in cas_init_mac()
3048 writel(0, cp->regs + REG_MAC_ADDR_FILTER1); in cas_init_mac()
3049 writel(0, cp->regs + REG_MAC_ADDR_FILTER2); in cas_init_mac()
3050 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); in cas_init_mac()
3051 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); in cas_init_mac()
3055 writel(0x0, cp->regs + REG_MAC_ADDRN(i)); in cas_init_mac()
3057 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); in cas_init_mac()
3058 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); in cas_init_mac()
3059 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); in cas_init_mac()
3061 writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); in cas_init_mac()
3062 writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); in cas_init_mac()
3063 writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); in cas_init_mac()
3065 cp->mac_rx_cfg = cas_setup_multicast(cp); in cas_init_mac()
3067 spin_lock(&cp->stat_lock[N_TX_RINGS]); in cas_init_mac()
3068 cas_clear_mac_err(cp); in cas_init_mac()
3069 spin_unlock(&cp->stat_lock[N_TX_RINGS]); in cas_init_mac()
3075 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); in cas_init_mac()
3076 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); in cas_init_mac()
3081 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); in cas_init_mac()
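The station address is programmed 16 bits at a time, lowest-order word in MAC_ADDRN(0) (source lines 3057-3059); the constants written to MAC_ADDRN(42..44) are the MAC control (pause) multicast address 01:80:c2:00:00:01 packed the same way. The packing as a helper:

#include <stdint.h>

/* Pack a 6-byte MAC address into the three 16-bit register words the
 * MAC expects: word 0 holds the two lowest-order bytes. */
static void mac_to_regs(const uint8_t e[6], uint16_t w[3])
{
	w[0] = (uint16_t)((e[4] << 8) | e[5]);
	w[1] = (uint16_t)((e[2] << 8) | e[3]);
	w[2] = (uint16_t)((e[0] << 8) | e[1]);
}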
3084 /* Must be invoked under cp->lock. */
3085 static void cas_init_pause_thresholds(struct cas *cp) in cas_init_pause_thresholds() argument
3090 if (cp->rx_fifo_size <= (2 * 1024)) { in cas_init_pause_thresholds()
3091 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; in cas_init_pause_thresholds()
3093 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; in cas_init_pause_thresholds()
3094 if (max_frame * 3 > cp->rx_fifo_size) { in cas_init_pause_thresholds()
3095 cp->rx_pause_off = 7104; in cas_init_pause_thresholds()
3096 cp->rx_pause_on = 960; in cas_init_pause_thresholds()
3098 int off = (cp->rx_fifo_size - (max_frame * 2)); in cas_init_pause_thresholds()
3100 cp->rx_pause_off = off; in cas_init_pause_thresholds()
3101 cp->rx_pause_on = on; in cas_init_pause_thresholds()
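The pause-threshold math above: a maximum frame is the MTU plus header, FCS, VLAN tag and slop, masked to a 64-byte multiple; a tiny FIFO pauses immediately, a FIFO that cannot hold three such frames falls back to fixed values, and otherwise XOFF fires with two frames of headroom left. The `on` computation is elided above; this sketch assumes one frame of hysteresis below `off`, consistent with the surrounding code:

struct pause_thresh { int off; int on; };

static struct pause_thresh calc_pause(int fifo_size, int mtu)
{
	int max_frame = (mtu + 14 + 4 + 4 + 64) & ~63;
	struct pause_thresh t;

	if (fifo_size <= 2 * 1024) {		/* tiny FIFO: pause at once */
		t.off = t.on = fifo_size;
	} else if (max_frame * 3 > fifo_size) {	/* can't hold 3 frames */
		t.off = 7104;			/* driver's fixed fallback */
		t.on = 960;
	} else {
		t.off = fifo_size - 2 * max_frame;
		t.on = t.off - max_frame;	/* assumed hysteresis step */
	}
	return t;
}

For a 16 KiB FIFO and a 1500-byte MTU: max_frame = 1536, off = 13312, on = 11776.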
3130 static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, in cas_get_vpd_info() argument
3133 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; in cas_get_vpd_info()
3149 cp->regs + REG_BIM_LOCAL_DEV_EN); in cas_get_vpd_info()
3254 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; in cas_get_vpd_info()
3295 addr = of_get_property(cp->of_node, "local-mac-address", NULL); in cas_get_vpd_info()
3310 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); in cas_get_vpd_info()
3315 static void cas_check_pci_invariants(struct cas *cp) in cas_check_pci_invariants() argument
3317 struct pci_dev *pdev = cp->pdev; in cas_check_pci_invariants()
3319 cp->cas_flags = 0; in cas_check_pci_invariants()
3323 cp->cas_flags |= CAS_FLAG_REG_PLUS; in cas_check_pci_invariants()
3325 cp->cas_flags |= CAS_FLAG_TARGET_ABORT; in cas_check_pci_invariants()
3331 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; in cas_check_pci_invariants()
3334 cp->cas_flags |= CAS_FLAG_REG_PLUS; in cas_check_pci_invariants()
3341 cp->cas_flags |= CAS_FLAG_SATURN; in cas_check_pci_invariants()
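
Note: cas_check_pci_invariants() above derives capability flags from the PCI IDs and revision. A plausible reconstruction follows; the CAS_ID_* revision cutoffs come from cassini.h, and the exact comparisons are inferred from the flag order in the listing, so treat them as assumptions.

#include <linux/pci.h>

static u32 demo_pci_flags(struct pci_dev *pdev)
{
	u32 flags = 0;

	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_CASSINI) {
		if (pdev->revision >= CAS_ID_REVPLUS)
			flags |= CAS_FLAG_REG_PLUS;	/* extended regs */
		if (pdev->revision < CAS_ID_REVPLUS02u)
			flags |= CAS_FLAG_TARGET_ABORT;
		if (pdev->revision < CAS_ID_REV2)
			flags |= CAS_FLAG_NO_HW_CSUM;	/* csum engine unsafe */
	} else {
		/* non-Sun parts are REG_PLUS; Saturn gets its own flag */
		flags |= CAS_FLAG_REG_PLUS;
		if (pdev->vendor == PCI_VENDOR_ID_NS &&
		    pdev->device == PCI_DEVICE_ID_NS_SATURN)
			flags |= CAS_FLAG_SATURN;
	}
	return flags;
}
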
3346 static int cas_check_invariants(struct cas *cp) in cas_check_invariants() argument
3348 struct pci_dev *pdev = cp->pdev; in cas_check_invariants()
3354 cp->page_order = 0; in cas_check_invariants()
3363 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; in cas_check_invariants()
3369 cp->page_size = (PAGE_SIZE << cp->page_order); in cas_check_invariants()
3372 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; in cas_check_invariants()
3373 cp->rx_fifo_size = RX_FIFO_SIZE; in cas_check_invariants()
3378 cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn)); in cas_check_invariants()
3379 eth_hw_addr_set(cp->dev, addr); in cas_check_invariants()
3380 if (cp->phy_type & CAS_PHY_SERDES) { in cas_check_invariants()
3381 cp->cas_flags |= CAS_FLAG_1000MB_CAP; in cas_check_invariants()
3386 cfg = readl(cp->regs + REG_MIF_CFG); in cas_check_invariants()
3388 cp->phy_type = CAS_PHY_MII_MDIO1; in cas_check_invariants()
3390 cp->phy_type = CAS_PHY_MII_MDIO0; in cas_check_invariants()
3393 cas_mif_poll(cp, 0); in cas_check_invariants()
3394 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); in cas_check_invariants()
3401 cp->phy_addr = i; in cas_check_invariants()
3402 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; in cas_check_invariants()
3403 phy_id |= cas_phy_read(cp, MII_PHYSID2); in cas_check_invariants()
3405 cp->phy_id = phy_id; in cas_check_invariants()
3411 readl(cp->regs + REG_MIF_STATE_MACHINE)); in cas_check_invariants()
3416 cfg = cas_phy_read(cp, MII_BMSR); in cas_check_invariants()
3418 cas_phy_read(cp, CAS_MII_1000_EXTEND)) in cas_check_invariants()
3419 cp->cas_flags |= CAS_FLAG_1000MB_CAP; in cas_check_invariants()
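
Note: the tail of cas_check_invariants() walks candidate MDIO addresses until a PHY answers with a sane ID. A minimal sketch of that probe loop, assuming the cas_phy_read() helper declared earlier and the struct cas fields shown in the listing.

#include <linux/errno.h>
#include <linux/mii.h>

static int demo_probe_phy(struct cas *cp)
{
	int i;

	for (i = 0; i < 32; i++) {	/* 32 possible MDIO addresses */
		u32 phy_id;

		cp->phy_addr = i;
		phy_id  = cas_phy_read(cp, MII_PHYSID1) << 16;
		phy_id |= cas_phy_read(cp, MII_PHYSID2);
		if (phy_id != 0 && phy_id != 0xffffffff) {
			cp->phy_id = phy_id;	/* something answered */
			return 0;
		}
	}
	return -ENODEV;
}
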
3423 /* Must be invoked under cp->lock. */
3424 static inline void cas_start_dma(struct cas *cp) in cas_start_dma() argument
3431 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; in cas_start_dma()
3432 writel(val, cp->regs + REG_TX_CFG); in cas_start_dma()
3433 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; in cas_start_dma()
3434 writel(val, cp->regs + REG_RX_CFG); in cas_start_dma()
3437 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; in cas_start_dma()
3438 writel(val, cp->regs + REG_MAC_TX_CFG); in cas_start_dma()
3439 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; in cas_start_dma()
3440 writel(val, cp->regs + REG_MAC_RX_CFG); in cas_start_dma()
3444 val = readl(cp->regs + REG_MAC_TX_CFG); in cas_start_dma()
3452 val = readl(cp->regs + REG_MAC_RX_CFG); in cas_start_dma()
3455 netdev_err(cp->dev, in cas_start_dma()
3457 readl(cp->regs + REG_MIF_STATE_MACHINE), in cas_start_dma()
3458 readl(cp->regs + REG_MAC_STATE_MACHINE)); in cas_start_dma()
3464 netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n", in cas_start_dma()
3466 readl(cp->regs + REG_MIF_STATE_MACHINE), in cas_start_dma()
3467 readl(cp->regs + REG_MAC_STATE_MACHINE)); in cas_start_dma()
3470 cas_unmask_intr(cp); /* enable interrupts */ in cas_start_dma()
3471 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); in cas_start_dma()
3472 writel(0, cp->regs + REG_RX_COMP_TAIL); in cas_start_dma()
3474 if (cp->cas_flags & CAS_FLAG_REG_PLUS) { in cas_start_dma()
3477 cp->regs + REG_PLUS_RX_KICK1); in cas_start_dma()
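
Note: cas_start_dma() enables each engine and then polls the same register until the hardware reports the bit latched, bailing out with a diagnostic (MIF/MAC state machines) after a bounded number of tries. The generic shape, with an assumed retry budget.

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* set an enable bit, then wait (bounded) for the hardware to latch it */
static int demo_enable_and_poll(void __iomem *reg, u32 en_bit)
{
	int limit = 10000;	/* assumed budget; the driver uses its own */

	writel(readl(reg) | en_bit, reg);
	while (!(readl(reg) & en_bit)) {
		if (--limit <= 0)
			return -ETIMEDOUT;	/* caller logs state machines */
		udelay(10);
	}
	return 0;
}
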
3481 /* Must be invoked under cp->lock. */
3482 static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, in cas_read_pcs_link_mode() argument
3485 u32 val = readl(cp->regs + REG_PCS_MII_LPA); in cas_read_pcs_link_mode()
3493 /* Must be invoked under cp->lock. */
3494 static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, in cas_read_mii_link_mode() argument
3504 val = cas_phy_read(cp, MII_LPA); in cas_read_mii_link_mode()
3516 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { in cas_read_mii_link_mode()
3517 val = cas_phy_read(cp, CAS_MII_1000_STATUS); in cas_read_mii_link_mode()
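
Note: cas_read_mii_link_mode() above turns the link-partner ability word into duplex/speed/pause outputs. A sketch using the generic linux/mii.h bit names; the 0x01/0x10 pause encoding mirrors what the callers in the listing appear to expect and is an assumption.

#include <linux/mii.h>

/* decode an MII link-partner ability word into duplex/speed/pause */
static void demo_read_lpa(u16 lpa, int *fd, int *spd, int *pause)
{
	*fd = 0; *spd = 10; *pause = 0;

	if (lpa & LPA_PAUSE_CAP)	/* symmetric pause */
		*pause = 0x01;
	if (lpa & LPA_PAUSE_ASYM)	/* asymmetric pause */
		*pause |= 0x10;
	if (lpa & LPA_DUPLEX)
		*fd = 1;
	if (lpa & LPA_100)
		*spd = 100;
	/* gigabit ability lives in CAS_MII_1000_STATUS and is read
	 * separately when CAS_FLAG_1000MB_CAP is set, as above */
}
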
3528 * Must be invoked under cp->lock.
3530 static void cas_set_link_modes(struct cas *cp) in cas_set_link_modes() argument
3539 if (CAS_PHY_MII(cp->phy_type)) { in cas_set_link_modes()
3540 cas_mif_poll(cp, 0); in cas_set_link_modes()
3541 val = cas_phy_read(cp, MII_BMCR); in cas_set_link_modes()
3543 cas_read_mii_link_mode(cp, &full_duplex, &speed, in cas_set_link_modes()
3552 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? in cas_set_link_modes()
3555 cas_mif_poll(cp, 1); in cas_set_link_modes()
3558 val = readl(cp->regs + REG_PCS_MII_CTRL); in cas_set_link_modes()
3559 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); in cas_set_link_modes()
3566 netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n", in cas_set_link_modes()
3570 if (CAS_PHY_MII(cp->phy_type)) { in cas_set_link_modes()
3579 writel(val, cp->regs + REG_MAC_XIF_CFG); in cas_set_link_modes()
3601 cp->regs + REG_MAC_TX_CFG); in cas_set_link_modes()
3603 val = readl(cp->regs + REG_MAC_RX_CFG); in cas_set_link_modes()
3606 cp->regs + REG_MAC_RX_CFG); in cas_set_link_modes()
3608 writel(0x200, cp->regs + REG_MAC_SLOT_TIME); in cas_set_link_modes()
3610 cp->crc_size = 4; in cas_set_link_modes()
3612 cp->min_frame_size = CAS_1000MB_MIN_FRAME; in cas_set_link_modes()
3615 writel(val, cp->regs + REG_MAC_TX_CFG); in cas_set_link_modes()
3620 val = readl(cp->regs + REG_MAC_RX_CFG); in cas_set_link_modes()
3623 cp->crc_size = 0; in cas_set_link_modes()
3624 cp->min_frame_size = CAS_MIN_MTU; in cas_set_link_modes()
3627 cp->crc_size = 4; in cas_set_link_modes()
3628 cp->min_frame_size = CAS_MIN_FRAME; in cas_set_link_modes()
3631 cp->regs + REG_MAC_RX_CFG); in cas_set_link_modes()
3632 writel(0x40, cp->regs + REG_MAC_SLOT_TIME); in cas_set_link_modes()
3635 if (netif_msg_link(cp)) { in cas_set_link_modes()
3637 netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n", in cas_set_link_modes()
3638 cp->rx_fifo_size, in cas_set_link_modes()
3639 cp->rx_pause_off, in cas_set_link_modes()
3640 cp->rx_pause_on); in cas_set_link_modes()
3642 netdev_info(cp->dev, "TX pause enabled\n"); in cas_set_link_modes()
3644 netdev_info(cp->dev, "Pause is disabled\n"); in cas_set_link_modes()
3648 val = readl(cp->regs + REG_MAC_CTRL_CFG); in cas_set_link_modes()
3656 writel(val, cp->regs + REG_MAC_CTRL_CFG); in cas_set_link_modes()
3657 cas_start_dma(cp); in cas_set_link_modes()
3660 /* Must be invoked under cp->lock. */
3661 static void cas_init_hw(struct cas *cp, int restart_link) in cas_init_hw() argument
3664 cas_phy_init(cp); in cas_init_hw()
3666 cas_init_pause_thresholds(cp); in cas_init_hw()
3667 cas_init_mac(cp); in cas_init_hw()
3668 cas_init_dma(cp); in cas_init_hw()
3672 cp->timer_ticks = 0; in cas_init_hw()
3673 cas_begin_auto_negotiation(cp, NULL); in cas_init_hw()
3674 } else if (cp->lstate == link_up) { in cas_init_hw()
3675 cas_set_link_modes(cp); in cas_init_hw()
3676 netif_carrier_on(cp->dev); in cas_init_hw()
3680 /* Must be invoked under cp->lock. On earlier cassini boards,
3684 static void cas_hard_reset(struct cas *cp) in cas_hard_reset() argument
3686 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); in cas_hard_reset()
3688 pci_restore_state(cp->pdev); in cas_hard_reset()
3692 static void cas_global_reset(struct cas *cp, int blkflag) in cas_global_reset() argument
3697 if (blkflag && !CAS_PHY_MII(cp->phy_type)) { in cas_global_reset()
3705 cp->regs + REG_SW_RESET); in cas_global_reset()
3707 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET); in cas_global_reset()
3715 u32 val = readl(cp->regs + REG_SW_RESET); in cas_global_reset()
3720 netdev_err(cp->dev, "sw reset failed\n"); in cas_global_reset()
3725 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG); in cas_global_reset()
3733 PCI_ERR_BIM_DMA_READ), cp->regs + in cas_global_reset()
3739 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); in cas_global_reset()
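
Note: cas_global_reset() issues SW_RESET_TX | SW_RESET_RX and then waits for the controller to clear the bits, printing "sw reset failed" when it never does. A minimal sketch of that wait, with an assumed retry budget and delay.

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int demo_sw_reset(void __iomem *regs)
{
	int limit = 10000;	/* assumed; driver has its own bound */

	writel(SW_RESET_TX | SW_RESET_RX, regs + REG_SW_RESET);
	while (limit-- > 0) {
		u32 val = readl(regs + REG_SW_RESET);

		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
			return 0;	/* both engines left reset */
		udelay(10);
	}
	return -ETIMEDOUT;		/* caller logs "sw reset failed" */
}
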
3742 static void cas_reset(struct cas *cp, int blkflag) in cas_reset() argument
3746 cas_mask_intr(cp); in cas_reset()
3747 cas_global_reset(cp, blkflag); in cas_reset()
3748 cas_mac_reset(cp); in cas_reset()
3749 cas_entropy_reset(cp); in cas_reset()
3752 val = readl(cp->regs + REG_TX_CFG); in cas_reset()
3754 writel(val, cp->regs + REG_TX_CFG); in cas_reset()
3756 val = readl(cp->regs + REG_RX_CFG); in cas_reset()
3758 writel(val, cp->regs + REG_RX_CFG); in cas_reset()
3761 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || in cas_reset()
3763 cas_load_firmware(cp, CAS_HP_FIRMWARE); in cas_reset()
3765 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); in cas_reset()
3769 spin_lock(&cp->stat_lock[N_TX_RINGS]); in cas_reset()
3770 cas_clear_mac_err(cp); in cas_reset()
3771 spin_unlock(&cp->stat_lock[N_TX_RINGS]); in cas_reset()
3775 static void cas_shutdown(struct cas *cp) in cas_shutdown() argument
3780 cp->hw_running = 0; in cas_shutdown()
3782 del_timer_sync(&cp->link_timer); in cas_shutdown()
3786 while (atomic_read(&cp->reset_task_pending_mtu) || in cas_shutdown()
3787 atomic_read(&cp->reset_task_pending_spare) || in cas_shutdown()
3788 atomic_read(&cp->reset_task_pending_all)) in cas_shutdown()
3792 while (atomic_read(&cp->reset_task_pending)) in cas_shutdown()
3796 cas_lock_all_save(cp, flags); in cas_shutdown()
3797 cas_reset(cp, 0); in cas_shutdown()
3798 if (cp->cas_flags & CAS_FLAG_SATURN) in cas_shutdown()
3799 cas_phy_powerdown(cp); in cas_shutdown()
3800 cas_unlock_all_restore(cp, flags); in cas_shutdown()
3805 struct cas *cp = netdev_priv(dev); in cas_change_mtu() local
3813 atomic_inc(&cp->reset_task_pending); in cas_change_mtu()
3814 if ((cp->phy_type & CAS_PHY_SERDES)) { in cas_change_mtu()
3815 atomic_inc(&cp->reset_task_pending_all); in cas_change_mtu()
3817 atomic_inc(&cp->reset_task_pending_mtu); in cas_change_mtu()
3819 schedule_work(&cp->reset_task); in cas_change_mtu()
3821 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? in cas_change_mtu()
3824 schedule_work(&cp->reset_task); in cas_change_mtu()
3827 flush_work(&cp->reset_task); in cas_change_mtu()
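
Note: cas_change_mtu() never resets the chip inline; it records why a reset is needed in the atomic pending counters, kicks the shared reset_task, and flushes it. The recording half of that pattern, as a sketch against the struct cas fields shown in the listing.

#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/types.h>

/* record the reason for the reset, then defer the heavy work */
static void demo_request_reset(struct cas *cp, bool full)
{
	atomic_inc(&cp->reset_task_pending);
	if (full)	/* e.g. SERDES: everything must be redone */
		atomic_inc(&cp->reset_task_pending_all);
	else
		atomic_inc(&cp->reset_task_pending_mtu);
	schedule_work(&cp->reset_task);	/* cas_reset_task() runs later */
}
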
3831 static void cas_clean_txd(struct cas *cp, int ring) in cas_clean_txd() argument
3833 struct cas_tx_desc *txd = cp->init_txds[ring]; in cas_clean_txd()
3834 struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; in cas_clean_txd()
3857 dma_unmap_page(&cp->pdev->dev, daddr, dlen, in cas_clean_txd()
3867 if (cp->tx_tiny_use[ring][ent].used) in cas_clean_txd()
3875 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); in cas_clean_txd()
3879 static inline void cas_free_rx_desc(struct cas *cp, int ring) in cas_free_rx_desc() argument
3881 cas_page_t **page = cp->rx_pages[ring]; in cas_free_rx_desc()
3887 cas_page_free(cp, page[i]); in cas_free_rx_desc()
3893 static void cas_free_rxds(struct cas *cp) in cas_free_rxds() argument
3898 cas_free_rx_desc(cp, i); in cas_free_rxds()
3901 /* Must be invoked under cp->lock. */
3902 static void cas_clean_rings(struct cas *cp) in cas_clean_rings() argument
3907 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); in cas_clean_rings()
3908 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); in cas_clean_rings()
3910 cas_clean_txd(cp, i); in cas_clean_rings()
3913 memset(cp->init_block, 0, sizeof(struct cas_init_block)); in cas_clean_rings()
3914 cas_clean_rxds(cp); in cas_clean_rings()
3915 cas_clean_rxcs(cp); in cas_clean_rings()
3919 static inline int cas_alloc_rx_desc(struct cas *cp, int ring) in cas_alloc_rx_desc() argument
3921 cas_page_t **page = cp->rx_pages[ring]; in cas_alloc_rx_desc()
3926 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) in cas_alloc_rx_desc()
3932 static int cas_alloc_rxds(struct cas *cp) in cas_alloc_rxds() argument
3937 if (cas_alloc_rx_desc(cp, i) < 0) { in cas_alloc_rxds()
3938 cas_free_rxds(cp); in cas_alloc_rxds()
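
Note: cas_alloc_rxds() is an all-or-nothing allocator: the first ring that fails tears down every ring allocated so far. The same shape, spelled out; the loop bound is assumed from the driver's ring count.

#include <linux/errno.h>

static int demo_alloc_all_or_nothing(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_DESC_RINGS; i++) {
		if (cas_alloc_rx_desc(cp, i) < 0) {
			cas_free_rxds(cp);	/* unwind rings 0..i-1 */
			return -ENOMEM;
		}
	}
	return 0;
}
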
3947 struct cas *cp = container_of(work, struct cas, reset_task); in cas_reset_task() local
3949 int pending = atomic_read(&cp->reset_task_pending); in cas_reset_task()
3951 int pending_all = atomic_read(&cp->reset_task_pending_all); in cas_reset_task()
3952 int pending_spare = atomic_read(&cp->reset_task_pending_spare); in cas_reset_task()
3953 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); in cas_reset_task()
3959 atomic_dec(&cp->reset_task_pending); in cas_reset_task()
3967 if (cp->hw_running) { in cas_reset_task()
3971 netif_device_detach(cp->dev); in cas_reset_task()
3972 cas_lock_all_save(cp, flags); in cas_reset_task()
3974 if (cp->opened) { in cas_reset_task()
3979 cas_spare_recover(cp, GFP_ATOMIC); in cas_reset_task()
3997 cas_reset(cp, !(pending_all > 0)); in cas_reset_task()
3998 if (cp->opened) in cas_reset_task()
3999 cas_clean_rings(cp); in cas_reset_task()
4000 cas_init_hw(cp, (pending_all > 0)); in cas_reset_task()
4002 cas_reset(cp, !(pending == CAS_RESET_ALL)); in cas_reset_task()
4003 if (cp->opened) in cas_reset_task()
4004 cas_clean_rings(cp); in cas_reset_task()
4005 cas_init_hw(cp, pending == CAS_RESET_ALL); in cas_reset_task()
4009 cas_unlock_all_restore(cp, flags); in cas_reset_task()
4010 netif_device_attach(cp->dev); in cas_reset_task()
4013 atomic_sub(pending_all, &cp->reset_task_pending_all); in cas_reset_task()
4014 atomic_sub(pending_spare, &cp->reset_task_pending_spare); in cas_reset_task()
4015 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); in cas_reset_task()
4016 atomic_dec(&cp->reset_task_pending); in cas_reset_task()
4018 atomic_set(&cp->reset_task_pending, 0); in cas_reset_task()
4024 struct cas *cp = from_timer(cp, t, link_timer); in cas_link_timer() local
4029 cp->link_transition_jiffies_valid && in cas_link_timer()
4030 time_is_before_jiffies(cp->link_transition_jiffies + in cas_link_timer()
4036 cp->link_transition_jiffies_valid = 0; in cas_link_timer()
4039 if (!cp->hw_running) in cas_link_timer()
4042 spin_lock_irqsave(&cp->lock, flags); in cas_link_timer()
4043 cas_lock_tx(cp); in cas_link_timer()
4044 cas_entropy_gather(cp); in cas_link_timer()
4050 if (atomic_read(&cp->reset_task_pending_all) || in cas_link_timer()
4051 atomic_read(&cp->reset_task_pending_spare) || in cas_link_timer()
4052 atomic_read(&cp->reset_task_pending_mtu)) in cas_link_timer()
4055 if (atomic_read(&cp->reset_task_pending)) in cas_link_timer()
4060 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { in cas_link_timer()
4069 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { in cas_link_timer()
4073 cp->cas_flags &= ~rmask; in cas_link_timer()
4077 if (CAS_PHY_MII(cp->phy_type)) { in cas_link_timer()
4079 cas_mif_poll(cp, 0); in cas_link_timer()
4080 bmsr = cas_phy_read(cp, MII_BMSR); in cas_link_timer()
4086 bmsr = cas_phy_read(cp, MII_BMSR); in cas_link_timer()
4087 cas_mif_poll(cp, 1); in cas_link_timer()
4088 readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ in cas_link_timer()
4089 reset = cas_mii_link_check(cp, bmsr); in cas_link_timer()
4091 reset = cas_pcs_link_check(cp); in cas_link_timer()
4098 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { in cas_link_timer()
4099 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); in cas_link_timer()
4105 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, in cas_link_timer()
4111 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); in cas_link_timer()
4112 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); in cas_link_timer()
4113 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); in cas_link_timer()
4115 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, in cas_link_timer()
4122 cas_hard_reset(cp); in cas_link_timer()
4128 atomic_inc(&cp->reset_task_pending); in cas_link_timer()
4129 atomic_inc(&cp->reset_task_pending_all); in cas_link_timer()
4130 schedule_work(&cp->reset_task); in cas_link_timer()
4132 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); in cas_link_timer()
4134 schedule_work(&cp->reset_task); in cas_link_timer()
4139 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); in cas_link_timer()
4140 cas_unlock_tx(cp); in cas_link_timer()
4141 spin_unlock_irqrestore(&cp->lock, flags); in cas_link_timer()
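
Note: cas_link_timer() is self-rearming: it polls under cp->lock plus the TX locks, then pushes its own deadline forward with mod_timer(). The skeleton, with the bookkeeping elided.

#include <linux/timer.h>
#include <linux/jiffies.h>

static void demo_link_timer(struct timer_list *t)
{
	struct cas *cp = from_timer(cp, t, link_timer);

	if (!cp->hw_running)
		return;		/* shutdown path stopped us; don't rearm */

	/* ... check link, replenish rings, maybe schedule reset_task ... */

	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}
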
4147 static void cas_tx_tiny_free(struct cas *cp) in cas_tx_tiny_free() argument
4149 struct pci_dev *pdev = cp->pdev; in cas_tx_tiny_free()
4153 if (!cp->tx_tiny_bufs[i]) in cas_tx_tiny_free()
4157 cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]); in cas_tx_tiny_free()
4158 cp->tx_tiny_bufs[i] = NULL; in cas_tx_tiny_free()
4162 static int cas_tx_tiny_alloc(struct cas *cp) in cas_tx_tiny_alloc() argument
4164 struct pci_dev *pdev = cp->pdev; in cas_tx_tiny_alloc()
4168 cp->tx_tiny_bufs[i] = in cas_tx_tiny_alloc()
4170 &cp->tx_tiny_dvma[i], GFP_KERNEL); in cas_tx_tiny_alloc()
4171 if (!cp->tx_tiny_bufs[i]) { in cas_tx_tiny_alloc()
4172 cas_tx_tiny_free(cp); in cas_tx_tiny_alloc()
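
Note: cas_tx_tiny_alloc() grabs one coherent DMA block per TX ring and frees everything on the first failure. A sketch, with bufsz standing in for the driver's block-size constant.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int demo_tx_tiny_alloc(struct cas *cp, size_t bufsz)
{
	struct device *dev = &cp->pdev->dev;
	int i;

	for (i = 0; i < N_TX_RINGS; i++) {
		/* returns a CPU pointer; fills in the bus address the
		 * NIC will DMA from */
		cp->tx_tiny_bufs[i] = dma_alloc_coherent(dev, bufsz,
						&cp->tx_tiny_dvma[i],
						GFP_KERNEL);
		if (!cp->tx_tiny_bufs[i]) {
			cas_tx_tiny_free(cp);	/* frees the earlier rings */
			return -ENOMEM;
		}
	}
	return 0;
}
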
4182 struct cas *cp = netdev_priv(dev); in cas_open() local
4186 mutex_lock(&cp->pm_mutex); in cas_open()
4188 hw_was_up = cp->hw_running; in cas_open()
4191 * etc. state so it is safe to do this bit without cp->lock in cas_open()
4193 if (!cp->hw_running) { in cas_open()
4195 cas_lock_all_save(cp, flags); in cas_open()
4201 cas_reset(cp, 0); in cas_open()
4202 cp->hw_running = 1; in cas_open()
4203 cas_unlock_all_restore(cp, flags); in cas_open()
4207 if (cas_tx_tiny_alloc(cp) < 0) in cas_open()
4211 if (cas_alloc_rxds(cp) < 0) in cas_open()
4215 cas_spare_init(cp); in cas_open()
4216 cas_spare_recover(cp, GFP_KERNEL); in cas_open()
4223 if (request_irq(cp->pdev->irq, cas_interrupt, in cas_open()
4225 netdev_err(cp->dev, "failed to request irq !\n"); in cas_open()
4231 napi_enable(&cp->napi); in cas_open()
4234 cas_lock_all_save(cp, flags); in cas_open()
4235 cas_clean_rings(cp); in cas_open()
4236 cas_init_hw(cp, !hw_was_up); in cas_open()
4237 cp->opened = 1; in cas_open()
4238 cas_unlock_all_restore(cp, flags); in cas_open()
4241 mutex_unlock(&cp->pm_mutex); in cas_open()
4245 cas_spare_free(cp); in cas_open()
4246 cas_free_rxds(cp); in cas_open()
4248 cas_tx_tiny_free(cp); in cas_open()
4250 mutex_unlock(&cp->pm_mutex); in cas_open()
4257 struct cas *cp = netdev_priv(dev); in cas_close() local
4260 napi_disable(&cp->napi); in cas_close()
4263 mutex_lock(&cp->pm_mutex); in cas_close()
4268 cas_lock_all_save(cp, flags); in cas_close()
4269 cp->opened = 0; in cas_close()
4270 cas_reset(cp, 0); in cas_close()
4271 cas_phy_init(cp); in cas_close()
4272 cas_begin_auto_negotiation(cp, NULL); in cas_close()
4273 cas_clean_rings(cp); in cas_close()
4274 cas_unlock_all_restore(cp, flags); in cas_close()
4276 free_irq(cp->pdev->irq, (void *) dev); in cas_close()
4277 cas_spare_free(cp); in cas_close()
4278 cas_free_rxds(cp); in cas_close()
4279 cas_tx_tiny_free(cp); in cas_close()
4280 mutex_unlock(&cp->pm_mutex); in cas_close()
4331 static void cas_read_regs(struct cas *cp, u8 *ptr, int len) in cas_read_regs() argument
4337 spin_lock_irqsave(&cp->lock, flags); in cas_read_regs()
4342 hval = cas_phy_read(cp, in cas_read_regs()
4346 val = readl(cp->regs + ethtool_register_table[i].offsets); in cas_read_regs()
4350 spin_unlock_irqrestore(&cp->lock, flags); in cas_read_regs()
4355 struct cas *cp = netdev_priv(dev); in cas_get_stats() local
4356 struct net_device_stats *stats = cp->net_stats; in cas_get_stats()
4362 if (!cp->hw_running) in cas_get_stats()
4373 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); in cas_get_stats()
4375 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; in cas_get_stats()
4377 readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff; in cas_get_stats()
4379 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; in cas_get_stats()
4381 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + in cas_get_stats()
4382 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); in cas_get_stats()
4385 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); in cas_get_stats()
4388 readl(cp->regs + REG_MAC_COLL_EXCESS); in cas_get_stats()
4389 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + in cas_get_stats()
4390 readl(cp->regs + REG_MAC_COLL_LATE); in cas_get_stats()
4392 cas_clear_mac_err(cp); in cas_get_stats()
4395 spin_lock(&cp->stat_lock[0]); in cas_get_stats()
4402 spin_unlock(&cp->stat_lock[0]); in cas_get_stats()
4405 spin_lock(&cp->stat_lock[i]); in cas_get_stats()
4418 spin_unlock(&cp->stat_lock[i]); in cas_get_stats()
4420 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); in cas_get_stats()
4427 struct cas *cp = netdev_priv(dev); in cas_set_multicast() local
4432 if (!cp->hw_running) in cas_set_multicast()
4435 spin_lock_irqsave(&cp->lock, flags); in cas_set_multicast()
4436 rxcfg = readl(cp->regs + REG_MAC_RX_CFG); in cas_set_multicast()
4439 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); in cas_set_multicast()
4440 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { in cas_set_multicast()
4449 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); in cas_set_multicast()
4450 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { in cas_set_multicast()
4457 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); in cas_set_multicast()
4459 writel(rxcfg, cp->regs + REG_MAC_RX_CFG); in cas_set_multicast()
4460 spin_unlock_irqrestore(&cp->lock, flags); in cas_set_multicast()
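
Note: cas_set_multicast() cannot rewrite the filters while the RX MAC is live, so it clears the enable bit and polls until the hardware acknowledges (with a bounded spin) before reprogramming. The quiesce step in isolation; the retry budget is an assumption.

#include <linux/io.h>
#include <linux/delay.h>

static void demo_quiesce_rx_mac(struct cas *cp)
{
	u32 rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
	int limit = 10000;	/* assumed; driver uses its own bound */

	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
		if (!limit--)
			break;		/* hardware stuck; proceed anyway */
		udelay(10);
	}
}
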
4465 struct cas *cp = netdev_priv(dev); in cas_get_drvinfo() local
4468 strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); in cas_get_drvinfo()
4474 struct cas *cp = netdev_priv(dev); in cas_get_link_ksettings() local
4483 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { in cas_get_link_ksettings()
4489 spin_lock_irqsave(&cp->lock, flags); in cas_get_link_ksettings()
4491 linkstate = cp->lstate; in cas_get_link_ksettings()
4492 if (CAS_PHY_MII(cp->phy_type)) { in cas_get_link_ksettings()
4494 cmd->base.phy_address = cp->phy_addr; in cas_get_link_ksettings()
4508 if (cp->hw_running) { in cas_get_link_ksettings()
4509 cas_mif_poll(cp, 0); in cas_get_link_ksettings()
4510 bmcr = cas_phy_read(cp, MII_BMCR); in cas_get_link_ksettings()
4511 cas_read_mii_link_mode(cp, &full_duplex, in cas_get_link_ksettings()
4513 cas_mif_poll(cp, 1); in cas_get_link_ksettings()
4522 if (cp->hw_running) { in cas_get_link_ksettings()
4524 bmcr = readl(cp->regs + REG_PCS_MII_CTRL); in cas_get_link_ksettings()
4525 cas_read_pcs_link_mode(cp, &full_duplex, in cas_get_link_ksettings()
4529 spin_unlock_irqrestore(&cp->lock, flags); in cas_get_link_ksettings()
4559 if (cp->link_cntl & BMCR_ANENABLE) { in cas_get_link_ksettings()
4564 if (cp->link_cntl & BMCR_SPEED100) { in cas_get_link_ksettings()
4566 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { in cas_get_link_ksettings()
4569 cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ? in cas_get_link_ksettings()
4585 struct cas *cp = netdev_priv(dev); in cas_set_link_ksettings() local
4603 spin_lock_irqsave(&cp->lock, flags); in cas_set_link_ksettings()
4604 cas_begin_auto_negotiation(cp, cmd); in cas_set_link_ksettings()
4605 spin_unlock_irqrestore(&cp->lock, flags); in cas_set_link_ksettings()
4611 struct cas *cp = netdev_priv(dev); in cas_nway_reset() local
4614 if ((cp->link_cntl & BMCR_ANENABLE) == 0) in cas_nway_reset()
4618 spin_lock_irqsave(&cp->lock, flags); in cas_nway_reset()
4619 cas_begin_auto_negotiation(cp, NULL); in cas_nway_reset()
4620 spin_unlock_irqrestore(&cp->lock, flags); in cas_nway_reset()
4627 struct cas *cp = netdev_priv(dev); in cas_get_link() local
4628 return cp->lstate == link_up; in cas_get_link()
4633 struct cas *cp = netdev_priv(dev); in cas_get_msglevel() local
4634 return cp->msg_enable; in cas_get_msglevel()
4639 struct cas *cp = netdev_priv(dev); in cas_set_msglevel() local
4640 cp->msg_enable = value; in cas_set_msglevel()
4645 struct cas *cp = netdev_priv(dev); in cas_get_regs_len() local
4646 return min_t(int, cp->casreg_len, CAS_MAX_REGS); in cas_get_regs_len()
4652 struct cas *cp = netdev_priv(dev); in cas_get_regs() local
4654 /* cas_read_regs handles locks (cp->lock). */ in cas_get_regs()
4655 cas_read_regs(cp, p, regs->len / sizeof(u32)); in cas_get_regs()
4677 struct cas *cp = netdev_priv(dev); in cas_get_ethtool_stats() local
4678 struct net_device_stats *stats = cas_get_stats(cp->dev); in cas_get_ethtool_stats()
4716 struct cas *cp = netdev_priv(dev); in cas_ioctl() local
4724 mutex_lock(&cp->pm_mutex); in cas_ioctl()
4727 data->phy_id = cp->phy_addr; in cas_ioctl()
4731 spin_lock_irqsave(&cp->lock, flags); in cas_ioctl()
4732 cas_mif_poll(cp, 0); in cas_ioctl()
4733 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); in cas_ioctl()
4734 cas_mif_poll(cp, 1); in cas_ioctl()
4735 spin_unlock_irqrestore(&cp->lock, flags); in cas_ioctl()
4740 spin_lock_irqsave(&cp->lock, flags); in cas_ioctl()
4741 cas_mif_poll(cp, 0); in cas_ioctl()
4742 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in); in cas_ioctl()
4743 cas_mif_poll(cp, 1); in cas_ioctl()
4744 spin_unlock_irqrestore(&cp->lock, flags); in cas_ioctl()
4750 mutex_unlock(&cp->pm_mutex); in cas_ioctl()
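
Note: the SIOCGMIIREG arm of cas_ioctl() wraps a raw PHY read in cp->lock and temporarily disables MIF autopolling so the manual MIF transaction doesn't race it. Its core, assuming the cas_mif_poll()/cas_phy_read() helpers shown earlier.

#include <linux/mii.h>
#include <linux/spinlock.h>

static int demo_mii_read(struct cas *cp, struct mii_ioctl_data *data)
{
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cas_mif_poll(cp, 0);			/* stop autopolling */
	data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
	cas_mif_poll(cp, 1);			/* restart it */
	spin_unlock_irqrestore(&cp->lock, flags);
	return 0;
}
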
4859 struct cas *cp; in cas_init_one() local
4880 dev = alloc_etherdev(sizeof(*cp)); in cas_init_one()
4940 cp = netdev_priv(dev); in cas_init_one()
4941 cp->pdev = pdev; in cas_init_one()
4944 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0; in cas_init_one()
4946 cp->dev = dev; in cas_init_one()
4947 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : in cas_init_one()
4951 cp->of_node = pci_device_to_OF_node(pdev); in cas_init_one()
4954 cp->link_transition = LINK_TRANSITION_UNKNOWN; in cas_init_one()
4955 cp->link_transition_jiffies_valid = 0; in cas_init_one()
4957 spin_lock_init(&cp->lock); in cas_init_one()
4958 spin_lock_init(&cp->rx_inuse_lock); in cas_init_one()
4959 spin_lock_init(&cp->rx_spare_lock); in cas_init_one()
4961 spin_lock_init(&cp->stat_lock[i]); in cas_init_one()
4962 spin_lock_init(&cp->tx_lock[i]); in cas_init_one()
4964 spin_lock_init(&cp->stat_lock[N_TX_RINGS]); in cas_init_one()
4965 mutex_init(&cp->pm_mutex); in cas_init_one()
4967 timer_setup(&cp->link_timer, cas_link_timer, 0); in cas_init_one()
4973 atomic_set(&cp->reset_task_pending, 0); in cas_init_one()
4974 atomic_set(&cp->reset_task_pending_all, 0); in cas_init_one()
4975 atomic_set(&cp->reset_task_pending_spare, 0); in cas_init_one()
4976 atomic_set(&cp->reset_task_pending_mtu, 0); in cas_init_one()
4978 INIT_WORK(&cp->reset_task, cas_reset_task); in cas_init_one()
4982 cp->link_cntl = link_modes[link_mode]; in cas_init_one()
4984 cp->link_cntl = BMCR_ANENABLE; in cas_init_one()
4985 cp->lstate = link_down; in cas_init_one()
4986 cp->link_transition = LINK_TRANSITION_LINK_DOWN; in cas_init_one()
4987 netif_carrier_off(cp->dev); in cas_init_one()
4988 cp->timer_ticks = 0; in cas_init_one()
4991 cp->regs = pci_iomap(pdev, 0, casreg_len); in cas_init_one()
4992 if (!cp->regs) { in cas_init_one()
4996 cp->casreg_len = casreg_len; in cas_init_one()
4999 cas_check_pci_invariants(cp); in cas_init_one()
5000 cas_hard_reset(cp); in cas_init_one()
5001 cas_reset(cp, 0); in cas_init_one()
5002 if (cas_check_invariants(cp)) in cas_init_one()
5004 if (cp->cas_flags & CAS_FLAG_SATURN) in cas_init_one()
5005 cas_saturn_firmware_init(cp); in cas_init_one()
5007 cp->init_block = in cas_init_one()
5009 &cp->block_dvma, GFP_KERNEL); in cas_init_one()
5010 if (!cp->init_block) { in cas_init_one()
5016 cp->init_txds[i] = cp->init_block->txds[i]; in cas_init_one()
5019 cp->init_rxds[i] = cp->init_block->rxds[i]; in cas_init_one()
5022 cp->init_rxcs[i] = cp->init_block->rxcs[i]; in cas_init_one()
5025 skb_queue_head_init(&cp->rx_flows[i]); in cas_init_one()
5032 netif_napi_add(dev, &cp->napi, cas_poll); in cas_init_one()
5038 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) in cas_init_one()
5052 i = readl(cp->regs + REG_BIM_CFG); in cas_init_one()
5054 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", in cas_init_one()
5057 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq, in cas_init_one()
5061 cp->hw_running = 1; in cas_init_one()
5062 cas_entropy_reset(cp); in cas_init_one()
5063 cas_phy_init(cp); in cas_init_one()
5064 cas_begin_auto_negotiation(cp, NULL); in cas_init_one()
5069 cp->init_block, cp->block_dvma); in cas_init_one()
5072 mutex_lock(&cp->pm_mutex); in cas_init_one()
5073 if (cp->hw_running) in cas_init_one()
5074 cas_shutdown(cp); in cas_init_one()
5075 mutex_unlock(&cp->pm_mutex); in cas_init_one()
5077 vfree(cp->fw_data); in cas_init_one()
5079 pci_iounmap(pdev, cp->regs); in cas_init_one()
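
Note: cas_init_one() uses the usual goto-label unwind: each failure point jumps to the label that releases exactly what was set up before it (shut the hardware down, free the init block, unmap the registers). A toy version; demo_alloc_a/b and demo_free_a are hypothetical helpers, not driver functions.

#include <linux/errno.h>

static int demo_alloc_a(struct cas *cp);
static int demo_alloc_b(struct cas *cp);
static void demo_free_a(struct cas *cp);

static int demo_probe_unwind(struct cas *cp)
{
	if (demo_alloc_a(cp) < 0)		/* hypothetical step A */
		goto err_out;
	if (demo_alloc_b(cp) < 0)		/* hypothetical step B */
		goto err_free_a;
	return 0;

err_free_a:
	demo_free_a(cp);	/* undo A only; B never succeeded */
err_out:
	return -ENOMEM;
}
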
5101 struct cas *cp; in cas_remove_one() local
5105 cp = netdev_priv(dev); in cas_remove_one()
5108 vfree(cp->fw_data); in cas_remove_one()
5110 mutex_lock(&cp->pm_mutex); in cas_remove_one()
5111 cancel_work_sync(&cp->reset_task); in cas_remove_one()
5112 if (cp->hw_running) in cas_remove_one()
5113 cas_shutdown(cp); in cas_remove_one()
5114 mutex_unlock(&cp->pm_mutex); in cas_remove_one()
5117 if (cp->orig_cacheline_size) { in cas_remove_one()
5122 cp->orig_cacheline_size); in cas_remove_one()
5126 cp->init_block, cp->block_dvma); in cas_remove_one()
5127 pci_iounmap(pdev, cp->regs); in cas_remove_one()
5136 struct cas *cp = netdev_priv(dev); in cas_suspend() local
5139 mutex_lock(&cp->pm_mutex); in cas_suspend()
5142 if (cp->opened) { in cas_suspend()
5145 cas_lock_all_save(cp, flags); in cas_suspend()
5152 cas_reset(cp, 0); in cas_suspend()
5153 cas_clean_rings(cp); in cas_suspend()
5154 cas_unlock_all_restore(cp, flags); in cas_suspend()
5157 if (cp->hw_running) in cas_suspend()
5158 cas_shutdown(cp); in cas_suspend()
5159 mutex_unlock(&cp->pm_mutex); in cas_suspend()
5167 struct cas *cp = netdev_priv(dev); in cas_resume() local
5171 mutex_lock(&cp->pm_mutex); in cas_resume()
5172 cas_hard_reset(cp); in cas_resume()
5173 if (cp->opened) { in cas_resume()
5175 cas_lock_all_save(cp, flags); in cas_resume()
5176 cas_reset(cp, 0); in cas_resume()
5177 cp->hw_running = 1; in cas_resume()
5178 cas_clean_rings(cp); in cas_resume()
5179 cas_init_hw(cp, 1); in cas_resume()
5180 cas_unlock_all_restore(cp, flags); in cas_resume()
5184 mutex_unlock(&cp->pm_mutex); in cas_resume()