Lines Matching refs:dp
181 static void sfe_set_eq_sis630(struct gem_dev *dp);
195 static int sfe_tx_desc_write(struct gem_dev *dp, int slot,
197 static void sfe_tx_start(struct gem_dev *dp, int startslot, int nslot);
198 static void sfe_rx_desc_write(struct gem_dev *dp, int slot,
200 static uint_t sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
201 static uint64_t sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
203 static void sfe_tx_desc_init(struct gem_dev *dp, int slot);
204 static void sfe_rx_desc_init(struct gem_dev *dp, int slot);
205 static void sfe_tx_desc_clean(struct gem_dev *dp, int slot);
206 static void sfe_rx_desc_clean(struct gem_dev *dp, int slot);
209 static uint_t sfe_interrupt(struct gem_dev *dp);
266 #define SFE_EEPROM_DELAY(dp) \ argument
267 { (void) INL(dp, EROMAR); (void) INL(dp, EROMAR); }
272 sfe_read_eeprom(struct gem_dev *dp, uint_t offset) in sfe_read_eeprom() argument
279 OUTL(dp, EROMAR, 0); in sfe_read_eeprom()
280 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
281 OUTL(dp, EROMAR, EROMAR_EESK); in sfe_read_eeprom()
282 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
292 OUTL(dp, EROMAR, EROMAR_EECS | eedi); in sfe_read_eeprom()
293 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
294 OUTL(dp, EROMAR, EROMAR_EECS | eedi | EROMAR_EESK); in sfe_read_eeprom()
295 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
298 OUTL(dp, EROMAR, EROMAR_EECS); in sfe_read_eeprom()
303 OUTL(dp, EROMAR, EROMAR_EECS); in sfe_read_eeprom()
304 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
305 OUTL(dp, EROMAR, EROMAR_EECS | EROMAR_EESK); in sfe_read_eeprom()
306 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
308 ret = (ret << 1) | ((INL(dp, EROMAR) >> EROMAR_EEDO_SHIFT) & 1); in sfe_read_eeprom()
311 OUTL(dp, EROMAR, 0); in sfe_read_eeprom()
312 SFE_EEPROM_DELAY(dp); in sfe_read_eeprom()
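
The matches above cover sfe_read_eeprom(), which bit-bangs a small serial EEPROM through the EROMAR register: chip select and the data-in bit are set up with one write, the clock (EESK) is raised with a second write, and each returned bit is sampled from the EEDO position. A rough standalone sketch of that read sequence follows; eromar_read()/eromar_write(), the bit values, and the 93C46-style read opcode are assumptions standing in for the driver's INL/OUTL macros and register definitions, not the driver's own interface.

#include <stdint.h>

#define	EECS		(1u << 3)	/* chip select (placeholder bit value) */
#define	EESK		(1u << 2)	/* serial clock (placeholder bit value) */
#define	EEDI		(1u << 0)	/* data to the chip (placeholder) */
#define	EEDO_SHIFT	1		/* data from the chip (placeholder) */

extern uint32_t eromar_read(void);	/* stand-ins for INL/OUTL(dp, EROMAR) */
extern void eromar_write(uint32_t);

static uint16_t
eeprom_read(unsigned offset)
{
	/* 93C46-style READ: start bit, opcode 10, then a 6-bit address */
	unsigned cmd = (0x6u << 6) | (offset & 0x3f);
	uint16_t val = 0;
	int i;

	for (i = 8; i >= 0; i--) {	/* shift the 9 command bits out, MSB first */
		uint32_t di = ((cmd >> i) & 1) ? EEDI : 0;

		eromar_write(EECS | di);
		eromar_write(EECS | di | EESK);
	}
	eromar_write(EECS);

	for (i = 0; i < 16; i++) {	/* clock the 16 data bits back in */
		eromar_write(EECS);
		eromar_write(EECS | EESK);
		val = (val << 1) | ((eromar_read() >> EEDO_SHIFT) & 1);
	}
	eromar_write(0);		/* deselect the EEPROM */
	return (val);
}
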
319 sfe_get_mac_addr_dp83815(struct gem_dev *dp) in sfe_get_mac_addr_dp83815() argument
327 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); in sfe_get_mac_addr_dp83815()
329 mac = dp->dev_addr.ether_addr_octet; in sfe_get_mac_addr_dp83815()
335 val = sfe_read_eeprom(dp, 0x6); in sfe_get_mac_addr_dp83815()
339 val = sfe_read_eeprom(dp, 0x7); in sfe_get_mac_addr_dp83815()
345 val = sfe_read_eeprom(dp, 0x8); in sfe_get_mac_addr_dp83815()
351 val = sfe_read_eeprom(dp, 0x9); in sfe_get_mac_addr_dp83815()
361 sfe_get_mac_addr_sis900(struct gem_dev *dp) in sfe_get_mac_addr_sis900() argument
367 mac = dp->dev_addr.ether_addr_octet; in sfe_get_mac_addr_sis900()
370 val = sfe_read_eeprom(dp, 0x8 + i); in sfe_get_mac_addr_sis900()
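
sfe_get_mac_addr_dp83815() and sfe_get_mac_addr_sis900() both rebuild the six-byte station address from 16-bit EEPROM words: the SiS900 layout is simply three little-endian words starting at offset 0x8, while the DP83815 packs the bytes across words 0x6-0x9 with a bit offset. A sketch of the simpler SiS900-style unpacking, using a hypothetical eeprom_read() like the one sketched above:

#include <stdint.h>

extern uint16_t eeprom_read(unsigned offset);	/* e.g. the sketch above */

static void
get_mac_addr_sis900_style(uint8_t mac[6])
{
	int i;

	/* three little-endian words starting at EEPROM offset 0x8 */
	for (i = 0; i < 3; i++) {
		uint16_t val = eeprom_read(0x8 + i);

		mac[i * 2] = (uint8_t)val;		/* low byte first */
		mac[i * 2 + 1] = (uint8_t)(val >> 8);
	}
}
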
422 sfe_get_mac_addr_sis630e(struct gem_dev *dp) in sfe_get_mac_addr_sis630e() argument
436 dp->name); in sfe_get_mac_addr_sis630e()
442 dp->name); in sfe_get_mac_addr_sis630e()
452 dp->dev_addr.ether_addr_octet[i] = inb(0x71); in sfe_get_mac_addr_sis630e()
463 sfe_get_mac_addr_sis635(struct gem_dev *dp) in sfe_get_mac_addr_sis635() argument
468 struct sfe_dev *lp = dp->private; in sfe_get_mac_addr_sis635()
470 DPRINTF(2, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); in sfe_get_mac_addr_sis635()
471 rfcr = INL(dp, RFCR); in sfe_get_mac_addr_sis635()
473 OUTL(dp, CR, lp->cr | CR_RELOAD); in sfe_get_mac_addr_sis635()
474 OUTL(dp, CR, lp->cr); in sfe_get_mac_addr_sis635()
477 OUTL(dp, RFCR, rfcr & ~RFCR_RFEN); in sfe_get_mac_addr_sis635()
481 OUTL(dp, RFCR, in sfe_get_mac_addr_sis635()
483 v = INL(dp, RFDR); in sfe_get_mac_addr_sis635()
484 dp->dev_addr.ether_addr_octet[i] = (uint8_t)v; in sfe_get_mac_addr_sis635()
485 dp->dev_addr.ether_addr_octet[i+1] = (uint8_t)(v >> 8); in sfe_get_mac_addr_sis635()
489 OUTL(dp, RFCR, rfcr | RFCR_RFEN); in sfe_get_mac_addr_sis635()
495 sfe_get_mac_addr_sis962(struct gem_dev *dp) in sfe_get_mac_addr_sis962() argument
503 OUTL(dp, MEAR, EROMAR_EEREQ); in sfe_get_mac_addr_sis962()
504 for (i = 0; (INL(dp, MEAR) & EROMAR_EEGNT) == 0; i++) { in sfe_get_mac_addr_sis962()
508 CONS "%s: failed to access eeprom", dp->name); in sfe_get_mac_addr_sis962()
513 ret = sfe_get_mac_addr_sis900(dp); in sfe_get_mac_addr_sis962()
516 OUTL(dp, MEAR, EROMAR_EEDONE); in sfe_get_mac_addr_sis962()
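
On the SiS962 the EEPROM is shared, so sfe_get_mac_addr_sis962() first requests it through MEAR (EEREQ), polls for the grant bit (EEGNT) with a bounded loop, and hands it back with EEDONE when finished. A minimal sketch of that request/grant/release handshake; the accessors, bit values, and timeout below are placeholders, not values taken from the driver.

#include <stdint.h>

#define	EEREQ	(1u << 10)	/* request EEPROM access (placeholder value) */
#define	EEGNT	(1u << 8)	/* access granted (placeholder value) */
#define	EEDONE	(1u << 9)	/* hand the EEPROM back (placeholder value) */

extern uint32_t mear_read(void);
extern void mear_write(uint32_t);
extern void delay_usec(unsigned);

static int
eeprom_acquire(void)
{
	int i;

	mear_write(EEREQ);
	for (i = 0; (mear_read() & EEGNT) == 0; i++) {
		if (i > 200) {			/* bounded wait, don't spin forever */
			mear_write(EEDONE);
			return (-1);
		}
		delay_usec(10);
	}
	return (0);
}

static void
eeprom_release(void)
{
	mear_write(EEDONE);
}
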
522 sfe_reset_chip_sis900(struct gem_dev *dp) in sfe_reset_chip_sis900() argument
527 struct sfe_dev *lp = dp->private; in sfe_reset_chip_sis900()
529 DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__)); in sfe_reset_chip_sis900()
537 OUTL(dp, IMR, 0); in sfe_reset_chip_sis900()
538 lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits; in sfe_reset_chip_sis900()
540 OUTLINL(dp, RFCR, 0); in sfe_reset_chip_sis900()
542 OUTL(dp, CR, CR_RST | CR_TXR | CR_RXR); in sfe_reset_chip_sis900()
548 cmn_err(CE_WARN, "%s: chip reset timeout", dp->name); in sfe_reset_chip_sis900()
551 done |= INL(dp, ISR) & (ISR_TXRCMP | ISR_RXRCMP); in sfe_reset_chip_sis900()
557 OUTL(dp, CR, lp->cr | INL(dp, CR)); in sfe_reset_chip_sis900()
562 dp->name, INL(dp, CFG), CFG_BITS_SIS900)); in sfe_reset_chip_sis900()
569 OUTL(dp, CFG, val); in sfe_reset_chip_sis900()
570 DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name, in sfe_reset_chip_sis900()
571 INL(dp, CFG), CFG_BITS_SIS900)); in sfe_reset_chip_sis900()
577 sfe_reset_chip_dp83815(struct gem_dev *dp) in sfe_reset_chip_dp83815() argument
581 struct sfe_dev *lp = dp->private; in sfe_reset_chip_dp83815()
583 DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__)); in sfe_reset_chip_dp83815()
591 OUTL(dp, IMR, 0); in sfe_reset_chip_dp83815()
592 lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits; in sfe_reset_chip_dp83815()
594 OUTL(dp, RFCR, 0); in sfe_reset_chip_dp83815()
596 OUTL(dp, CR, CR_RST); in sfe_reset_chip_dp83815()
599 for (i = 0; INL(dp, CR) & CR_RST; i++) { in sfe_reset_chip_dp83815()
601 cmn_err(CE_WARN, "!%s: chip reset timeout", dp->name); in sfe_reset_chip_dp83815()
606 DPRINTF(0, (CE_CONT, "!%s: chip reset in %duS", dp->name, i*10)); in sfe_reset_chip_dp83815()
608 OUTL(dp, CCSR, CCSR_PMESTS); in sfe_reset_chip_dp83815()
609 OUTL(dp, CCSR, 0); in sfe_reset_chip_dp83815()
613 dp->name, INL(dp, CFG), CFG_BITS_DP83815)); in sfe_reset_chip_dp83815()
614 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG); in sfe_reset_chip_dp83815()
615 OUTL(dp, CFG, val | CFG_PAUSE_ADV); in sfe_reset_chip_dp83815()
616 DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name, in sfe_reset_chip_dp83815()
617 INL(dp, CFG), CFG_BITS_DP83815)); in sfe_reset_chip_dp83815()
623 sfe_init_chip(struct gem_dev *dp) in sfe_init_chip() argument
632 OUTL(dp, IMR, 0); in sfe_init_chip()
637 OUTL(dp, TXDP, dp->tx_ring_dma); in sfe_init_chip()
640 OUTL(dp, RXDP, dp->rx_ring_dma); in sfe_init_chip()
646 sfe_mcast_hash(struct gem_dev *dp, uint8_t *addr) in sfe_mcast_hash() argument
653 sfe_rxfilter_dump(struct gem_dev *dp, int start, int end) in sfe_rxfilter_dump() argument
659 cmn_err(CE_CONT, "!%s: rx filter ram dump:", dp->name); in sfe_rxfilter_dump()
663 OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i + j*2); in sfe_rxfilter_dump()
664 ram[j] = INL(dp, RFDR); in sfe_rxfilter_dump()
683 sfe_set_rx_filter_dp83815(struct gem_dev *dp) in sfe_set_rx_filter_dp83815() argument
688 uint8_t *mac = dp->cur_addr.ether_addr_octet; in sfe_set_rx_filter_dp83815()
690 struct sfe_dev *lp = dp->private; in sfe_set_rx_filter_dp83815()
693 dp->name, __func__, dp->mc_count, dp->rxmode, RXMODE_BITS)); in sfe_set_rx_filter_dp83815()
696 for (i = 0; i < dp->mc_count; i++) { in sfe_set_rx_filter_dp83815()
699 dp->name, i, in sfe_set_rx_filter_dp83815()
700 dp->mc_list[i].addr.ether_addr_octet[0], in sfe_set_rx_filter_dp83815()
701 dp->mc_list[i].addr.ether_addr_octet[1], in sfe_set_rx_filter_dp83815()
702 dp->mc_list[i].addr.ether_addr_octet[2], in sfe_set_rx_filter_dp83815()
703 dp->mc_list[i].addr.ether_addr_octet[3], in sfe_set_rx_filter_dp83815()
704 dp->mc_list[i].addr.ether_addr_octet[4], in sfe_set_rx_filter_dp83815()
705 dp->mc_list[i].addr.ether_addr_octet[5]); in sfe_set_rx_filter_dp83815()
708 if ((dp->rxmode & RXMODE_ENABLE) == 0) { in sfe_set_rx_filter_dp83815()
710 OUTL(dp, RFCR, 0); in sfe_set_rx_filter_dp83815()
717 if (dp->rxmode & RXMODE_PROMISC) { in sfe_set_rx_filter_dp83815()
720 } else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 16*32/2) { in sfe_set_rx_filter_dp83815()
723 } else if (dp->mc_count > 4) { in sfe_set_rx_filter_dp83815()
731 for (i = 0; i < dp->mc_count; i++) { in sfe_set_rx_filter_dp83815()
732 j = dp->mc_list[i].hash >> (32 - 9); in sfe_set_rx_filter_dp83815()
742 (((1 << dp->mc_count) - 1) << RFCR_APAT_SHIFT); in sfe_set_rx_filter_dp83815()
749 dp->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], in sfe_set_rx_filter_dp83815()
761 OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i); in sfe_set_rx_filter_dp83815()
762 OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]); in sfe_set_rx_filter_dp83815()
771 OUTL(dp, RFCR, j); in sfe_set_rx_filter_dp83815()
772 OUTL(dp, RFDR, 0); in sfe_set_rx_filter_dp83815()
777 for (j = 0; j < dp->mc_count; j++) { in sfe_set_rx_filter_dp83815()
778 mac = &dp->mc_list[j].addr.ether_addr_octet[0]; in sfe_set_rx_filter_dp83815()
780 OUTL(dp, RFCR, in sfe_set_rx_filter_dp83815()
782 OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]); in sfe_set_rx_filter_dp83815()
787 OUTL(dp, RFCR, RFADDR_PCOUNT01_DP83815); in sfe_set_rx_filter_dp83815()
788 OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL); in sfe_set_rx_filter_dp83815()
789 OUTL(dp, RFCR, RFADDR_PCOUNT23_DP83815); in sfe_set_rx_filter_dp83815()
790 OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL); in sfe_set_rx_filter_dp83815()
797 OUTL(dp, RFCR, RFADDR_MULTICAST_DP83815 + i*2); in sfe_set_rx_filter_dp83815()
798 OUTL(dp, RFDR, hash_tbl[i]); in sfe_set_rx_filter_dp83815()
802 sfe_rxfilter_dump(dp, 0, 0x10); in sfe_set_rx_filter_dp83815()
803 sfe_rxfilter_dump(dp, 0x200, 0x380); in sfe_set_rx_filter_dp83815()
806 OUTL(dp, RFCR, RFCR_RFEN | mode); in sfe_set_rx_filter_dp83815()
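
For the DP83815 the multicast filter is a 512-bit hash table programmed through the indirect RFCR/RFDR pair: the top nine bits of each address hash select one bit, and the table is built host-side as 32 sixteen-bit words before being written out word by word. A sketch of building that table; the CRC routine is a stand-in assumed to match the driver's hash function.

#include <stdint.h>
#include <string.h>

/* Big-endian Ethernet CRC-32; assumed to match the hash the driver uses. */
static uint32_t
ether_crc_be(const uint8_t *addr, int len)
{
	uint32_t crc = 0xffffffffU;
	int i, bit;

	for (i = 0; i < len; i++) {
		uint8_t byte = addr[i];

		for (bit = 0; bit < 8; bit++, byte >>= 1) {
			crc = (crc << 1) ^
			    ((((crc >> 31) ^ byte) & 1) ? 0x04c11db7U : 0);
		}
	}
	return (crc);
}

/* Build the 512-bit table as 32 words of 16 bits, ready to be written
 * one word at a time through RFCR/RFDR as the matches above show.
 */
static void
build_hash_tbl(uint16_t hash_tbl[32], const uint8_t (*mc_list)[6], int mc_count)
{
	unsigned j;
	int i;

	memset(hash_tbl, 0, 32 * sizeof (uint16_t));
	for (i = 0; i < mc_count; i++) {
		j = ether_crc_be(mc_list[i], 6) >> (32 - 9);	/* 0 .. 511 */
		hash_tbl[j / 16] |= 1u << (j % 16);
	}
}
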
812 sfe_set_rx_filter_sis900(struct gem_dev *dp) in sfe_set_rx_filter_sis900() argument
817 uint8_t *mac = dp->cur_addr.ether_addr_octet; in sfe_set_rx_filter_sis900()
820 struct sfe_dev *lp = dp->private; in sfe_set_rx_filter_sis900()
822 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); in sfe_set_rx_filter_sis900()
824 if ((dp->rxmode & RXMODE_ENABLE) == 0) { in sfe_set_rx_filter_sis900()
826 OUTLINL(dp, RFCR, 0); in sfe_set_rx_filter_sis900()
842 if (dp->rxmode & RXMODE_PROMISC) { in sfe_set_rx_filter_sis900()
845 } else if ((dp->rxmode & RXMODE_ALLMULTI) || in sfe_set_rx_filter_sis900()
846 dp->mc_count > hash_size*16/2) { in sfe_set_rx_filter_sis900()
855 for (i = 0; i < dp->mc_count; i++) { in sfe_set_rx_filter_sis900()
857 h = dp->mc_list[i].hash >> hash_shift; in sfe_set_rx_filter_sis900()
865 OUTLINL(dp, RFCR, in sfe_set_rx_filter_sis900()
867 OUTLINL(dp, RFDR, (mac[i*2+1] << 8) | mac[i*2]); in sfe_set_rx_filter_sis900()
876 OUTLINL(dp, RFCR, in sfe_set_rx_filter_sis900()
878 OUTLINL(dp, RFDR, hash_tbl[i]); in sfe_set_rx_filter_sis900()
882 OUTLINL(dp, RFCR, RFCR_RFEN | mode); in sfe_set_rx_filter_sis900()
888 sfe_start_chip(struct gem_dev *dp) in sfe_start_chip() argument
890 struct sfe_dev *lp = dp->private; in sfe_start_chip()
892 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); in sfe_start_chip()
901 if ((dp->misc_flag & GEM_NOINTR) == 0) { in sfe_start_chip()
902 OUTL(dp, IER, 1); in sfe_start_chip()
903 OUTL(dp, IMR, lp->our_intr_bits); in sfe_start_chip()
907 OUTL(dp, CR, lp->cr | CR_RXE); in sfe_start_chip()
916 sfe_stop_chip(struct gem_dev *dp) in sfe_stop_chip() argument
918 struct sfe_dev *lp = dp->private; in sfe_stop_chip()
923 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); in sfe_stop_chip()
929 OUTL(dp, IMR, 0); in sfe_stop_chip()
932 OUTL(dp, CR, lp->cr | CR_TXR | CR_RXR); in sfe_stop_chip()
942 dp->name, __func__); in sfe_stop_chip()
946 val = INL(dp, ISR); in sfe_stop_chip()
960 sfe_stop_chip_quiesce(struct gem_dev *dp) in sfe_stop_chip_quiesce() argument
962 struct sfe_dev *lp = dp->private; in sfe_stop_chip_quiesce()
971 OUTL(dp, IMR, 0); in sfe_stop_chip_quiesce()
974 OUTL(dp, CR, CR_TXR | CR_RXR); in sfe_stop_chip_quiesce()
986 val = INL(dp, ISR); in sfe_stop_chip_quiesce()
1020 sfe_set_media(struct gem_dev *dp) in sfe_set_media() argument
1028 struct sfe_dev *lp = dp->private; in sfe_set_media()
1033 dp->name, __func__, in sfe_set_media()
1034 dp->full_duplex ? "full" : "half", gem_speed_value[dp->speed])); in sfe_set_media()
1038 if (dp->full_duplex) { in sfe_set_media()
1042 if (dp->full_duplex) { in sfe_set_media()
1051 val = INL(dp, CFG) & CFG_EDB_MASTER; in sfe_set_media()
1074 txmxdma = max(dp->txmaxdma, 256); in sfe_set_media()
1075 rxmxdma = max(dp->rxmaxdma, 256); in sfe_set_media()
1080 lp->tx_drain_threshold = ROUNDUP2(dp->txthr, TXCFG_FIFO_UNIT); in sfe_set_media()
1106 val = ROUNDUP2(max(dp->rxthr, ETHERMIN), RXCFG_FIFO_UNIT); in sfe_set_media()
1113 dp->name, __func__, in sfe_set_media()
1124 OUTL(dp, TXCFG, txcfg); in sfe_set_media()
1130 OUTL(dp, RXCFG, rxcfg); in sfe_set_media()
1133 dp->name, __func__, in sfe_set_media()
1138 pcr = INL(dp, PCR); in sfe_set_media()
1139 switch (dp->flow_control) { in sfe_set_media()
1142 OUTL(dp, PCR, pcr | PCR_PSEN | PCR_PS_MCAST); in sfe_set_media()
1146 OUTL(dp, PCR, in sfe_set_media()
1150 DPRINTF(2, (CE_CONT, CONS "%s: PCR: %b", dp->name, in sfe_set_media()
1151 INL(dp, PCR), PCR_BITS)); in sfe_set_media()
1154 switch (dp->flow_control) { in sfe_set_media()
1157 OUTL(dp, FLOWCTL, FLOWCTL_FLOWEN); in sfe_set_media()
1160 OUTL(dp, FLOWCTL, 0); in sfe_set_media()
1164 dp->name, INL(dp, FLOWCTL), FLOWCTL_BITS)); in sfe_set_media()
1170 sfe_get_stats(struct gem_dev *dp) in sfe_get_stats() argument
1180 sfe_tx_desc_write(struct gem_dev *dp, int slot, in sfe_tx_desc_write() argument
1192 dp->name, ddi_get_lbolt(), __func__, in sfe_tx_desc_write()
1193 dp->tx_desc_tail, slot, frags, flags); in sfe_tx_desc_write()
1215 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot]; in sfe_tx_desc_write()
1225 sfe_tx_start(struct gem_dev *dp, int start_slot, int nslot) in sfe_tx_start() argument
1227 uint_t tx_ring_size = dp->gc.gc_tx_ring_size; in sfe_tx_start()
1229 struct sfe_dev *lp = dp->private; in sfe_tx_start()
1232 gem_tx_desc_dma_sync(dp, in sfe_tx_start()
1237 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * start_slot]; in sfe_tx_start()
1240 gem_tx_desc_dma_sync(dp, start_slot, 1, DDI_DMA_SYNC_FORDEV); in sfe_tx_start()
1245 if (dp->mac_active) { in sfe_tx_start()
1246 OUTL(dp, CR, lp->cr | CR_TXE); in sfe_tx_start()
1251 sfe_rx_desc_write(struct gem_dev *dp, int slot, in sfe_rx_desc_write() argument
1264 dp->name, __func__, dp->rx_active_tail, slot, frags); in sfe_rx_desc_write()
1271 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot]; in sfe_rx_desc_write()
1280 sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc) in sfe_tx_desc_stat() argument
1282 uint_t tx_ring_size = dp->gc.gc_tx_ring_size; in sfe_tx_desc_stat()
1286 struct sfe_dev *lp = dp->private; in sfe_tx_desc_stat()
1293 &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot + ndesc - 1, tx_ring_size)]; in sfe_tx_desc_stat()
1303 dp->name, ddi_get_lbolt(), __func__, in sfe_tx_desc_stat()
1312 dp->mac_active) { in sfe_tx_desc_stat()
1313 OUTL(dp, CR, lp->cr | CR_TXE); in sfe_tx_desc_stat()
1323 dp->name, slot, status); in sfe_tx_desc_stat()
1327 delay = (ddi_get_lbolt() - dp->tx_buf_head->txb_stime) * 10; in sfe_tx_desc_stat()
1330 dp->name, delay, slot)); in sfe_tx_desc_stat()
1342 &dp->tx_ring[SFE_DESC_SIZE * n]))->d_cmdsts); in sfe_tx_desc_stat()
1357 dp->name, status, TXSTAT_BITS)); in sfe_tx_desc_stat()
1359 dp->stats.errxmt++; in sfe_tx_desc_stat()
1362 dp->stats.underflow++; in sfe_tx_desc_stat()
1364 dp->stats.nocarrier++; in sfe_tx_desc_stat()
1366 dp->stats.xmtlatecoll++; in sfe_tx_desc_stat()
1367 } else if ((!dp->full_duplex) && (status & CMDSTS_EC)) { in sfe_tx_desc_stat()
1368 dp->stats.excoll++; in sfe_tx_desc_stat()
1369 dp->stats.collisions += 16; in sfe_tx_desc_stat()
1371 dp->stats.xmit_internal_err++; in sfe_tx_desc_stat()
1373 } else if (!dp->full_duplex) { in sfe_tx_desc_stat()
1378 dp->stats.first_coll++; in sfe_tx_desc_stat()
1380 dp->stats.multi_coll++; in sfe_tx_desc_stat()
1382 dp->stats.collisions += cols; in sfe_tx_desc_stat()
1384 dp->stats.defer++; in sfe_tx_desc_stat()
1391 sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc) in sfe_rx_desc_stat() argument
1402 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot]; in sfe_rx_desc_stat()
1412 dp->name, ddi_get_lbolt(), __func__, in sfe_rx_desc_stat()
1433 dp->name, status, RXSTAT_BITS)); in sfe_rx_desc_stat()
1436 dp->stats.errrcv++; in sfe_rx_desc_stat()
1439 dp->stats.overflow++; in sfe_rx_desc_stat()
1441 dp->stats.frame_too_long++; in sfe_rx_desc_stat()
1443 dp->stats.runt++; in sfe_rx_desc_stat()
1445 dp->stats.frame++; in sfe_rx_desc_stat()
1447 dp->stats.crc++; in sfe_rx_desc_stat()
1449 dp->stats.rcv_internal_err++; in sfe_rx_desc_stat()
1465 uint8_t *bp = dp->rx_buf_head->rxb_buf; in sfe_rx_desc_stat()
1467 cmn_err(CE_CONT, CONS "%s: len:%d", dp->name, len); in sfe_rx_desc_stat()
1482 sfe_tx_desc_init(struct gem_dev *dp, int slot) in sfe_tx_desc_init() argument
1484 uint_t tx_ring_size = dp->gc.gc_tx_ring_size; in sfe_tx_desc_init()
1488 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot]; in sfe_tx_desc_init()
1494 here = ((uint32_t)dp->tx_ring_dma) + SFE_DESC_SIZE*slot; in sfe_tx_desc_init()
1497 &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot - 1, tx_ring_size)]; in sfe_tx_desc_init()
1502 sfe_rx_desc_init(struct gem_dev *dp, int slot) in sfe_rx_desc_init() argument
1504 uint_t rx_ring_size = dp->gc.gc_rx_ring_size; in sfe_rx_desc_init()
1508 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot]; in sfe_rx_desc_init()
1514 here = ((uint32_t)dp->rx_ring_dma) + SFE_DESC_SIZE*slot; in sfe_rx_desc_init()
1517 &dp->rx_ring[SFE_DESC_SIZE * SLOT(slot - 1, rx_ring_size)]; in sfe_rx_desc_init()
1522 sfe_tx_desc_clean(struct gem_dev *dp, int slot) in sfe_tx_desc_clean() argument
1526 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot]; in sfe_tx_desc_clean()
1531 sfe_rx_desc_clean(struct gem_dev *dp, int slot) in sfe_rx_desc_clean() argument
1535 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot]; in sfe_rx_desc_clean()
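
The descriptor init/clean matches show how the TX and RX rings are kept circular: every 16-byte descriptor carries a link field holding the bus address of the next slot, and when a slot is (re)initialized the previous slot's link is patched to point at it. A sketch of that linking step; only d_cmdsts appears in the listing, so the struct layout and the other field names here are assumptions.

#include <stdint.h>
#include <string.h>

#define	SFE_DESC_SIZE	16

struct sfe_desc {			/* assumed DP83815-style layout */
	uint32_t	d_link;		/* bus address of the next descriptor */
	uint32_t	d_cmdsts;	/* command/status; 0 = owned by host */
	uint32_t	d_bufptr;	/* bus address of the data buffer */
	uint32_t	d_pad;
};

/* (Re)initialize one slot and keep the ring closed by pointing the
 * previous slot's link at it, as sfe_tx/rx_desc_init() appear to do.
 */
static void
ring_slot_init(uint8_t *ring, uint32_t ring_dma, int slot, int ring_size)
{
	struct sfe_desc *desc = (void *)&ring[SFE_DESC_SIZE * slot];
	int prev_slot = (slot + ring_size - 1) % ring_size;
	struct sfe_desc *prev = (void *)&ring[SFE_DESC_SIZE * prev_slot];
	uint32_t here = ring_dma + SFE_DESC_SIZE * slot;

	memset(desc, 0, SFE_DESC_SIZE);
	prev->d_link = here;
}
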
1543 sfe_interrupt(struct gem_dev *dp) in sfe_interrupt() argument
1545 uint_t rx_ring_size = dp->gc.gc_rx_ring_size; in sfe_interrupt()
1550 struct sfe_dev *lp = dp->private; in sfe_interrupt()
1553 isr = INL(dp, ISR); in sfe_interrupt()
1565 dp->name, ddi_get_lbolt(), __func__, in sfe_interrupt()
1566 isr, INTR_BITS, dp->rx_active_head)); in sfe_interrupt()
1568 if (!dp->mac_active) { in sfe_interrupt()
1578 (void) gem_receive(dp); in sfe_interrupt()
1583 dp->name, isr, INTR_BITS)); in sfe_interrupt()
1585 dp->stats.overflow++; in sfe_interrupt()
1591 dp->name, isr, INTR_BITS)); in sfe_interrupt()
1593 dp->stats.norcvbuf++; in sfe_interrupt()
1599 OUTL(dp, RXDP, dp->rx_ring_dma + in sfe_interrupt()
1601 SLOT(dp->rx_active_head, rx_ring_size)); in sfe_interrupt()
1604 OUTL(dp, CR, lp->cr | CR_RXE); in sfe_interrupt()
1611 if (gem_tx_done(dp)) { in sfe_interrupt()
1622 dp->name, isr, INTR_BITS); in sfe_interrupt()
1627 (void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF); in sfe_interrupt()
1632 dp->name, __func__, isr, INTR_BITS)); in sfe_interrupt()
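
sfe_interrupt() together with the two reset routines shows a read-to-clear status register being handled carefully: reading ISR clears it, so any of the driver's own interrupt bits observed while the IMR is being zeroed are latched into a software isr_pended word and folded back in when the interrupt routine next runs. A bare-bones sketch of that latch-and-merge idea; the accessors, masks, and the merge step itself are assumptions inferred from the matches, not the driver's code.

#include <stdint.h>
#include <stdbool.h>

extern uint32_t isr_read(void);		/* reading clears the hardware ISR */
extern void imr_write(uint32_t);

static uint32_t our_intr_bits;		/* interrupt sources we enabled */
static uint32_t isr_pended;		/* status latched while masked */

static void
intr_mask_and_latch(void)
{
	imr_write(0);					/* mask everything */
	isr_pended |= isr_read() & our_intr_bits;	/* don't lose status */
}

static bool
intr_service(void)
{
	uint32_t isr = isr_read() | isr_pended;

	isr_pended = 0;
	if ((isr & our_intr_bits) == 0)
		return (false);			/* not our interrupt */
	/* ... dispatch rx/tx/error handling on the bits in isr ... */
	return (true);
}
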
1647 sfe_mii_sync_dp83815(struct gem_dev *dp) in sfe_mii_sync_dp83815() argument
1653 sfe_mii_read_dp83815(struct gem_dev *dp, uint_t offset) in sfe_mii_read_dp83815() argument
1656 dp->name, __func__, offset)); in sfe_mii_read_dp83815()
1657 return ((uint16_t)INL(dp, MII_REGS_BASE + offset*4)); in sfe_mii_read_dp83815()
1661 sfe_mii_write_dp83815(struct gem_dev *dp, uint_t offset, uint16_t val) in sfe_mii_write_dp83815() argument
1664 dp->name, __func__, offset, val)); in sfe_mii_write_dp83815()
1665 OUTL(dp, MII_REGS_BASE + offset*4, val); in sfe_mii_write_dp83815()
1669 sfe_mii_config_dp83815(struct gem_dev *dp) in sfe_mii_config_dp83815() argument
1673 srr = INL(dp, SRR) & SRR_REV; in sfe_mii_config_dp83815()
1676 dp->name, srr, in sfe_mii_config_dp83815()
1677 INW(dp, 0x00cc), /* PGSEL */ in sfe_mii_config_dp83815()
1678 INW(dp, 0x00e4), /* PMDCSR */ in sfe_mii_config_dp83815()
1679 INW(dp, 0x00fc), /* TSTDAT */ in sfe_mii_config_dp83815()
1680 INW(dp, 0x00f4), /* DSPCFG */ in sfe_mii_config_dp83815()
1681 INW(dp, 0x00f8))); /* SDCFG */ in sfe_mii_config_dp83815()
1690 OUTW(dp, 0x00cc, 0x0001); /* PGSEL */ in sfe_mii_config_dp83815()
1691 OUTW(dp, 0x00e4, 0x189c); /* PMDCSR */ in sfe_mii_config_dp83815()
1692 OUTW(dp, 0x00fc, 0x0000); /* TSTDAT */ in sfe_mii_config_dp83815()
1693 OUTW(dp, 0x00f4, 0x5040); /* DSPCFG */ in sfe_mii_config_dp83815()
1694 OUTW(dp, 0x00f8, 0x008c); /* SDCFG */ in sfe_mii_config_dp83815()
1695 OUTW(dp, 0x00cc, 0x0000); /* PGSEL */ in sfe_mii_config_dp83815()
1699 dp->name, in sfe_mii_config_dp83815()
1700 INW(dp, 0x00cc), /* PGSEL */ in sfe_mii_config_dp83815()
1701 INW(dp, 0x00e4), /* PMDCSR */ in sfe_mii_config_dp83815()
1702 INW(dp, 0x00fc), /* TSTDAT */ in sfe_mii_config_dp83815()
1703 INW(dp, 0x00f4), /* DSPCFG */ in sfe_mii_config_dp83815()
1704 INW(dp, 0x00f8))); /* SDCFG */ in sfe_mii_config_dp83815()
1710 OUTW(dp, 0x00cc, 0x0001); /* PGSEL */ in sfe_mii_config_dp83815()
1711 OUTW(dp, 0x00e4, 0x189c); /* PMDCSR */ in sfe_mii_config_dp83815()
1712 OUTW(dp, 0x00cc, 0x0000); /* PGSEL */ in sfe_mii_config_dp83815()
1716 dp->name, in sfe_mii_config_dp83815()
1717 INW(dp, 0x00cc), /* PGSEL */ in sfe_mii_config_dp83815()
1718 INW(dp, 0x00e4))); /* PMDCSR */ in sfe_mii_config_dp83815()
1721 return (gem_mii_config_default(dp)); in sfe_mii_config_dp83815()
1725 sfe_mii_probe_dp83815(struct gem_dev *dp) in sfe_mii_probe_dp83815() argument
1731 dp->name, __func__)); in sfe_mii_probe_dp83815()
1732 dp->mii_phy_addr = 0; in sfe_mii_probe_dp83815()
1733 dp->gc.gc_mii_sync = &sfe_mii_sync_sis900; in sfe_mii_probe_dp83815()
1734 dp->gc.gc_mii_read = &sfe_mii_read_sis900; in sfe_mii_probe_dp83815()
1735 dp->gc.gc_mii_write = &sfe_mii_write_sis900; in sfe_mii_probe_dp83815()
1737 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG); in sfe_mii_probe_dp83815()
1738 OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS); in sfe_mii_probe_dp83815()
1740 if (gem_mii_probe_default(dp) == GEM_SUCCESS) { in sfe_mii_probe_dp83815()
1746 dp->name, __func__)); in sfe_mii_probe_dp83815()
1747 dp->mii_phy_addr = -1; in sfe_mii_probe_dp83815()
1748 dp->gc.gc_mii_sync = &sfe_mii_sync_dp83815; in sfe_mii_probe_dp83815()
1749 dp->gc.gc_mii_read = &sfe_mii_read_dp83815; in sfe_mii_probe_dp83815()
1750 dp->gc.gc_mii_write = &sfe_mii_write_dp83815; in sfe_mii_probe_dp83815()
1752 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG); in sfe_mii_probe_dp83815()
1753 OUTL(dp, CFG, val | CFG_PAUSE_ADV | CFG_PHY_RST); in sfe_mii_probe_dp83815()
1755 OUTL(dp, CFG, val | CFG_PAUSE_ADV); in sfe_mii_probe_dp83815()
1760 return (gem_mii_probe_default(dp)); in sfe_mii_probe_dp83815()
1764 sfe_mii_init_dp83815(struct gem_dev *dp) in sfe_mii_init_dp83815() argument
1768 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG); in sfe_mii_init_dp83815()
1770 if (dp->mii_phy_addr == -1) { in sfe_mii_init_dp83815()
1772 OUTL(dp, CFG, val | CFG_PAUSE_ADV); in sfe_mii_init_dp83815()
1775 OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS); in sfe_mii_init_dp83815()
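
The DP83815 MII routines above show two access paths: an external PHY (probed first) goes through the bit-banged sis900 routines further down, while the internal PHY's registers are simply memory-mapped at MII_REGS_BASE with a stride of four. A short sketch of the memory-mapped path; mmio_read32/mmio_write32 and the base offset are placeholders for the driver's INL/OUTL accessors.

#include <stdint.h>

#define	MII_REGS_BASE	0x80	/* placeholder offset of the mapped PHY block */

extern uint32_t mmio_read32(unsigned offset);
extern void mmio_write32(unsigned offset, uint32_t val);

/* Internal-PHY access: each 16-bit MII register shows up as a chip
 * register at MII_REGS_BASE + reg * 4, so no MDIO framing is needed.
 */
static uint16_t
mii_read_mapped(unsigned reg)
{
	return ((uint16_t)mmio_read32(MII_REGS_BASE + reg * 4));
}

static void
mii_write_mapped(unsigned reg, uint16_t val)
{
	mmio_write32(MII_REGS_BASE + reg * 4, val);
}
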
1784 #define MDIO_DELAY(dp) {(void) INL(dp, MEAR); (void) INL(dp, MEAR); } argument
1786 sfe_mii_sync_sis900(struct gem_dev *dp) in sfe_mii_sync_sis900() argument
1792 OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO); in sfe_mii_sync_sis900()
1793 MDIO_DELAY(dp); in sfe_mii_sync_sis900()
1794 OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO | MEAR_MDC); in sfe_mii_sync_sis900()
1795 MDIO_DELAY(dp); in sfe_mii_sync_sis900()
1800 sfe_mii_config_sis900(struct gem_dev *dp) in sfe_mii_config_sis900() argument
1802 struct sfe_dev *lp = dp->private; in sfe_mii_config_sis900()
1805 if ((dp->mii_phy_id & PHY_MASK) == PHY_ICS1893) { in sfe_mii_config_sis900()
1807 gem_mii_write(dp, 0x0018, 0xD200); in sfe_mii_config_sis900()
1815 gem_mii_write(dp, MII_AN_ADVERT, 0x05e1); in sfe_mii_config_sis900()
1816 gem_mii_write(dp, MII_CONFIG1, 0x0022); in sfe_mii_config_sis900()
1817 gem_mii_write(dp, MII_CONFIG2, 0xff00); in sfe_mii_config_sis900()
1818 gem_mii_write(dp, MII_MASK, 0xffc0); in sfe_mii_config_sis900()
1820 sfe_set_eq_sis630(dp); in sfe_mii_config_sis900()
1822 return (gem_mii_config_default(dp)); in sfe_mii_config_sis900()
1826 sfe_mii_read_sis900(struct gem_dev *dp, uint_t reg) in sfe_mii_read_sis900() argument
1833 cmd = MII_READ_CMD(dp->mii_phy_addr, reg); in sfe_mii_read_sis900()
1837 OUTL(dp, MEAR, data | MEAR_MDDIR); in sfe_mii_read_sis900()
1838 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1839 OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC); in sfe_mii_read_sis900()
1840 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1844 OUTL(dp, MEAR, 0); in sfe_mii_read_sis900()
1845 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1848 OUTL(dp, MEAR, MEAR_MDC); in sfe_mii_read_sis900()
1849 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1851 OUTL(dp, MEAR, 0); in sfe_mii_read_sis900()
1853 (void) INL(dp, MEAR); /* delay */ in sfe_mii_read_sis900()
1854 if (INL(dp, MEAR) & MEAR_MDIO) { in sfe_mii_read_sis900()
1856 dp->name, dp->mii_phy_addr); in sfe_mii_read_sis900()
1859 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1862 OUTL(dp, MEAR, MEAR_MDC); in sfe_mii_read_sis900()
1863 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1867 OUTL(dp, MEAR, 0); in sfe_mii_read_sis900()
1868 (void) INL(dp, MEAR); /* delay */ in sfe_mii_read_sis900()
1869 ret = (ret << 1) | ((INL(dp, MEAR) >> MEAR_MDIO_SHIFT) & 1); in sfe_mii_read_sis900()
1870 OUTL(dp, MEAR, MEAR_MDC); in sfe_mii_read_sis900()
1871 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1876 OUTL(dp, MEAR, 0); in sfe_mii_read_sis900()
1877 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1878 OUTL(dp, MEAR, MEAR_MDC); in sfe_mii_read_sis900()
1879 MDIO_DELAY(dp); in sfe_mii_read_sis900()
1886 sfe_mii_write_sis900(struct gem_dev *dp, uint_t reg, uint16_t val) in sfe_mii_write_sis900() argument
1892 cmd = MII_WRITE_CMD(dp->mii_phy_addr, reg, val); in sfe_mii_write_sis900()
1896 OUTL(dp, MEAR, data | MEAR_MDDIR); in sfe_mii_write_sis900()
1897 MDIO_DELAY(dp); in sfe_mii_write_sis900()
1898 OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC); in sfe_mii_write_sis900()
1899 MDIO_DELAY(dp); in sfe_mii_write_sis900()
1904 OUTL(dp, MEAR, 0); in sfe_mii_write_sis900()
1905 MDIO_DELAY(dp); in sfe_mii_write_sis900()
1906 OUTL(dp, MEAR, MEAR_MDC); in sfe_mii_write_sis900()
1907 MDIO_DELAY(dp); in sfe_mii_write_sis900()
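
The sis900 MII routines drive the MDIO pins directly through MEAR: MDDIR sets the pin direction, MDIO carries the data bit, and MDC is toggled once per bit, with a turn-around phase before the 16 data bits are clocked in. A compact sketch of such an IEEE 802.3 clause-22 read under the same assumptions; mear_read()/mear_write() and the bit positions are placeholders for the driver's INL/OUTL accessors and MEAR definitions.

#include <stdint.h>

#define	MDIO	(1u << 4)	/* data pin (placeholder value) */
#define	MDDIR	(1u << 5)	/* drive the data pin (placeholder value) */
#define	MDC	(1u << 6)	/* management clock (placeholder value) */
#define	MDIO_SHIFT	4

extern uint32_t mear_read(void);	/* stand-ins for INL/OUTL(dp, MEAR) */
extern void mear_write(uint32_t);

static uint16_t
mdio_read(unsigned phy, unsigned reg)
{
	/* clause-22 read frame: ST=01, OP=10, 5-bit PHY, 5-bit register */
	unsigned cmd = (0x6u << 10) | (phy << 5) | reg;
	uint16_t val = 0;
	int i;

	for (i = 0; i < 32; i++) {		/* preamble: 32 ones */
		mear_write(MDDIR | MDIO);
		mear_write(MDDIR | MDIO | MDC);
	}
	for (i = 13; i >= 0; i--) {		/* 14 command bits, MSB first */
		uint32_t bit = ((cmd >> i) & 1) ? MDIO : 0;

		mear_write(MDDIR | bit);
		mear_write(MDDIR | bit | MDC);
	}
	mear_write(0);				/* turn-around: release the bus */
	mear_write(MDC);
	for (i = 0; i < 16; i++) {		/* clock the 16 data bits in */
		mear_write(0);
		val = (val << 1) | ((mear_read() >> MDIO_SHIFT) & 1);
		mear_write(MDC);
	}
	mear_write(0);				/* back to idle */
	return (val);
}
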
1913 sfe_set_eq_sis630(struct gem_dev *dp) in sfe_set_eq_sis630() argument
1921 struct sfe_dev *lp = dp->private; in sfe_set_eq_sis630()
1931 if (dp->mii_state == MII_STATE_LINKUP) { in sfe_set_eq_sis630()
1932 reg14h = gem_mii_read(dp, MII_RESV); in sfe_set_eq_sis630()
1933 gem_mii_write(dp, MII_RESV, (0x2200 | reg14h) & 0xBFFF); in sfe_set_eq_sis630()
1935 eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3; in sfe_set_eq_sis630()
1938 eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3; in sfe_set_eq_sis630()
1971 reg14h = gem_mii_read(dp, MII_RESV) & ~0x02f8; in sfe_set_eq_sis630()
1973 gem_mii_write(dp, MII_RESV, reg14h); in sfe_set_eq_sis630()
1975 reg14h = (gem_mii_read(dp, MII_RESV) & ~0x4000) | 0x2000; in sfe_set_eq_sis630()
1982 gem_mii_write(dp, MII_RESV, reg14h); in sfe_set_eq_sis630()
1992 sfe_chipinfo_init_sis900(struct gem_dev *dp) in sfe_chipinfo_init_sis900() argument
1995 struct sfe_dev *lp = (struct sfe_dev *)dp->private; in sfe_chipinfo_init_sis900()
2026 dp->name); in sfe_chipinfo_init_sis900()
2032 dp->name); in sfe_chipinfo_init_sis900()
2043 sfe_attach_chip(struct gem_dev *dp) in sfe_attach_chip() argument
2045 struct sfe_dev *lp = (struct sfe_dev *)dp->private; in sfe_attach_chip()
2047 DPRINTF(4, (CE_CONT, CONS "!%s: %s called", dp->name, __func__)); in sfe_attach_chip()
2051 sfe_chipinfo_init_sis900(dp); in sfe_attach_chip()
2057 if (!(lp->get_mac_addr)(dp)) { in sfe_attach_chip()
2061 dp->name, __func__); in sfe_attach_chip()
2066 dp->mii_phy_addr = -1; /* no need to scan PHY */ in sfe_attach_chip()
2067 dp->misc_flag |= GEM_VLAN_SOFT; in sfe_attach_chip()
2068 dp->txthr += 4; /* VTAG_SIZE */ in sfe_attach_chip()
2070 dp->txthr = min(dp->txthr, TXFIFOSIZE - 2); in sfe_attach_chip()
2090 struct gem_dev *dp; in sfeattach() local
2301 dp = gem_do_attach(dip, 0, gcp, base, &regs_ha, in sfeattach()
2305 if (dp == NULL) { in sfeattach()
2348 struct gem_dev *dp; in sfe_quiesce() local
2351 dp = GEM_GET_DEV(dip); in sfe_quiesce()
2353 if (dp == NULL) in sfe_quiesce()
2356 ret = sfe_stop_chip_quiesce(dp); in sfe_quiesce()