Lines Matching +full:srom +full:- +full:page +full:- +full:mode
1 // SPDX-License-Identifier: GPL-2.0-only
29 #include <asm/page.h>
37 #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
38 #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
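These lines appear to be a matching-lines listing from the Apple BMAC PowerMac Ethernet driver. The two page-rounding macros at file lines 37-38 mask with PAGE_SIZE - 1 to truncate or round an address to a page boundary; a minimal user-space sketch of the same arithmetic, assuming a 4 KiB page size and an arbitrary example address, is:

/*
 * Illustration only: the same page-rounding arithmetic in user space.
 * The 4 KiB PAGE_SIZE and the address 0x12345 are assumptions made for
 * this example, not values taken from the listing.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

int main(void)
{
	unsigned long addr = 0x12345;

	/* 0x12345 truncates down to 0x12000 and rounds up to 0x13000 */
	printf("trunc_page: %#lx\n", (unsigned long)trunc_page(addr));
	printf("round_page: %#lx\n", (unsigned long)round_page(addr));
	return 0;
}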
179 dbdma_st32(&dmap->control, in dbdma_continue()
187 dbdma_st32(&dmap->control, in dbdma_reset()
190 while (dbdma_ld32(&dmap->status) & RUN) in dbdma_reset()
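The dbdma_continue() and dbdma_reset() fragments above write the channel control register through dbdma_st32() and poll the status register for RUN. A minimal sketch of the mask-and-value encoding that control writes such as DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE) rely on, assuming the conventional <asm/dbdma.h> macro definitions:

/*
 * Hedged sketch: the DBDMA channel control register encodes a 16-bit
 * "bits to change" mask in its upper half and the new bit values in its
 * lower half.  These reproduce the usual <asm/dbdma.h> definitions; treat
 * them as an assumption here, since that header is not part of this listing.
 */
#define DBDMA_SET(x)	((x) | (x) << 16)	/* change bits x, setting them */
#define DBDMA_CLEAR(x)	((x) << 16)		/* change bits x, clearing them */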
199 out_le16(&cp->command, cmd); in dbdma_setcmd()
200 out_le16(&cp->req_count, count); in dbdma_setcmd()
201 out_le32(&cp->phy_addr, addr); in dbdma_setcmd()
202 out_le32(&cp->cmd_dep, cmd_dep); in dbdma_setcmd()
203 out_le16(&cp->xfer_status, 0); in dbdma_setcmd()
204 out_le16(&cp->res_count, 0); in dbdma_setcmd()
210 out_le16((void __iomem *)dev->base_addr + reg_offset, data); in bmwrite()
217 return in_le16((void __iomem *)dev->base_addr + reg_offset); in bmread()
224 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_enable_and_reset_chip()
225 volatile struct dbdma_regs __iomem *td = bp->tx_dma; in bmac_enable_and_reset_chip()
232 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1); in bmac_enable_and_reset_chip()
242 while (--nb >= 0) { in bmac_mif_readbits()
262 while (--nb >= 0) { in bmac_mif_writebits()
319 --i; in bmac_init_registers()
324 if (!bp->is_bmac_plus) { in bmac_init_registers()
366 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; in bmac_init_registers()
367 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ in bmac_init_registers()
368 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ in bmac_init_registers()
369 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ in bmac_init_registers()
370 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ in bmac_init_registers()
372 pWord16 = (const unsigned short *)dev->dev_addr; in bmac_init_registers()
401 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_start_chip()
430 if (bp->is_bmac_plus) { in bmac_init_phy()
461 spin_lock_irqsave(&bp->lock, flags); in bmac_suspend()
462 if (bp->timeout_active) { in bmac_suspend()
463 timer_delete(&bp->tx_timeout); in bmac_suspend()
464 bp->timeout_active = 0; in bmac_suspend()
466 disable_irq(dev->irq); in bmac_suspend()
467 disable_irq(bp->tx_dma_intr); in bmac_suspend()
468 disable_irq(bp->rx_dma_intr); in bmac_suspend()
469 bp->sleeping = 1; in bmac_suspend()
470 spin_unlock_irqrestore(&bp->lock, flags); in bmac_suspend()
471 if (bp->opened) { in bmac_suspend()
472 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_suspend()
473 volatile struct dbdma_regs __iomem *td = bp->tx_dma; in bmac_suspend()
481 rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ in bmac_suspend()
482 td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ in bmac_suspend()
485 if (bp->rx_bufs[i] != NULL) { in bmac_suspend()
486 dev_kfree_skb(bp->rx_bufs[i]); in bmac_suspend()
487 bp->rx_bufs[i] = NULL; in bmac_suspend()
491 if (bp->tx_bufs[i] != NULL) { in bmac_suspend()
492 dev_kfree_skb(bp->tx_bufs[i]); in bmac_suspend()
493 bp->tx_bufs[i] = NULL; in bmac_suspend()
497 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); in bmac_suspend()
507 if (bp->opened) in bmac_resume()
510 enable_irq(dev->irq); in bmac_resume()
511 enable_irq(bp->tx_dma_intr); in bmac_resume()
512 enable_irq(bp->rx_dma_intr); in bmac_resume()
526 spin_lock_irqsave(&bp->lock, flags); in bmac_set_address()
531 pWord16 = (const unsigned short *)dev->dev_addr; in bmac_set_address()
536 spin_unlock_irqrestore(&bp->lock, flags); in bmac_set_address()
546 spin_lock_irqsave(&bp->lock, flags); in bmac_set_timeout()
547 if (bp->timeout_active) in bmac_set_timeout()
548 timer_delete(&bp->tx_timeout); in bmac_set_timeout()
549 bp->tx_timeout.expires = jiffies + TX_TIMEOUT; in bmac_set_timeout()
550 add_timer(&bp->tx_timeout); in bmac_set_timeout()
551 bp->timeout_active = 1; in bmac_set_timeout()
552 spin_unlock_irqrestore(&bp->lock, flags); in bmac_set_timeout()
562 len = skb->len; in bmac_construct_xmt()
563 vaddr = skb->data; in bmac_construct_xmt()
572 unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf; in bmac_construct_rxbuff()
581 volatile struct dbdma_regs __iomem *td = bp->tx_dma; in bmac_init_tx_ring()
583 memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd)); in bmac_init_tx_ring()
585 bp->tx_empty = 0; in bmac_init_tx_ring()
586 bp->tx_fill = 0; in bmac_init_tx_ring()
587 bp->tx_fullup = 0; in bmac_init_tx_ring()
590 dbdma_setcmd(&bp->tx_cmds[N_TX_RING], in bmac_init_tx_ring()
591 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds)); in bmac_init_tx_ring()
595 out_le32(&td->wait_sel, 0x00200020); in bmac_init_tx_ring()
596 out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds)); in bmac_init_tx_ring()
603 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_init_rx_ring()
608 memset((char *)bp->rx_cmds, 0, in bmac_init_rx_ring()
611 if ((skb = bp->rx_bufs[i]) == NULL) { in bmac_init_rx_ring()
612 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); in bmac_init_rx_ring()
616 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); in bmac_init_rx_ring()
619 bp->rx_empty = 0; in bmac_init_rx_ring()
620 bp->rx_fill = i; in bmac_init_rx_ring()
623 dbdma_setcmd(&bp->rx_cmds[N_RX_RING], in bmac_init_rx_ring()
624 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds)); in bmac_init_rx_ring()
628 out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds)); in bmac_init_rx_ring()
637 volatile struct dbdma_regs __iomem *td = bp->tx_dma; in bmac_transmit_packet()
642 /* bp->tx_empty, bp->tx_fill)); */ in bmac_transmit_packet()
643 i = bp->tx_fill + 1; in bmac_transmit_packet()
646 if (i == bp->tx_empty) { in bmac_transmit_packet()
648 bp->tx_fullup = 1; in bmac_transmit_packet()
650 return -1; /* can't take it at the moment */ in bmac_transmit_packet()
653 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0); in bmac_transmit_packet()
655 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]); in bmac_transmit_packet()
657 bp->tx_bufs[bp->tx_fill] = skb; in bmac_transmit_packet()
658 bp->tx_fill = i; in bmac_transmit_packet()
660 dev->stats.tx_bytes += skb->len; in bmac_transmit_packet()
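The tx_fill/tx_empty bookkeeping above (file lines 643-658) and its consumer side (781-783) implement a classic one-slot-spare ring buffer: the producer refuses to advance onto the consumer's index. A self-contained sketch of that occupancy test, with N_TX_RING's value assumed since it is not shown in this listing:

/*
 * Sketch of the one-slot-spare ring test.  The value 32 for N_TX_RING is
 * an assumption made for the example.
 */
#include <stdio.h>

#define N_TX_RING 32

static int tx_ring_full(int fill, int empty)
{
	int next = fill + 1;		/* slot the producer wants to move to */

	if (next >= N_TX_RING)
		next = 0;
	return next == empty;		/* one slot always stays unused */
}

int main(void)
{
	printf("%d\n", tx_ring_full(5, 6));	/* 1: ring full, stop the queue */
	printf("%d\n", tx_ring_full(5, 7));	/* 0: room for this packet */
	return 0;
}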
673 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_rxdma_intr()
681 spin_lock_irqsave(&bp->lock, flags); in bmac_rxdma_intr()
687 last = -1; in bmac_rxdma_intr()
688 i = bp->rx_empty; in bmac_rxdma_intr()
691 cp = &bp->rx_cmds[i]; in bmac_rxdma_intr()
692 stat = le16_to_cpu(cp->xfer_status); in bmac_rxdma_intr()
693 residual = le16_to_cpu(cp->res_count); in bmac_rxdma_intr()
696 nb = RX_BUFLEN - residual - 2; in bmac_rxdma_intr()
697 if (nb < (ETHERMINPACKET - ETHERCRC)) { in bmac_rxdma_intr()
699 dev->stats.rx_length_errors++; in bmac_rxdma_intr()
700 dev->stats.rx_errors++; in bmac_rxdma_intr()
702 skb = bp->rx_bufs[i]; in bmac_rxdma_intr()
703 bp->rx_bufs[i] = NULL; in bmac_rxdma_intr()
706 nb -= ETHERCRC; in bmac_rxdma_intr()
708 skb->protocol = eth_type_trans(skb, dev); in bmac_rxdma_intr()
710 ++dev->stats.rx_packets; in bmac_rxdma_intr()
711 dev->stats.rx_bytes += nb; in bmac_rxdma_intr()
713 ++dev->stats.rx_dropped; in bmac_rxdma_intr()
715 if ((skb = bp->rx_bufs[i]) == NULL) { in bmac_rxdma_intr()
716 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); in bmac_rxdma_intr()
718 skb_reserve(bp->rx_bufs[i], 2); in bmac_rxdma_intr()
720 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); in bmac_rxdma_intr()
721 cp->res_count = cpu_to_le16(0); in bmac_rxdma_intr()
722 cp->xfer_status = cpu_to_le16(0); in bmac_rxdma_intr()
727 if (last != -1) { in bmac_rxdma_intr()
728 bp->rx_fill = last; in bmac_rxdma_intr()
729 bp->rx_empty = i; in bmac_rxdma_intr()
733 spin_unlock_irqrestore(&bp->lock, flags); in bmac_rxdma_intr()
751 spin_lock_irqsave(&bp->lock, flags); in bmac_txdma_intr()
757 /* timer_delete(&bp->tx_timeout); */ in bmac_txdma_intr()
758 /* bp->timeout_active = 0; */ in bmac_txdma_intr()
761 cp = &bp->tx_cmds[bp->tx_empty]; in bmac_txdma_intr()
762 stat = le16_to_cpu(cp->xfer_status); in bmac_txdma_intr()
770 if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr))) in bmac_txdma_intr()
774 if (bp->tx_bufs[bp->tx_empty]) { in bmac_txdma_intr()
775 ++dev->stats.tx_packets; in bmac_txdma_intr()
776 dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]); in bmac_txdma_intr()
778 bp->tx_bufs[bp->tx_empty] = NULL; in bmac_txdma_intr()
779 bp->tx_fullup = 0; in bmac_txdma_intr()
781 if (++bp->tx_empty >= N_TX_RING) in bmac_txdma_intr()
782 bp->tx_empty = 0; in bmac_txdma_intr()
783 if (bp->tx_empty == bp->tx_fill) in bmac_txdma_intr()
787 spin_unlock_irqrestore(&bp->lock, flags); in bmac_txdma_intr()
790 XXDEBUG(("bmac_txdma_intr done->bmac_start\n")); in bmac_txdma_intr()
811 if (bp->hash_use_count[crc]++) return; /* This bit is already set */ in bmac_addhash()
814 bp->hash_use_count[crc/16] |= mask; in bmac_addhash()
825 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */ in bmac_removehash()
826 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */ in bmac_removehash()
829 bp->hash_table_mask[crc/16] &= mask; in bmac_removehash()
871 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ in bmac_update_hash_table_mask()
872 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ in bmac_update_hash_table_mask()
873 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ in bmac_update_hash_table_mask()
874 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ in bmac_update_hash_table_mask()
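The four BHASH writes above, together with the crc/16 and crc%16 arithmetic at file lines 811-829, treat the 64-bit multicast hash filter as four 16-bit words: filter bit n lives in word n/16 at bit position n%16, with BHASH3 holding bits 15-0 and BHASH0 bits 63-48. A small worked example of that mapping, using an arbitrary hash value:

/*
 * Worked example of the bit-to-word mapping implied by the listing above.
 * The value 37 is just an arbitrary 6-bit hash chosen for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int bit = 37;			/* 6-bit hash value */
	unsigned int word = bit / 16;		/* hash_table_mask[] index */
	unsigned int mask = 1u << (bit % 16);	/* bit inside that word */

	/* bit 37 -> word 2 (written to BHASH1, "bits 47 - 32"), mask 0x0020 */
	printf("word %u, mask %#06x\n", word, mask);
	return 0;
}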
886 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
897 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
902 num_addrs == -1 Promiscuous mode, receive all packets
903 num_addrs == 0 Normal mode, clear multicast list
904 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
905 best-effort filtering.
915 if (bp->sleeping) in bmac_set_multicast()
920 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { in bmac_set_multicast()
921 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff; in bmac_set_multicast()
925 } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) { in bmac_set_multicast()
930 XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg)); in bmac_set_multicast()
932 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; in bmac_set_multicast()
933 for (i=0; i<64; i++) bp->hash_use_count[i] = 0; in bmac_set_multicast()
939 bmac_addhash(bp, ha->addr); in bmac_set_multicast()
957 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { in bmac_set_multicast()
962 } else if(dev->flags & IFF_PROMISC) { in bmac_set_multicast()
974 crc = ether_crc_le(6, ha->addr); in bmac_set_multicast()
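Read alongside the comment at file lines 902-905, the flag tests at lines 920-962 fall into three cases: open the hash filter completely for IFF_ALLMULTI or more than 64 addresses, bypass it for IFF_PROMISC, and otherwise hash each listed address. A sketch of that dispatch follows; open_filter(), bypass_filter() and hash_addresses() are hypothetical helpers standing in for the hash-mask and rx_cfg updates visible in the listing.

/*
 * Illustration only.  open_filter(), bypass_filter() and hash_addresses()
 * are hypothetical stand-ins; only the case structure is the point here.
 */
static void example_set_multicast(struct net_device *dev)
{
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 64)
		open_filter(dev);	/* accept every multicast frame */
	else if (dev->flags & IFF_PROMISC)
		bypass_filter(dev);	/* accept everything */
	else
		hash_addresses(dev);	/* per-address best-effort filtering */
}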
997 /* if (status & FrameReceived) dev->stats.rx_dropped++; */ in bmac_misc_intr()
998 if (status & RxErrorMask) dev->stats.rx_errors++; in bmac_misc_intr()
999 if (status & RxCRCCntExp) dev->stats.rx_crc_errors++; in bmac_misc_intr()
1000 if (status & RxLenCntExp) dev->stats.rx_length_errors++; in bmac_misc_intr()
1001 if (status & RxOverFlow) dev->stats.rx_over_errors++; in bmac_misc_intr()
1002 if (status & RxAlignCntExp) dev->stats.rx_frame_errors++; in bmac_misc_intr()
1004 /* if (status & FrameSent) dev->stats.tx_dropped++; */ in bmac_misc_intr()
1005 if (status & TxErrorMask) dev->stats.tx_errors++; in bmac_misc_intr()
1006 if (status & TxUnderrun) dev->stats.tx_fifo_errors++; in bmac_misc_intr()
1007 if (status & TxNormalCollExp) dev->stats.collisions++; in bmac_misc_intr()
1023 #define SROMReadCount 3 /* number of words to read from SROM */
1085 val = addr >> (addr_len-i-1); in read_srom()
1089 /* Now read in the 16-bit data */ in read_srom()
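In read_srom(), file line 1085 peels the address off MSB-first: on pass i the low bit of addr >> (addr_len - i - 1) is presumably what gets driven onto the SROM data line before the 16-bit data word is clocked back in. A standalone check of that shift arithmetic:

/*
 * Prints the bits of an SROM address in the order the loop at file line
 * 1085 would emit them (most significant bit first).  The 6-bit width and
 * the address value are assumptions for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int addr = 0x2c;	/* binary 101100 */
	int addr_len = 6, i;

	for (i = 0; i < addr_len; i++)
		printf("%u", (addr >> (addr_len - i - 1)) & 1);
	printf("\n");			/* prints 101100 */
	return 0;
}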
1141 spin_lock_irqsave(&bp->lock, flags); in bmac_reset_and_enable()
1148 bp->sleeping = 0; in bmac_reset_and_enable()
1157 memcpy(data, dev->dev_addr, ETH_ALEN); in bmac_reset_and_enable()
1158 memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN); in bmac_reset_and_enable()
1161 spin_unlock_irqrestore(&bp->lock, flags); in bmac_reset_and_enable()
1185 int is_bmac_plus = ((int)match->data) != 0; in bmac_probe()
1189 return -ENODEV; in bmac_probe()
1192 "mac-address", NULL); in bmac_probe()
1195 "local-mac-address", NULL); in bmac_probe()
1197 printk(KERN_ERR "BMAC: Can't get mac-address\n"); in bmac_probe()
1198 return -ENODEV; in bmac_probe()
1205 return -ENOMEM; in bmac_probe()
1208 SET_NETDEV_DEV(dev, &mdev->ofdev.dev); in bmac_probe()
1211 bp->mdev = mdev; in bmac_probe()
1212 spin_lock_init(&bp->lock); in bmac_probe()
1219 dev->base_addr = (unsigned long) in bmac_probe()
1221 if (dev->base_addr == 0) in bmac_probe()
1224 dev->irq = macio_irq(mdev, 0); in bmac_probe()
1239 dev->netdev_ops = &bmac_netdev_ops; in bmac_probe()
1240 dev->ethtool_ops = &bmac_ethtool_ops; in bmac_probe()
1246 bp->is_bmac_plus = is_bmac_plus; in bmac_probe()
1247 bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1)); in bmac_probe()
1248 if (!bp->tx_dma) in bmac_probe()
1250 bp->tx_dma_intr = macio_irq(mdev, 1); in bmac_probe()
1251 bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2)); in bmac_probe()
1252 if (!bp->rx_dma) in bmac_probe()
1254 bp->rx_dma_intr = macio_irq(mdev, 2); in bmac_probe()
1256 bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1); in bmac_probe()
1257 bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1; in bmac_probe()
1259 bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1); in bmac_probe()
1260 skb_queue_head_init(bp->queue); in bmac_probe()
1262 timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0); in bmac_probe()
1264 ret = request_irq(dev->irq, bmac_misc_intr, IRQF_NO_AUTOEN, "BMAC-misc", dev); in bmac_probe()
1266 printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq); in bmac_probe()
1269 ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev); in bmac_probe()
1271 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr); in bmac_probe()
1274 ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev); in bmac_probe()
1276 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr); in bmac_probe()
1281 * re-enabled on open() in bmac_probe()
1283 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); in bmac_probe()
1291 dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr); in bmac_probe()
1292 XXDEBUG((", base_addr=%#0lx", dev->base_addr)); in bmac_probe()
1298 free_irq(bp->rx_dma_intr, dev); in bmac_probe()
1300 free_irq(bp->tx_dma_intr, dev); in bmac_probe()
1302 free_irq(dev->irq, dev); in bmac_probe()
1304 iounmap(bp->rx_dma); in bmac_probe()
1306 iounmap(bp->tx_dma); in bmac_probe()
1308 iounmap((void __iomem *)dev->base_addr); in bmac_probe()
1312 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); in bmac_probe()
1315 return -ENODEV; in bmac_probe()
1323 bp->opened = 1; in bmac_open()
1325 enable_irq(dev->irq); in bmac_open()
1332 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_close()
1333 volatile struct dbdma_regs __iomem *td = bp->tx_dma; in bmac_close()
1337 bp->sleeping = 1; in bmac_close()
1349 rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ in bmac_close()
1350 td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ in bmac_close()
1355 if (bp->rx_bufs[i] != NULL) { in bmac_close()
1356 dev_kfree_skb(bp->rx_bufs[i]); in bmac_close()
1357 bp->rx_bufs[i] = NULL; in bmac_close()
1362 if (bp->tx_bufs[i] != NULL) { in bmac_close()
1363 dev_kfree_skb(bp->tx_bufs[i]); in bmac_close()
1364 bp->tx_bufs[i] = NULL; in bmac_close()
1369 bp->opened = 0; in bmac_close()
1370 disable_irq(dev->irq); in bmac_close()
1371 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); in bmac_close()
1384 if (bp->sleeping) in bmac_start()
1387 spin_lock_irqsave(&bp->lock, flags); in bmac_start()
1389 i = bp->tx_fill + 1; in bmac_start()
1392 if (i == bp->tx_empty) in bmac_start()
1394 skb = skb_dequeue(bp->queue); in bmac_start()
1399 spin_unlock_irqrestore(&bp->lock, flags); in bmac_start()
1406 skb_queue_tail(bp->queue, skb); in bmac_output()
1414 struct net_device *dev = macio_get_drvdata(bp->mdev); in bmac_tx_timeout()
1415 volatile struct dbdma_regs __iomem *td = bp->tx_dma; in bmac_tx_timeout()
1416 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; in bmac_tx_timeout()
1423 spin_lock_irqsave(&bp->lock, flags); in bmac_tx_timeout()
1424 bp->timeout_active = 0; in bmac_tx_timeout()
1429 cp = &bp->tx_cmds[bp->tx_empty]; in bmac_tx_timeout()
1431 /* le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */ in bmac_tx_timeout()
1432 /* mb->pr, mb->xmtfs, mb->fifofc)); */ in bmac_tx_timeout()
1439 out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD)); in bmac_tx_timeout()
1440 printk(KERN_ERR "bmac: transmit timeout - resetting\n"); in bmac_tx_timeout()
1444 cp = bus_to_virt(le32_to_cpu(rd->cmdptr)); in bmac_tx_timeout()
1445 out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD)); in bmac_tx_timeout()
1446 out_le16(&cp->xfer_status, 0); in bmac_tx_timeout()
1447 out_le32(&rd->cmdptr, virt_to_bus(cp)); in bmac_tx_timeout()
1448 out_le32(&rd->control, DBDMA_SET(RUN|WAKE)); in bmac_tx_timeout()
1452 bp->tx_empty, bp->tx_fill, bp->tx_fullup)); in bmac_tx_timeout()
1453 i = bp->tx_empty; in bmac_tx_timeout()
1454 ++dev->stats.tx_errors; in bmac_tx_timeout()
1455 if (i != bp->tx_fill) { in bmac_tx_timeout()
1456 dev_kfree_skb_irq(bp->tx_bufs[i]); in bmac_tx_timeout()
1457 bp->tx_bufs[i] = NULL; in bmac_tx_timeout()
1459 bp->tx_empty = i; in bmac_tx_timeout()
1461 bp->tx_fullup = 0; in bmac_tx_timeout()
1463 if (i != bp->tx_fill) { in bmac_tx_timeout()
1464 cp = &bp->tx_cmds[i]; in bmac_tx_timeout()
1465 out_le16(&cp->xfer_status, 0); in bmac_tx_timeout()
1466 out_le16(&cp->command, OUTPUT_LAST); in bmac_tx_timeout()
1467 out_le32(&td->cmdptr, virt_to_bus(cp)); in bmac_tx_timeout()
1468 out_le32(&td->control, DBDMA_SET(RUN)); in bmac_tx_timeout()
1479 spin_unlock_irqrestore(&bp->lock, flags); in bmac_tx_timeout()
1510 return -ENOSYS;
1528 *start = buffer + (offset - begin);
1529 len -= (offset - begin);
1544 free_irq(dev->irq, dev); in bmac_remove()
1545 free_irq(bp->tx_dma_intr, dev); in bmac_remove()
1546 free_irq(bp->rx_dma_intr, dev); in bmac_remove()
1548 iounmap((void __iomem *)dev->base_addr); in bmac_remove()
1549 iounmap(bp->tx_dma); in bmac_remove()
1550 iounmap(bp->rx_dma); in bmac_remove()
1593 return -ENOMEM; in bmac_init()