Lines matching the identifier "ep" in the EP93xx Ethernet driver
176 #define rdb(ep, off) __raw_readb((ep)->base_addr + (off)) argument
177 #define rdw(ep, off) __raw_readw((ep)->base_addr + (off)) argument
178 #define rdl(ep, off) __raw_readl((ep)->base_addr + (off)) argument
179 #define wrb(ep, off, val) __raw_writeb((val), (ep)->base_addr + (off)) argument
180 #define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off)) argument
181 #define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off)) argument
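The accessor macros above assume a driver-private context, reached via netdev_priv(), that carries the ioremapped register window in base_addr. A minimal sketch of such a structure, restricted to the fields the matches in this listing actually touch; field types and ordering are assumptions, and RX_QUEUE_ENTRIES, TX_QUEUE_ENTRIES and struct ep93xx_descs are constants/types defined elsewhere in the driver:

#include <linux/netdevice.h>
#include <linux/mii.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/ioport.h>

struct ep93xx_priv {
	struct resource		*res;		/* claimed MMIO region */
	void __iomem		*base_addr;	/* ioremapped registers used by rdl()/wrl() */
	int			irq;

	struct ep93xx_descs	*descs;		/* coherent descriptor/status rings */
	dma_addr_t		descs_dma_addr;

	void			*rx_buf[RX_QUEUE_ENTRIES];
	void			*tx_buf[TX_QUEUE_ENTRIES];

	struct net_device	*dev;
	struct napi_struct	napi;

	spinlock_t		rx_lock;	/* serializes RX interrupt enable vs. the ISR */
	int			rx_pointer;

	spinlock_t		tx_pending_lock;
	int			tx_pending;
	int			tx_pointer;
	int			tx_clean_pointer;

	struct mii_if_info	mii;
	u8			mdc_divisor;
};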
185 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_mdio_read() local
189 wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg); in ep93xx_mdio_read()
192 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) in ep93xx_mdio_read()
201 data = rdl(ep, REG_MIIDATA); in ep93xx_mdio_read()
209 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_mdio_write() local
212 wrl(ep, REG_MIIDATA, data); in ep93xx_mdio_write()
213 wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg); in ep93xx_mdio_write()
216 if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) in ep93xx_mdio_write()
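Both MDIO helpers share one pattern against the registers shown above: write the command, then poll REG_MIISTS until the BUSY bit clears before touching REG_MIIDATA. A rough reconstruction of the read side under that assumption; the loop bound, sleep interval and timeout behaviour are guesses:

static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int data, i;

	/* Kick off a read: PHY address in bits 9:5, register number in 4:0. */
	wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);

	/* Bounded busy-wait for the MII state machine to go idle. */
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_info("mdio read timed out\n");
		data = 0xffff;		/* conventional "no PHY answered" value */
	} else {
		data = rdl(ep, REG_MIIDATA);
	}

	return data;
}

The write side is the mirror image: preload REG_MIIDATA, issue REG_MIICMD_WRITE, then perform the same bounded poll on REG_MIISTS.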
227 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_rx() local
238 entry = ep->rx_pointer; in ep93xx_rx()
239 rstat = ep->descs->rstat + entry; in ep93xx_rx()
281 struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry]; in ep93xx_rx()
285 skb_copy_to_linear_data(skb, ep->rx_buf[entry], length); in ep93xx_rx()
292 napi_gro_receive(&ep->napi, skb); in ep93xx_rx()
301 ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1); in ep93xx_rx()
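The receive ring size is a power of two, so advancing rx_pointer is a mask rather than a modulo, and each frame is copied out of a preallocated DMA buffer into a fresh skb before being handed to GRO. A simplified sketch of the per-packet loop implied by the matches above; the rstat field names, the completion test and the length extraction are assumptions, and error/statistics handling is omitted:

static int ep93xx_rx(struct net_device *dev, int budget)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int processed = 0;

	while (processed < budget) {
		int entry = ep->rx_pointer;
		struct ep93xx_rstat *rstat = ep->descs->rstat + entry;
		struct sk_buff *skb;
		int length;

		/* Assumed completion test: the hardware writes back a non-zero
		 * status word, which we zero again after processing. */
		if (!rstat->rstat0 || !rstat->rstat1)
			break;

		length = rstat->rstat1 & 0xffff;	/* assumed length field */

		skb = netdev_alloc_skb(dev, length + 2);
		if (skb) {
			skb_reserve(skb, 2);	/* align the IP header */
			/* A dma_sync_single_for_cpu() of rx_buf[entry] would
			 * typically precede this copy. */
			skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, dev);
			napi_gro_receive(&ep->napi, skb);
		}

		rstat->rstat0 = 0;
		rstat->rstat1 = 0;
		ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1);
		processed++;
	}

	return processed;
}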
310 struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi); in ep93xx_poll() local
311 struct net_device *dev = ep->dev; in ep93xx_poll()
316 spin_lock_irq(&ep->rx_lock); in ep93xx_poll()
317 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); in ep93xx_poll()
318 spin_unlock_irq(&ep->rx_lock); in ep93xx_poll()
322 wrw(ep, REG_RXDENQ, rx); in ep93xx_poll()
323 wrw(ep, REG_RXSTSENQ, rx); in ep93xx_poll()
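This is the classic NAPI shape: drain up to budget packets, and only once the ring is empty re-enable the RX interrupt source, taking rx_lock so the re-enable cannot race with the interrupt handler masking it again; the enqueue registers then return the consumed descriptor and status slots to the MAC. A condensed sketch, assuming ep93xx_rx() returns the number of packets it processed:

static int ep93xx_poll(struct napi_struct *napi, int budget)
{
	struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
	struct net_device *dev = ep->dev;
	int rx;

	rx = ep93xx_rx(dev, budget);
	if (rx < budget && napi_complete_done(napi, rx)) {
		/* Ring drained: re-arm the RX interrupt. */
		spin_lock_irq(&ep->rx_lock);
		wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
		spin_unlock_irq(&ep->rx_lock);
	}

	if (rx) {
		/* Give the consumed descriptor and status entries back. */
		wrw(ep, REG_RXDENQ, rx);
		wrw(ep, REG_RXSTSENQ, rx);
	}

	return rx;
}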
331 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_xmit() local
341 entry = ep->tx_pointer; in ep93xx_xmit()
342 ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1); in ep93xx_xmit()
344 txd = &ep->descs->tdesc[entry]; in ep93xx_xmit()
349 skb_copy_and_csum_dev(skb, ep->tx_buf[entry]); in ep93xx_xmit()
354 spin_lock_irq(&ep->tx_pending_lock); in ep93xx_xmit()
355 ep->tx_pending++; in ep93xx_xmit()
356 if (ep->tx_pending == TX_QUEUE_ENTRIES) in ep93xx_xmit()
358 spin_unlock_irq(&ep->tx_pending_lock); in ep93xx_xmit()
360 wrl(ep, REG_TXDENQ, 1); in ep93xx_xmit()
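Transmit copies the skb into the preallocated DMA buffer for the next free slot, records the length in the descriptor, bumps the pending count under tx_pending_lock (stopping the queue once the ring is full), and finally writes REG_TXDENQ to tell the MAC one more descriptor is ready. A simplified sketch under those assumptions; the descriptor bit layout and the oversize-frame check are guesses:

static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	struct ep93xx_tdesc *txd;
	int entry;

	if (unlikely(skb->len > MAX_PKT_SIZE)) {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	entry = ep->tx_pointer;
	ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);

	txd = &ep->descs->tdesc[entry];
	/* Assumed layout: slot index in the high half, an end-of-frame flag,
	 * and the frame length in the low 12 bits (cf. the completion path). */
	txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);

	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
	/* A dma_sync_single_for_device() of tx_buf[entry] would follow here. */
	dev_kfree_skb(skb);

	spin_lock_irq(&ep->tx_pending_lock);
	ep->tx_pending++;
	if (ep->tx_pending == TX_QUEUE_ENTRIES)
		netif_stop_queue(dev);
	spin_unlock_irq(&ep->tx_pending_lock);

	wrl(ep, REG_TXDENQ, 1);

	return NETDEV_TX_OK;
}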
367 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_tx_complete() local
372 spin_lock(&ep->tx_pending_lock); in ep93xx_tx_complete()
378 entry = ep->tx_clean_pointer; in ep93xx_tx_complete()
379 tstat = ep->descs->tstat + entry; in ep93xx_tx_complete()
393 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff; in ep93xx_tx_complete()
407 ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1); in ep93xx_tx_complete()
408 if (ep->tx_pending == TX_QUEUE_ENTRIES) in ep93xx_tx_complete()
410 ep->tx_pending--; in ep93xx_tx_complete()
412 spin_unlock(&ep->tx_pending_lock); in ep93xx_tx_complete()
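Completion reclaims finished slots in order from tx_clean_pointer, charges the byte count recorded in the descriptor to the statistics, and wakes the queue if transmit had stopped it at ring-full. A trimmed sketch; the tstat field name and its completion test are assumptions:

static void ep93xx_tx_complete(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	bool wake = false;

	spin_lock(&ep->tx_pending_lock);
	for (;;) {
		int entry = ep->tx_clean_pointer;
		struct ep93xx_tstat *tstat = ep->descs->tstat + entry;

		if (!tstat->tstat0)	/* assumed: nothing more written back */
			break;
		tstat->tstat0 = 0;

		/* The low 12 bits of tdesc1 still hold the queued frame length. */
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += ep->descs->tdesc[entry].tdesc1 & 0xfff;

		ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
		if (ep->tx_pending == TX_QUEUE_ENTRIES)
			wake = true;	/* the queue was stopped when full */
		ep->tx_pending--;
	}
	spin_unlock(&ep->tx_pending_lock);

	if (wake)
		netif_wake_queue(dev);
}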
421 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_irq() local
424 status = rdl(ep, REG_INTSTSC); in ep93xx_irq()
429 spin_lock(&ep->rx_lock); in ep93xx_irq()
430 if (likely(napi_schedule_prep(&ep->napi))) { in ep93xx_irq()
431 wrl(ep, REG_INTEN, REG_INTEN_TX); in ep93xx_irq()
432 __napi_schedule(&ep->napi); in ep93xx_irq()
434 spin_unlock(&ep->rx_lock); in ep93xx_irq()
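The handler reads the (assumed read-to-clear) status register, masks the RX source and schedules NAPI on receive interrupts, and reclaims TX descriptors directly. A sketch of that shape; the REG_INTSTS_RX/REG_INTSTS_TX status bits are assumed to exist alongside the register names shown above:

static irqreturn_t ep93xx_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ep93xx_priv *ep = netdev_priv(dev);
	u32 status;

	status = rdl(ep, REG_INTSTSC);
	if (status == 0)
		return IRQ_NONE;	/* shared line, interrupt was not ours */

	if (status & REG_INTSTS_RX) {
		spin_lock(&ep->rx_lock);
		if (likely(napi_schedule_prep(&ep->napi))) {
			/* Leave only TX enabled while NAPI drains the ring. */
			wrl(ep, REG_INTEN, REG_INTEN_TX);
			__napi_schedule(&ep->napi);
		}
		spin_unlock(&ep->rx_lock);
	}

	if (status & REG_INTSTS_TX)
		ep93xx_tx_complete(dev);

	return IRQ_HANDLED;
}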
443 static void ep93xx_free_buffers(struct ep93xx_priv *ep) in ep93xx_free_buffers() argument
445 struct device *dev = ep->dev->dev.parent; in ep93xx_free_buffers()
448 if (!ep->descs) in ep93xx_free_buffers()
454 d = ep->descs->rdesc[i].buf_addr; in ep93xx_free_buffers()
458 kfree(ep->rx_buf[i]); in ep93xx_free_buffers()
464 d = ep->descs->tdesc[i].buf_addr; in ep93xx_free_buffers()
468 kfree(ep->tx_buf[i]); in ep93xx_free_buffers()
471 dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs, in ep93xx_free_buffers()
472 ep->descs_dma_addr); in ep93xx_free_buffers()
473 ep->descs = NULL; in ep93xx_free_buffers()
476 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) in ep93xx_alloc_buffers() argument
478 struct device *dev = ep->dev->dev.parent; in ep93xx_alloc_buffers()
481 ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs), in ep93xx_alloc_buffers()
482 &ep->descs_dma_addr, GFP_KERNEL); in ep93xx_alloc_buffers()
483 if (ep->descs == NULL) in ep93xx_alloc_buffers()
500 ep->rx_buf[i] = buf; in ep93xx_alloc_buffers()
501 ep->descs->rdesc[i].buf_addr = d; in ep93xx_alloc_buffers()
502 ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE; in ep93xx_alloc_buffers()
519 ep->tx_buf[i] = buf; in ep93xx_alloc_buffers()
520 ep->descs->tdesc[i].buf_addr = d; in ep93xx_alloc_buffers()
526 ep93xx_free_buffers(ep); in ep93xx_alloc_buffers()
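Buffer setup makes one coherent allocation for all four rings, then a kmalloc'd, streaming-mapped buffer per RX and per TX slot, recording each bus address in its descriptor. A condensed sketch of the RX half under those assumptions; the TX half is symmetric with DMA_TO_DEVICE, and any failure simply unwinds through ep93xx_free_buffers():

static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
{
	struct device *dev = ep->dev->dev.parent;
	int i;

	/* One coherent block holds rdesc[], rstat[], tdesc[] and tstat[]. */
	ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
				       &ep->descs_dma_addr, GFP_KERNEL);
	if (!ep->descs)
		return 1;

	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
		void *buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
		dma_addr_t d;

		if (!buf)
			goto err;

		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, d)) {
			kfree(buf);
			goto err;
		}

		ep->rx_buf[i] = buf;
		ep->descs->rdesc[i].buf_addr = d;
		/* Slot index in the high half, buffer size in the low half. */
		ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
	}

	/* ... TX buffers would be mapped the same way here ... */

	return 0;

err:
	ep93xx_free_buffers(ep);
	return 1;
}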
532 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_start_hw() local
536 wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET); in ep93xx_start_hw()
538 if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0) in ep93xx_start_hw()
548 wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9)); in ep93xx_start_hw()
551 if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0) in ep93xx_start_hw()
552 wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8)); in ep93xx_start_hw()
555 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc); in ep93xx_start_hw()
556 wrl(ep, REG_RXDQBADD, addr); in ep93xx_start_hw()
557 wrl(ep, REG_RXDCURADD, addr); in ep93xx_start_hw()
558 wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc)); in ep93xx_start_hw()
561 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat); in ep93xx_start_hw()
562 wrl(ep, REG_RXSTSQBADD, addr); in ep93xx_start_hw()
563 wrl(ep, REG_RXSTSQCURADD, addr); in ep93xx_start_hw()
564 wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat)); in ep93xx_start_hw()
567 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc); in ep93xx_start_hw()
568 wrl(ep, REG_TXDQBADD, addr); in ep93xx_start_hw()
569 wrl(ep, REG_TXDQCURADD, addr); in ep93xx_start_hw()
570 wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc)); in ep93xx_start_hw()
573 addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat); in ep93xx_start_hw()
574 wrl(ep, REG_TXSTSQBADD, addr); in ep93xx_start_hw()
575 wrl(ep, REG_TXSTSQCURADD, addr); in ep93xx_start_hw()
576 wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat)); in ep93xx_start_hw()
578 wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX); in ep93xx_start_hw()
579 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); in ep93xx_start_hw()
580 wrl(ep, REG_GIINTMSK, 0); in ep93xx_start_hw()
583 if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0) in ep93xx_start_hw()
593 wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES); in ep93xx_start_hw()
594 wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES); in ep93xx_start_hw()
596 wrb(ep, REG_INDAD0, dev->dev_addr[0]); in ep93xx_start_hw()
597 wrb(ep, REG_INDAD1, dev->dev_addr[1]); in ep93xx_start_hw()
598 wrb(ep, REG_INDAD2, dev->dev_addr[2]); in ep93xx_start_hw()
599 wrb(ep, REG_INDAD3, dev->dev_addr[3]); in ep93xx_start_hw()
600 wrb(ep, REG_INDAD4, dev->dev_addr[4]); in ep93xx_start_hw()
601 wrb(ep, REG_INDAD5, dev->dev_addr[5]); in ep93xx_start_hw()
602 wrl(ep, REG_AFP, 0); in ep93xx_start_hw()
604 wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE); in ep93xx_start_hw()
606 wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT); in ep93xx_start_hw()
607 wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE); in ep93xx_start_hw()
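Bring-up resets the MAC, programs the MDC divisor, then points each of the four hardware queues (RX descriptor, RX status, TX descriptor, TX status) at its slice of the coherent block, using offsetof() so the bus addresses track the CPU-side struct layout. The four setups share one shape; a purely hypothetical helper illustrating that pattern (not a function in the driver):

/* Write a ring's bus address into its base and current-address registers,
 * and its size in bytes into the length register. */
static void ep93xx_program_queue(struct ep93xx_priv *ep, size_t ring_offset,
				 unsigned int badd_reg, unsigned int curadd_reg,
				 unsigned int blen_reg, unsigned int len_bytes)
{
	dma_addr_t addr = ep->descs_dma_addr + ring_offset;

	wrl(ep, badd_reg, addr);
	wrl(ep, curadd_reg, addr);
	wrw(ep, blen_reg, len_bytes);
}

/* e.g. the RX descriptor queue from the matches above:
 *	ep93xx_program_queue(ep, offsetof(struct ep93xx_descs, rdesc),
 *			     REG_RXDQBADD, REG_RXDCURADD, REG_RXDQBLEN,
 *			     RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc));
 */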
614 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_stop_hw() local
617 wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET); in ep93xx_stop_hw()
619 if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0) in ep93xx_stop_hw()
630 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_open() local
633 if (ep93xx_alloc_buffers(ep)) in ep93xx_open()
636 napi_enable(&ep->napi); in ep93xx_open()
639 napi_disable(&ep->napi); in ep93xx_open()
640 ep93xx_free_buffers(ep); in ep93xx_open()
644 spin_lock_init(&ep->rx_lock); in ep93xx_open()
645 ep->rx_pointer = 0; in ep93xx_open()
646 ep->tx_clean_pointer = 0; in ep93xx_open()
647 ep->tx_pointer = 0; in ep93xx_open()
648 spin_lock_init(&ep->tx_pending_lock); in ep93xx_open()
649 ep->tx_pending = 0; in ep93xx_open()
651 err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev); in ep93xx_open()
653 napi_disable(&ep->napi); in ep93xx_open()
655 ep93xx_free_buffers(ep); in ep93xx_open()
659 wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE); in ep93xx_open()
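Open follows the usual ordering for a NAPI driver: allocate the DMA buffers, enable NAPI, start the hardware, reset the ring pointers and locks, request the shared IRQ, unmask the global interrupt, then start the TX queue, unwinding in reverse on failure. A condensed sketch, assuming ep93xx_start_hw() returns non-zero on failure:

static int ep93xx_open(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int err;

	if (ep93xx_alloc_buffers(ep))
		return -ENOMEM;

	napi_enable(&ep->napi);

	if (ep93xx_start_hw(dev)) {
		napi_disable(&ep->napi);
		ep93xx_free_buffers(ep);
		return -EIO;
	}

	spin_lock_init(&ep->rx_lock);
	ep->rx_pointer = 0;
	ep->tx_clean_pointer = 0;
	ep->tx_pointer = 0;
	spin_lock_init(&ep->tx_pending_lock);
	ep->tx_pending = 0;

	err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev);
	if (err) {
		napi_disable(&ep->napi);
		ep93xx_stop_hw(dev);	/* assumed: quiesce before freeing */
		ep93xx_free_buffers(ep);
		return err;
	}

	wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE);

	netif_start_queue(dev);

	return 0;
}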
668 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_close() local
670 napi_disable(&ep->napi); in ep93xx_close()
673 wrl(ep, REG_GIINTMSK, 0); in ep93xx_close()
674 free_irq(ep->irq, dev); in ep93xx_close()
676 ep93xx_free_buffers(ep); in ep93xx_close()
683 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_ioctl() local
686 return generic_mii_ioctl(&ep->mii, data, cmd, NULL); in ep93xx_ioctl()
697 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_get_link_ksettings() local
699 mii_ethtool_get_link_ksettings(&ep->mii, cmd); in ep93xx_get_link_ksettings()
707 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_set_link_ksettings() local
708 return mii_ethtool_set_link_ksettings(&ep->mii, cmd); in ep93xx_set_link_ksettings()
713 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_nway_reset() local
714 return mii_nway_restart(&ep->mii); in ep93xx_nway_reset()
719 struct ep93xx_priv *ep = netdev_priv(dev); in ep93xx_get_link() local
720 return mii_link_ok(&ep->mii); in ep93xx_get_link()
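All of the ethtool callbacks above are thin wrappers around the generic MII library, which in turn works only through the mdio_read/mdio_write hooks installed at probe time. A sketch of how they would typically be wired into an ethtool_ops table; the table name and any additional callbacks in the real driver are assumptions:

static const struct ethtool_ops ep93xx_ethtool_ops = {
	.get_link		= ep93xx_get_link,
	.nway_reset		= ep93xx_nway_reset,
	.get_link_ksettings	= ep93xx_get_link_ksettings,
	.set_link_ksettings	= ep93xx_set_link_ksettings,
};

/* Installed once during probe, e.g.: dev->ethtool_ops = &ep93xx_ethtool_ops; */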
743 struct ep93xx_priv *ep; in ep93xx_eth_remove() local
750 ep = netdev_priv(dev); in ep93xx_eth_remove()
754 ep93xx_free_buffers(ep); in ep93xx_eth_remove()
756 if (ep->base_addr != NULL) in ep93xx_eth_remove()
757 iounmap(ep->base_addr); in ep93xx_eth_remove()
759 if (ep->res != NULL) { in ep93xx_eth_remove()
770 struct ep93xx_priv *ep; in ep93xx_eth_probe() local
812 ep = netdev_priv(dev); in ep93xx_eth_probe()
813 ep->dev = dev; in ep93xx_eth_probe()
815 netif_napi_add(dev, &ep->napi, ep93xx_poll); in ep93xx_eth_probe()
819 ep->res = request_mem_region(mem->start, resource_size(mem), in ep93xx_eth_probe()
821 if (ep->res == NULL) { in ep93xx_eth_probe()
827 ep->base_addr = base_addr; in ep93xx_eth_probe()
828 ep->irq = irq; in ep93xx_eth_probe()
830 ep->mii.phy_id = phy_id; in ep93xx_eth_probe()
831 ep->mii.phy_id_mask = 0x1f; in ep93xx_eth_probe()
832 ep->mii.reg_num_mask = 0x1f; in ep93xx_eth_probe()
833 ep->mii.dev = dev; in ep93xx_eth_probe()
834 ep->mii.mdio_read = ep93xx_mdio_read; in ep93xx_eth_probe()
835 ep->mii.mdio_write = ep93xx_mdio_write; in ep93xx_eth_probe()
836 ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */ in ep93xx_eth_probe()
848 dev->name, ep->irq, dev->dev_addr); in ep93xx_eth_probe()
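The probe matches above fill in the MII glue: 5-bit PHY-address and register masks, the two MDIO accessors, and an MDC divisor of 40, sized for the 100 MHz maximum HCLK against the 2.5 MHz MDIO clock (100 MHz / 40 = 2.5 MHz). A hypothetical helper collecting just that setup; in the driver these assignments sit inline in ep93xx_eth_probe():

static void ep93xx_init_mii(struct ep93xx_priv *ep, struct net_device *dev,
			    int phy_id)
{
	ep->mii.phy_id = phy_id;
	ep->mii.phy_id_mask = 0x1f;	/* 5-bit PHY address */
	ep->mii.reg_num_mask = 0x1f;	/* 5-bit register number */
	ep->mii.dev = dev;
	ep->mii.mdio_read = ep93xx_mdio_read;
	ep->mii.mdio_write = ep93xx_mdio_write;
	ep->mdc_divisor = 40;		/* 100 MHz HCLK / 40 = 2.5 MHz MDC */
}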