Lines matching full:mdp (identifier search over the Renesas sh_eth driver, drivers/net/ethernet/renesas/sh_eth.c; the leading numbers are that file's line numbers, and the trailing local/argument notes flag how each hit declares mdp)

349 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_write() local
350 u16 offset = mdp->reg_offset[enum_index]; in sh_eth_write()
355 iowrite32(data, mdp->addr + offset); in sh_eth_write()
360 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_read() local
361 u16 offset = mdp->reg_offset[enum_index]; in sh_eth_read()
366 return ioread32(mdp->addr + offset); in sh_eth_read()
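
The read/write helpers above, and the TSU variants just below (376-399) that run the same lookup against the separate tsu_addr window, name registers by an enum index and translate it through a per-SoC offset table before any MMIO access. A minimal sketch of that pattern, assuming an all-ones sentinel for registers a given variant lacks (the driver's SH_ETH_OFFSET_INVALID checks sit on the lines elided between the hits); the struct and function names here are illustrative:

    #include <linux/bug.h>
    #include <linux/io.h>
    #include <linux/types.h>

    #define REG_OFFSET_INVALID ((u16)~0)    /* illustrative sentinel */

    struct eth_priv {
            void __iomem *addr;             /* EMAC register window */
            void __iomem *tsu_addr;         /* TSU register window */
            const u16 *reg_offset;          /* enum index -> byte offset */
    };

    static void eth_write(struct eth_priv *p, u32 data, int enum_index)
    {
            u16 offset = p->reg_offset[enum_index];

            if (WARN_ON(offset == REG_OFFSET_INVALID))
                    return;                 /* register absent on this SoC */
            iowrite32(data, p->addr + offset);
    }

    static u32 eth_read(struct eth_priv *p, int enum_index)
    {
            u16 offset = p->reg_offset[enum_index];

            if (WARN_ON(offset == REG_OFFSET_INVALID))
                    return ~0;
            return ioread32(p->addr + offset);
    }

The sketches further down reuse eth_priv/eth_read()/eth_write(), extending the struct with whatever fields the hits show (lock, napi, ring state, the per-chip cd data). Register names such as GECMR, EESR, EESIPR, EDTRR, and PIR in those sketches are the driver's register-enum indices, assumed declared.
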
376 static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) in sh_eth_tsu_get_offset() argument
378 return mdp->reg_offset[enum_index]; in sh_eth_tsu_get_offset()
381 static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, in sh_eth_tsu_write() argument
384 u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); in sh_eth_tsu_write()
389 iowrite32(data, mdp->tsu_addr + offset); in sh_eth_tsu_write()
392 static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) in sh_eth_tsu_read() argument
394 u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); in sh_eth_tsu_read()
399 return ioread32(mdp->tsu_addr + offset); in sh_eth_tsu_read()
415 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_select_mii() local
418 switch (mdp->phy_interface) { in sh_eth_select_mii()
443 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_duplex() local
445 sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0); in sh_eth_set_duplex()
450 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_chip_reset() local
453 sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR); in sh_eth_chip_reset()
482 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_soft_reset_gether() local
503 if (mdp->cd->csmr) in sh_eth_soft_reset_gether()
507 if (mdp->cd->select_mii) in sh_eth_soft_reset_gether()
515 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_gether() local
517 if (WARN_ON(!mdp->cd->gecmr)) in sh_eth_set_rate_gether()
520 switch (mdp->speed) { in sh_eth_set_rate_gether()
634 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_rcar() local
636 switch (mdp->speed) { in sh_eth_set_rate_rcar()
797 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_sh7724() local
799 switch (mdp->speed) { in sh_eth_set_rate_sh7724()
841 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_sh7757() local
843 switch (mdp->speed) { in sh_eth_set_rate_sh7757()
912 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_giga() local
914 if (WARN_ON(!mdp->cd->gecmr)) in sh_eth_set_rate_giga()
917 switch (mdp->speed) { in sh_eth_set_rate_giga()
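
All of the set_rate variants above (517-917) share one shape: switch on the cached mdp->speed, which the adjust_link callback at 1941 onward keeps current, and program a chip-specific rate register. A hedged sketch with placeholder bit values, not the driver's real constants:

    #include <linux/netdevice.h>

    #define GECMR_10        0x0             /* illustrative values */
    #define GECMR_100       0x4
    #define GECMR_1000      0x1

    static void eth_set_rate(struct net_device *ndev)
    {
            struct eth_priv *mdp = netdev_priv(ndev);

            if (WARN_ON(!mdp->cd->gecmr))   /* variant has no GECMR */
                    return;

            switch (mdp->speed) {           /* cached from phylib */
            case 10:
                    eth_write(mdp, GECMR_10, GECMR);
                    break;
            case 100:
                    eth_write(mdp, GECMR_100, GECMR);
                    break;
            case 1000:
                    eth_write(mdp, GECMR_1000, GECMR);
                    break;
            }
    }
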
1233 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tx_free() local
1239 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { in sh_eth_tx_free()
1240 entry = mdp->dirty_tx % mdp->num_tx_ring; in sh_eth_tx_free()
1241 txdesc = &mdp->tx_ring[entry]; in sh_eth_tx_free()
1247 netif_info(mdp, tx_done, ndev, in sh_eth_tx_free()
1251 if (mdp->tx_skbuff[entry]) { in sh_eth_tx_free()
1252 dma_unmap_single(&mdp->pdev->dev, in sh_eth_tx_free()
1256 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); in sh_eth_tx_free()
1257 mdp->tx_skbuff[entry] = NULL; in sh_eth_tx_free()
1261 if (entry >= mdp->num_tx_ring - 1) in sh_eth_tx_free()
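
The reclaim loop at 1239-1240 relies on cur_tx and dirty_tx being free-running u32 counters: their difference is the number of in-flight descriptors and stays correct across integer wraparound, and the ring slot is recovered with a modulo. A standalone demonstration (not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_TX_RING 64u

    int main(void)
    {
            uint32_t cur_tx = 1;                    /* already wrapped past 0 */
            uint32_t dirty_tx = UINT32_MAX - 1;     /* still below the wrap */

            while (cur_tx - dirty_tx > 0) {         /* unsigned diff: 3 */
                    uint32_t entry = dirty_tx % NUM_TX_RING;

                    printf("reclaim slot %2u (dirty_tx=%u)\n", entry, dirty_tx);
                    dirty_tx++;                     /* wraps 0xffffffff -> 0 */
            }
            return 0;                               /* prints slots 62, 63, 0 */
    }

This is why the driver never masks the counters themselves, only the index derived from them.
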
1275 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_free() local
1278 if (mdp->rx_ring) { in sh_eth_ring_free()
1279 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_ring_free()
1280 if (mdp->rx_skbuff[i]) { in sh_eth_ring_free()
1281 struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; in sh_eth_ring_free()
1283 dma_unmap_single(&mdp->pdev->dev, in sh_eth_ring_free()
1285 ALIGN(mdp->rx_buf_sz, 32), in sh_eth_ring_free()
1289 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_ring_free()
1290 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring, in sh_eth_ring_free()
1291 mdp->rx_desc_dma); in sh_eth_ring_free()
1292 mdp->rx_ring = NULL; in sh_eth_ring_free()
1296 if (mdp->rx_skbuff) { in sh_eth_ring_free()
1297 for (i = 0; i < mdp->num_rx_ring; i++) in sh_eth_ring_free()
1298 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_ring_free()
1300 kfree(mdp->rx_skbuff); in sh_eth_ring_free()
1301 mdp->rx_skbuff = NULL; in sh_eth_ring_free()
1303 if (mdp->tx_ring) { in sh_eth_ring_free()
1306 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_ring_free()
1307 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring, in sh_eth_ring_free()
1308 mdp->tx_desc_dma); in sh_eth_ring_free()
1309 mdp->tx_ring = NULL; in sh_eth_ring_free()
1313 kfree(mdp->tx_skbuff); in sh_eth_ring_free()
1314 mdp->tx_skbuff = NULL; in sh_eth_ring_free()
1320 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_format() local
1325 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; in sh_eth_ring_format()
1326 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; in sh_eth_ring_format()
1327 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; in sh_eth_ring_format()
1331 mdp->cur_rx = 0; in sh_eth_ring_format()
1332 mdp->cur_tx = 0; in sh_eth_ring_format()
1333 mdp->dirty_rx = 0; in sh_eth_ring_format()
1334 mdp->dirty_tx = 0; in sh_eth_ring_format()
1336 memset(mdp->rx_ring, 0, rx_ringsize); in sh_eth_ring_format()
1339 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_ring_format()
1341 mdp->rx_skbuff[i] = NULL; in sh_eth_ring_format()
1348 buf_len = ALIGN(mdp->rx_buf_sz, 32); in sh_eth_ring_format()
1349 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len, in sh_eth_ring_format()
1351 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_ring_format()
1355 mdp->rx_skbuff[i] = skb; in sh_eth_ring_format()
1358 rxdesc = &mdp->rx_ring[i]; in sh_eth_ring_format()
1365 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); in sh_eth_ring_format()
1366 if (mdp->cd->xdfar_rw) in sh_eth_ring_format()
1367 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); in sh_eth_ring_format()
1371 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); in sh_eth_ring_format()
1377 memset(mdp->tx_ring, 0, tx_ringsize); in sh_eth_ring_format()
1380 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_ring_format()
1381 mdp->tx_skbuff[i] = NULL; in sh_eth_ring_format()
1382 txdesc = &mdp->tx_ring[i]; in sh_eth_ring_format()
1387 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); in sh_eth_ring_format()
1388 if (mdp->cd->xdfar_rw) in sh_eth_ring_format()
1389 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); in sh_eth_ring_format()
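
sh_eth_ring_format() fills every RX slot with a fresh skb, DMA-maps the buffer for the device, and publishes address and length in the descriptor; lengths are rounded with ALIGN(..., 32) to the controller's 32-byte transfer granularity, which is also why the allocation size at 1327 carries the extra "+ 32 - 1". A sketch of one slot's setup (the descriptor layout and RD_RACT ownership bit are illustrative, and the skb_reserve()-based receive alignment is elided):

    #include <linux/bits.h>
    #include <linux/dma-mapping.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    #define RD_RACT BIT(31)                 /* owned by HW (illustrative) */

    struct rx_desc {                        /* illustrative layout */
            __le32 status;
            __le16 len;                     /* buffer length given to HW */
            __le16 pkt_len;                 /* frame length written back */
            __le32 addr;
            __le32 pad;
    };

    static int rx_slot_fill(struct device *dmadev, struct net_device *ndev,
                            struct rx_desc *rxdesc, struct sk_buff **slot,
                            u32 rx_buf_sz)
    {
            u32 buf_len = ALIGN(rx_buf_sz, 32);     /* HW writes 32-byte units */
            struct sk_buff *skb;
            dma_addr_t dma_addr;

            skb = netdev_alloc_skb(ndev, buf_len + 32 - 1);
            if (!skb)
                    return -ENOMEM;

            dma_addr = dma_map_single(dmadev, skb->data, buf_len,
                                      DMA_FROM_DEVICE);
            if (dma_mapping_error(dmadev, dma_addr)) {
                    kfree_skb(skb);
                    return -ENOMEM;
            }
            *slot = skb;

            rxdesc->len = cpu_to_le16(buf_len);
            rxdesc->addr = cpu_to_le32(dma_addr);
            rxdesc->status = cpu_to_le32(RD_RACT);  /* hand slot to HW last */
            return 0;
    }
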
1399 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_init() local
1407 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : in sh_eth_ring_init()
1409 if (mdp->cd->rpadir) in sh_eth_ring_init()
1410 mdp->rx_buf_sz += NET_IP_ALIGN; in sh_eth_ring_init()
1413 mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff), in sh_eth_ring_init()
1415 if (!mdp->rx_skbuff) in sh_eth_ring_init()
1418 mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff), in sh_eth_ring_init()
1420 if (!mdp->tx_skbuff) in sh_eth_ring_init()
1424 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_ring_init()
1425 mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize, in sh_eth_ring_init()
1426 &mdp->rx_desc_dma, GFP_KERNEL); in sh_eth_ring_init()
1427 if (!mdp->rx_ring) in sh_eth_ring_init()
1430 mdp->dirty_rx = 0; in sh_eth_ring_init()
1433 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_ring_init()
1434 mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize, in sh_eth_ring_init()
1435 &mdp->tx_desc_dma, GFP_KERNEL); in sh_eth_ring_init()
1436 if (!mdp->tx_ring) in sh_eth_ring_init()
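
The init path pairs two allocators: kcalloc() for the CPU-only skb bookkeeping arrays and dma_alloc_coherent() for the descriptor rings, so CPU and controller share the ring memory without explicit cache maintenance. A condensed sketch of one ring's setup, with the same unwind order that sh_eth_ring_free() applies in reverse (rx_desc is the illustrative type from the previous sketch):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>
    #include <linux/slab.h>

    static int ring_init_one(struct device *dmadev, unsigned int num_desc,
                             struct sk_buff ***skbs, struct rx_desc **ring,
                             dma_addr_t *ring_dma)
    {
            *skbs = kcalloc(num_desc, sizeof(**skbs), GFP_KERNEL);
            if (!*skbs)
                    return -ENOMEM;

            *ring = dma_alloc_coherent(dmadev, num_desc * sizeof(**ring),
                                       ring_dma, GFP_KERNEL);
            if (!*ring) {
                    kfree(*skbs);           /* unwind, as ring_free does */
                    *skbs = NULL;
                    return -ENOMEM;
            }
            return 0;
    }
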
1449 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_dev_init() local
1453 ret = mdp->cd->soft_reset(ndev); in sh_eth_dev_init()
1457 if (mdp->cd->rmiimode) in sh_eth_dev_init()
1462 if (mdp->cd->rpadir) in sh_eth_dev_init()
1469 if (mdp->cd->hw_swap) in sh_eth_dev_init()
1476 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); in sh_eth_dev_init()
1482 sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); in sh_eth_dev_init()
1485 if (mdp->cd->nbst) in sh_eth_dev_init()
1489 if (mdp->cd->bculr) in sh_eth_dev_init()
1492 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); in sh_eth_dev_init()
1494 if (!mdp->cd->no_trimd) in sh_eth_dev_init()
1502 mdp->irq_enabled = true; in sh_eth_dev_init()
1503 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_dev_init()
1506 sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | in sh_eth_dev_init()
1510 if (mdp->cd->set_rate) in sh_eth_dev_init()
1511 mdp->cd->set_rate(ndev); in sh_eth_dev_init()
1514 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); in sh_eth_dev_init()
1517 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); in sh_eth_dev_init()
1523 if (mdp->cd->apr) in sh_eth_dev_init()
1525 if (mdp->cd->mpr) in sh_eth_dev_init()
1527 if (mdp->cd->tpauser) in sh_eth_dev_init()
1538 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_dev_exit() local
1544 for (i = 0; i < mdp->num_tx_ring; i++) in sh_eth_dev_exit()
1545 mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT); in sh_eth_dev_exit()
1560 mdp->cd->soft_reset(ndev); in sh_eth_dev_exit()
1563 if (mdp->cd->rmiimode) in sh_eth_dev_exit()
1586 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_rx() local
1589 int entry = mdp->cur_rx % mdp->num_rx_ring; in sh_eth_rx()
1590 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; in sh_eth_rx()
1594 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; in sh_eth_rx()
1601 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1611 netif_info(mdp, rx_status, ndev, in sh_eth_rx()
1624 if (mdp->cd->csmr) in sh_eth_rx()
1627 skb = mdp->rx_skbuff[entry]; in sh_eth_rx()
1645 if (!mdp->cd->hw_swap) in sh_eth_rx()
1649 mdp->rx_skbuff[entry] = NULL; in sh_eth_rx()
1650 if (mdp->cd->rpadir) in sh_eth_rx()
1652 dma_unmap_single(&mdp->pdev->dev, dma_addr, in sh_eth_rx()
1653 ALIGN(mdp->rx_buf_sz, 32), in sh_eth_rx()
1665 entry = (++mdp->cur_rx) % mdp->num_rx_ring; in sh_eth_rx()
1666 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1670 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { in sh_eth_rx()
1671 entry = mdp->dirty_rx % mdp->num_rx_ring; in sh_eth_rx()
1672 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1674 buf_len = ALIGN(mdp->rx_buf_sz, 32); in sh_eth_rx()
1677 if (mdp->rx_skbuff[entry] == NULL) { in sh_eth_rx()
1682 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, in sh_eth_rx()
1684 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_rx()
1688 mdp->rx_skbuff[entry] = skb; in sh_eth_rx()
1694 if (entry >= mdp->num_rx_ring - 1) in sh_eth_rx()
1705 if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) { in sh_eth_rx()
1709 mdp->cur_rx = count; in sh_eth_rx()
1710 mdp->dirty_rx = count; in sh_eth_rx()
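
The second half of sh_eth_rx() (1670-1694) is the refill loop: dirty_rx chases cur_rx with the same free-running-counter arithmetic as the TX reclaim, and a DMA-mapping failure simply breaks out, leaving the slot empty for the next NAPI poll rather than failing hard. A sketch reusing rx_slot_fill() from above (the state struct is an illustrative subset of the private data):

    struct rx_ring_state {
            u32 cur_rx, dirty_rx;           /* free-running counters */
            u32 num_rx_ring;
            u32 rx_buf_sz;
            struct sk_buff **rx_skbuff;
            struct rx_desc *rx_ring;
    };

    static void rx_refill(struct device *dmadev, struct net_device *ndev,
                          struct rx_ring_state *rs)
    {
            for (; rs->cur_rx - rs->dirty_rx > 0; rs->dirty_rx++) {
                    u32 entry = rs->dirty_rx % rs->num_rx_ring;

                    if (rs->rx_skbuff[entry])
                            continue;       /* slot still holds a buffer */
                    if (rx_slot_fill(dmadev, ndev, &rs->rx_ring[entry],
                                     &rs->rx_skbuff[entry], rs->rx_buf_sz) < 0)
                            break;          /* retry on the next poll */
            }
    }
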
1735 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_emac_interrupt() local
1744 pm_wakeup_event(&mdp->pdev->dev, 0); in sh_eth_emac_interrupt()
1747 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_emac_interrupt()
1750 if (mdp->ether_link_active_low) in sh_eth_emac_interrupt()
1769 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_error() local
1776 netif_err(mdp, tx_err, ndev, "Transmit Abort\n"); in sh_eth_error()
1791 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n"); in sh_eth_error()
1797 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n"); in sh_eth_error()
1810 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { in sh_eth_error()
1813 netif_err(mdp, tx_err, ndev, "Address Error\n"); in sh_eth_error()
1817 if (mdp->cd->no_ade) in sh_eth_error()
1825 intr_status, mdp->cur_tx, mdp->dirty_tx, in sh_eth_error()
1831 if (edtrr ^ mdp->cd->edtrr_trns) { in sh_eth_error()
1833 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); in sh_eth_error()
1843 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_interrupt() local
1844 struct sh_eth_cpu_data *cd = mdp->cd; in sh_eth_interrupt()
1848 spin_lock(&mdp->lock); in sh_eth_interrupt()
1866 if (unlikely(!mdp->irq_enabled)) { in sh_eth_interrupt()
1872 if (napi_schedule_prep(&mdp->napi)) { in sh_eth_interrupt()
1876 __napi_schedule(&mdp->napi); in sh_eth_interrupt()
1905 spin_unlock(&mdp->lock); in sh_eth_interrupt()
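
The hard-IRQ handler does not process RX packets itself: under the driver spinlock it masks the RX sources in EESIPR and schedules NAPI, with the mirror image at 1932-1933 where the poll function restores mdp->cd->eesipr_value once the quota is not exhausted. A sketch of that handoff (the mask value is illustrative; eth_priv is assumed extended with lock, napi, and cd fields):

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    #define RX_IRQ_MASK 0x000f0000u         /* illustrative RX source bits */

    static irqreturn_t eth_interrupt(int irq, void *dev_id)
    {
            struct net_device *ndev = dev_id;
            struct eth_priv *mdp = netdev_priv(ndev);
            irqreturn_t ret = IRQ_NONE;
            u32 intr_status;

            spin_lock(&mdp->lock);
            intr_status = eth_read(mdp, EESR);

            if (intr_status & RX_IRQ_MASK) {
                    if (napi_schedule_prep(&mdp->napi)) {
                            /* mask RX sources until the poll re-enables them */
                            eth_write(mdp, mdp->cd->eesipr_value & ~RX_IRQ_MASK,
                                      EESIPR);
                            __napi_schedule(&mdp->napi);
                    }
                    ret = IRQ_HANDLED;
            }

            spin_unlock(&mdp->lock);
            return ret;
    }
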
1912 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private, in sh_eth_poll() local
1932 if (mdp->irq_enabled) in sh_eth_poll()
1933 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_poll()
1941 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_adjust_link() local
1946 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_adjust_link()
1949 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_adjust_link()
1953 if (phydev->duplex != mdp->duplex) { in sh_eth_adjust_link()
1955 mdp->duplex = phydev->duplex; in sh_eth_adjust_link()
1956 if (mdp->cd->set_duplex) in sh_eth_adjust_link()
1957 mdp->cd->set_duplex(ndev); in sh_eth_adjust_link()
1960 if (phydev->speed != mdp->speed) { in sh_eth_adjust_link()
1962 mdp->speed = phydev->speed; in sh_eth_adjust_link()
1963 if (mdp->cd->set_rate) in sh_eth_adjust_link()
1964 mdp->cd->set_rate(ndev); in sh_eth_adjust_link()
1966 if (!mdp->link) { in sh_eth_adjust_link()
1969 mdp->link = phydev->link; in sh_eth_adjust_link()
1971 } else if (mdp->link) { in sh_eth_adjust_link()
1973 mdp->link = 0; in sh_eth_adjust_link()
1974 mdp->speed = 0; in sh_eth_adjust_link()
1975 mdp->duplex = -1; in sh_eth_adjust_link()
1979 if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link) in sh_eth_adjust_link()
1982 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_adjust_link()
1984 if (new_state && netif_msg_link(mdp)) in sh_eth_adjust_link()
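
The adjust_link callback is the canonical phylib consumer: under the driver lock it diffs the phydev state against the cached mdp->link/speed/duplex, invokes the per-chip hooks only on a change, and resets the cache (duplex = -1) on link loss. The hits above reassemble into this sketch almost verbatim:

    #include <linux/netdevice.h>
    #include <linux/phy.h>

    static void eth_adjust_link(struct net_device *ndev)
    {
            struct eth_priv *mdp = netdev_priv(ndev);
            struct phy_device *phydev = ndev->phydev;
            unsigned long flags;
            bool new_state = false;

            spin_lock_irqsave(&mdp->lock, flags);

            if (phydev->link) {
                    if (phydev->duplex != mdp->duplex) {
                            new_state = true;
                            mdp->duplex = phydev->duplex;
                            if (mdp->cd->set_duplex)
                                    mdp->cd->set_duplex(ndev);
                    }
                    if (phydev->speed != mdp->speed) {
                            new_state = true;
                            mdp->speed = phydev->speed;
                            if (mdp->cd->set_rate)
                                    mdp->cd->set_rate(ndev);
                    }
                    if (!mdp->link) {
                            new_state = true;
                            mdp->link = phydev->link;
                    }
            } else if (mdp->link) {
                    new_state = true;
                    mdp->link = 0;
                    mdp->speed = 0;
                    mdp->duplex = -1;       /* DUPLEX_UNKNOWN */
            }

            spin_unlock_irqrestore(&mdp->lock, flags);

            if (new_state && netif_msg_link(mdp))
                    phy_print_status(phydev);
    }
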
1992 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_phy_init() local
1995 mdp->link = 0; in sh_eth_phy_init()
1996 mdp->speed = 0; in sh_eth_phy_init()
1997 mdp->duplex = -1; in sh_eth_phy_init()
2006 mdp->phy_interface); in sh_eth_phy_init()
2015 mdp->mii_bus->id, mdp->phy_id); in sh_eth_phy_init()
2018 mdp->phy_interface); in sh_eth_phy_init()
2027 if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) in sh_eth_phy_init()
2059 struct sh_eth_private *mdp = netdev_priv(ndev); in __sh_eth_get_regs() local
2060 struct sh_eth_cpu_data *cd = mdp->cd; in __sh_eth_get_regs()
2084 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \ in __sh_eth_get_regs()
2093 #define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg)) in __sh_eth_get_regs()
2207 *buf++ = ioread32(mdp->tsu_addr + in __sh_eth_get_regs()
2208 mdp->reg_offset[TSU_ADRH0] + in __sh_eth_get_regs()
2230 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_regs() local
2234 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2236 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2241 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_msglevel() local
2242 return mdp->msg_enable; in sh_eth_get_msglevel()
2247 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_msglevel() local
2248 mdp->msg_enable = value; in sh_eth_set_msglevel()
2270 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_ethtool_stats() local
2274 data[i++] = mdp->cur_rx; in sh_eth_get_ethtool_stats()
2275 data[i++] = mdp->cur_tx; in sh_eth_get_ethtool_stats()
2276 data[i++] = mdp->dirty_rx; in sh_eth_get_ethtool_stats()
2277 data[i++] = mdp->dirty_tx; in sh_eth_get_ethtool_stats()
2295 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_ringparam() local
2299 ring->rx_pending = mdp->num_rx_ring; in sh_eth_get_ringparam()
2300 ring->tx_pending = mdp->num_tx_ring; in sh_eth_get_ringparam()
2308 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_ringparam() local
2328 mdp->irq_enabled = false; in sh_eth_set_ringparam()
2330 napi_synchronize(&mdp->napi); in sh_eth_set_ringparam()
2340 mdp->num_rx_ring = ring->rx_pending; in sh_eth_set_ringparam()
2341 mdp->num_tx_ring = ring->tx_pending; in sh_eth_set_ringparam()
2365 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_wol() local
2370 if (mdp->cd->magic) { in sh_eth_get_wol()
2372 wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; in sh_eth_get_wol()
2378 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_wol() local
2380 if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC) in sh_eth_set_wol()
2383 mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); in sh_eth_set_wol()
2385 device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled); in sh_eth_set_wol()
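
The WoL pair above is self-contained: magic-packet capability is advertised only when the per-chip 'magic' flag is set, and the user's choice is pushed into the PM core with device_set_wakeup_enable() so the suspend path at 3489 onward can branch on it. Sketch (eth_priv assumed extended with wol_enabled, pdev, and cd->magic):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/pm_wakeup.h>

    static void eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
    {
            struct eth_priv *mdp = netdev_priv(ndev);

            wol->supported = 0;
            wol->wolopts = 0;
            if (mdp->cd->magic) {
                    wol->supported = WAKE_MAGIC;
                    wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
            }
    }

    static int eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
    {
            struct eth_priv *mdp = netdev_priv(ndev);

            if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
                    return -EOPNOTSUPP;

            mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
            device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);
            return 0;
    }
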
2411 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_open() local
2414 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_open()
2416 napi_enable(&mdp->napi); in sh_eth_open()
2419 mdp->cd->irq_flags, ndev->name, ndev); in sh_eth_open()
2442 mdp->is_opened = 1; in sh_eth_open()
2449 napi_disable(&mdp->napi); in sh_eth_open()
2450 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_open()
2457 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tx_timeout() local
2463 netif_err(mdp, timer, ndev, in sh_eth_tx_timeout()
2471 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_tx_timeout()
2472 rxdesc = &mdp->rx_ring[i]; in sh_eth_tx_timeout()
2475 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_tx_timeout()
2476 mdp->rx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2478 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_tx_timeout()
2479 dev_kfree_skb(mdp->tx_skbuff[i]); in sh_eth_tx_timeout()
2480 mdp->tx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2493 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_start_xmit() local
2499 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_start_xmit()
2500 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { in sh_eth_start_xmit()
2502 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); in sh_eth_start_xmit()
2504 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2508 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2513 entry = mdp->cur_tx % mdp->num_tx_ring; in sh_eth_start_xmit()
2514 mdp->tx_skbuff[entry] = skb; in sh_eth_start_xmit()
2515 txdesc = &mdp->tx_ring[entry]; in sh_eth_start_xmit()
2517 if (!mdp->cd->hw_swap) in sh_eth_start_xmit()
2519 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len, in sh_eth_start_xmit()
2521 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_start_xmit()
2529 if (entry >= mdp->num_tx_ring - 1) in sh_eth_start_xmit()
2535 mdp->cur_tx++; in sh_eth_start_xmit()
2537 if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns)) in sh_eth_start_xmit()
2538 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); in sh_eth_start_xmit()
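
The transmit path stops the queue when fewer than four free slots remain (after one reclaim attempt), maps the payload, publishes the descriptor with the ownership bit set last, and kicks the DMA engine only if EDTRR shows it has gone idle. A sketch of that shape; the descriptor layout and TD_* bits are illustrative, and eth_tx_free() is a hypothetical stand-in for the reclaim helper at 1233:

    #include <linux/bits.h>
    #include <linux/dma-mapping.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    #define TD_TACT BIT(31)                 /* owned by HW (illustrative) */
    #define TD_TDLE BIT(30)                 /* ring-end marker (illustrative) */

    struct tx_desc {                        /* illustrative layout */
            __le32 status;
            __le32 len;
            __le32 addr;
            __le32 pad;
    };

    static int eth_tx_free(struct net_device *ndev);  /* reclaim, as at 1233 */

    static netdev_tx_t eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
    {
            struct eth_priv *mdp = netdev_priv(ndev);
            struct tx_desc *txdesc;
            dma_addr_t dma_addr;
            unsigned long flags;
            u32 entry;

            spin_lock_irqsave(&mdp->lock, flags);
            if (mdp->cur_tx - mdp->dirty_tx >= mdp->num_tx_ring - 4) {
                    if (!eth_tx_free(ndev)) {
                            netif_stop_queue(ndev);
                            spin_unlock_irqrestore(&mdp->lock, flags);
                            return NETDEV_TX_BUSY;  /* stack retries later */
                    }
            }
            spin_unlock_irqrestore(&mdp->lock, flags);

            dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
                                      DMA_TO_DEVICE);
            if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
                    kfree_skb(skb);
                    return NETDEV_TX_OK;            /* drop, don't retry */
            }

            entry = mdp->cur_tx % mdp->num_tx_ring;
            mdp->tx_skbuff[entry] = skb;
            txdesc = &mdp->tx_ring[entry];
            txdesc->addr = cpu_to_le32(dma_addr);
            txdesc->len = cpu_to_le32(skb->len << 16);  /* upper 16 bits */

            /* ownership bit last; mark the ring end on the final slot */
            txdesc->status |= cpu_to_le32(entry >= mdp->num_tx_ring - 1 ?
                                          TD_TACT | TD_TDLE : TD_TACT);
            mdp->cur_tx++;

            if (!(eth_read(mdp, EDTRR) & mdp->cd->edtrr_trns))
                    eth_write(mdp, mdp->cd->edtrr_trns, EDTRR);

            return NETDEV_TX_OK;
    }
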
2561 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_stats() local
2563 if (mdp->cd->no_tx_cntrs) in sh_eth_get_stats()
2566 if (!mdp->is_opened) in sh_eth_get_stats()
2573 if (mdp->cd->cexcr) { in sh_eth_get_stats()
2589 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_close() local
2597 mdp->irq_enabled = false; in sh_eth_close()
2599 napi_disable(&mdp->napi); in sh_eth_close()
2615 mdp->is_opened = 0; in sh_eth_close()
2617 pm_runtime_put(&mdp->pdev->dev); in sh_eth_close()
2639 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry) in sh_eth_tsu_get_post_bit() argument
2641 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); in sh_eth_tsu_get_post_bit()
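
The POST-register bit math at 2641 is dense; unpacked: each 32-bit TSU_POSTn register covers eight CAM entries, one nibble per entry with entry 0 in the top nibble, and within a nibble port 0 owns bit 3 while port 1 owns bit 1. A standalone check (0x08u avoids the signed-shift overflow the kernel tolerates):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t tsu_post_bit(int port, int entry)
    {
            return (0x08u >> (port << 1)) << (28 - (entry % 8) * 4);
    }

    int main(void)
    {
            printf("port0 entry0 -> 0x%08x\n", tsu_post_bit(0, 0)); /* 0x80000000 */
            printf("port1 entry0 -> 0x%08x\n", tsu_post_bit(1, 0)); /* 0x20000000 */
            printf("port0 entry7 -> 0x%08x\n", tsu_post_bit(0, 7)); /* 0x00000008 */
            printf("port1 entry9 -> 0x%08x\n", tsu_post_bit(1, 9)); /* 0x02000000 */
            return 0;
    }
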
2647 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_enable_cam_entry_post() local
2651 tmp = sh_eth_tsu_read(mdp, reg); in sh_eth_tsu_enable_cam_entry_post()
2652 sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg); in sh_eth_tsu_enable_cam_entry_post()
2658 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_disable_cam_entry_post() local
2663 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; in sh_eth_tsu_disable_cam_entry_post()
2665 tmp = sh_eth_tsu_read(mdp, reg); in sh_eth_tsu_disable_cam_entry_post()
2666 sh_eth_tsu_write(mdp, tmp & ~post_mask, reg); in sh_eth_tsu_disable_cam_entry_post()
2675 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_busy() local
2677 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) { in sh_eth_tsu_busy()
2692 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_write_entry() local
2696 iowrite32(val, mdp->tsu_addr + offset); in sh_eth_tsu_write_entry()
2701 iowrite32(val, mdp->tsu_addr + offset + 4); in sh_eth_tsu_write_entry()
2710 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_read_entry() local
2713 val = ioread32(mdp->tsu_addr + offset); in sh_eth_tsu_read_entry()
2718 val = ioread32(mdp->tsu_addr + offset + 4); in sh_eth_tsu_read_entry()
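
The entry accessors (2692-2718) treat each CAM address slot as eight bytes split across two consecutive 32-bit TSU registers: one access at the entry's offset, one at offset + 4. A sketch assuming the conventional packing (MAC bytes 0-3 big-endian in the first word, bytes 4-5 in the second); verify against the real sh_eth_tsu_write_entry() before relying on the exact byte order:

    #include <linux/io.h>
    #include <linux/types.h>

    static void tsu_write_entry(void __iomem *base, u16 offset, const u8 *addr)
    {
            iowrite32((u32)addr[0] << 24 | (u32)addr[1] << 16 |
                      (u32)addr[2] << 8 | addr[3], base + offset);
            iowrite32((u32)addr[4] << 8 | addr[5], base + offset + 4);
    }

    static void tsu_read_entry(void __iomem *base, u16 offset, u8 *addr)
    {
            u32 val = ioread32(base + offset);

            addr[0] = val >> 24;
            addr[1] = val >> 16;
            addr[2] = val >> 8;
            addr[3] = val;
            val = ioread32(base + offset + 4);
            addr[4] = val >> 8;
            addr[5] = val;
    }
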
2726 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_find_entry() local
2727 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_find_entry()
2753 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_disable_cam_entry_table() local
2754 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_disable_cam_entry_table()
2758 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) & in sh_eth_tsu_disable_cam_entry_table()
2770 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_add_entry() local
2771 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_add_entry()
2774 if (!mdp->cd->tsu) in sh_eth_tsu_add_entry()
2788 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) | in sh_eth_tsu_add_entry()
2800 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_del_entry() local
2803 if (!mdp->cd->tsu) in sh_eth_tsu_del_entry()
2823 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_purge_all() local
2826 if (!mdp->cd->tsu) in sh_eth_tsu_purge_all()
2844 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_purge_mcast() local
2845 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_purge_mcast()
2849 if (!mdp->cd->tsu) in sh_eth_tsu_purge_mcast()
2862 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rx_mode() local
2867 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_rx_mode()
2872 if (mdp->cd->tsu) in sh_eth_set_rx_mode()
2888 } else if (mdp->cd->tsu) { in sh_eth_set_rx_mode()
2907 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_rx_mode()
2912 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rx_csum() local
2915 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_rx_csum()
2926 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_rx_csum()
2933 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_features() local
2935 if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum) in sh_eth_set_features()
2943 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp) in sh_eth_get_vtag_index() argument
2945 if (!mdp->port) in sh_eth_get_vtag_index()
2954 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_vlan_rx_add_vid() local
2955 int vtag_reg_index = sh_eth_get_vtag_index(mdp); in sh_eth_vlan_rx_add_vid()
2957 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_add_vid()
2964 mdp->vlan_num_ids++; in sh_eth_vlan_rx_add_vid()
2969 if (mdp->vlan_num_ids > 1) { in sh_eth_vlan_rx_add_vid()
2971 sh_eth_tsu_write(mdp, 0, vtag_reg_index); in sh_eth_vlan_rx_add_vid()
2975 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK), in sh_eth_vlan_rx_add_vid()
2984 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_vlan_rx_kill_vid() local
2985 int vtag_reg_index = sh_eth_get_vtag_index(mdp); in sh_eth_vlan_rx_kill_vid()
2987 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_kill_vid()
2994 mdp->vlan_num_ids--; in sh_eth_vlan_rx_kill_vid()
2995 sh_eth_tsu_write(mdp, 0, vtag_reg_index); in sh_eth_vlan_rx_kill_vid()
3001 static void sh_eth_tsu_init(struct sh_eth_private *mdp) in sh_eth_tsu_init() argument
3003 if (!mdp->cd->dual_port) { in sh_eth_tsu_init()
3004 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ in sh_eth_tsu_init()
3005 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, in sh_eth_tsu_init()
3010 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ in sh_eth_tsu_init()
3011 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ in sh_eth_tsu_init()
3012 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ in sh_eth_tsu_init()
3013 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0); in sh_eth_tsu_init()
3014 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1); in sh_eth_tsu_init()
3015 sh_eth_tsu_write(mdp, 0, TSU_PRISL0); in sh_eth_tsu_init()
3016 sh_eth_tsu_write(mdp, 0, TSU_PRISL1); in sh_eth_tsu_init()
3017 sh_eth_tsu_write(mdp, 0, TSU_FWSL0); in sh_eth_tsu_init()
3018 sh_eth_tsu_write(mdp, 0, TSU_FWSL1); in sh_eth_tsu_init()
3019 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); in sh_eth_tsu_init()
3020 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ in sh_eth_tsu_init()
3021 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ in sh_eth_tsu_init()
3022 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ in sh_eth_tsu_init()
3023 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ in sh_eth_tsu_init()
3024 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ in sh_eth_tsu_init()
3025 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ in sh_eth_tsu_init()
3026 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ in sh_eth_tsu_init()
3027 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ in sh_eth_tsu_init()
3028 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ in sh_eth_tsu_init()
3032 static int sh_mdio_release(struct sh_eth_private *mdp) in sh_mdio_release() argument
3035 mdiobus_unregister(mdp->mii_bus); in sh_mdio_release()
3038 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_release()
3089 static int sh_mdio_init(struct sh_eth_private *mdp, in sh_mdio_init() argument
3094 struct platform_device *pdev = mdp->pdev; in sh_mdio_init()
3095 struct device *dev = &mdp->pdev->dev; in sh_mdio_init()
3105 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; in sh_mdio_init()
3110 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); in sh_mdio_init()
3111 if (!mdp->mii_bus) in sh_mdio_init()
3115 mdp->mii_bus->read = sh_mdiobb_read_c22; in sh_mdio_init()
3116 mdp->mii_bus->write = sh_mdiobb_write_c22; in sh_mdio_init()
3117 mdp->mii_bus->read_c45 = sh_mdiobb_read_c45; in sh_mdio_init()
3118 mdp->mii_bus->write_c45 = sh_mdiobb_write_c45; in sh_mdio_init()
3121 mdp->mii_bus->name = "sh_mii"; in sh_mdio_init()
3122 mdp->mii_bus->parent = dev; in sh_mdio_init()
3123 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in sh_mdio_init()
3128 mdp->mii_bus->irq[pd->phy] = pd->phy_irq; in sh_mdio_init()
3130 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); in sh_mdio_init()
3145 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_init()
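
sh_mdio_init() (3089 onward) is a textbook mdio-bitbang bring-up: a control block points the bitbang helper at the PIR register, alloc_mdio_bitbang() wraps it in a struct mii_bus, and of_mdiobus_register() attaches the bus to the device-tree node, with free_mdio_bitbang() as the error unwind at 3145. Sketch; struct bb_info and bb_ops stand in for the driver's bitbang glue, whose set_mdc()/set_mdio_dir()/set_mdio_data()/get_mdio_data() callbacks toggle PIR bits and are elided here, and eth_priv is assumed extended with a mii_bus pointer:

    #include <linux/device.h>
    #include <linux/mdio-bitbang.h>
    #include <linux/of_mdio.h>
    #include <linux/phy.h>
    #include <linux/slab.h>

    struct bb_info {
            struct mdiobb_ctrl ctrl;
            void __iomem *addr;             /* the PIR register */
    };

    extern const struct mdiobb_ops bb_ops;  /* PIR bit-toggling callbacks */

    static int eth_mdio_init(struct eth_priv *mdp, struct device *dev)
    {
            struct bb_info *bitbang;
            int ret;

            bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL);
            if (!bitbang)
                    return -ENOMEM;

            bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
            bitbang->ctrl.ops = &bb_ops;

            mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
            if (!mdp->mii_bus)
                    return -ENOMEM;

            mdp->mii_bus->name = "sh_mii";
            mdp->mii_bus->parent = dev;
            snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                     dev_name(dev), 0);     /* id suffix illustrative */

            ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
            if (ret)
                    free_mdio_bitbang(mdp->mii_bus);
            return ret;
    }
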
3258 struct sh_eth_private *mdp; in sh_eth_drv_probe() local
3276 mdp = netdev_priv(ndev); in sh_eth_drv_probe()
3277 mdp->num_tx_ring = TX_RING_SIZE; in sh_eth_drv_probe()
3278 mdp->num_rx_ring = RX_RING_SIZE; in sh_eth_drv_probe()
3279 mdp->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in sh_eth_drv_probe()
3280 if (IS_ERR(mdp->addr)) { in sh_eth_drv_probe()
3281 ret = PTR_ERR(mdp->addr); in sh_eth_drv_probe()
3287 spin_lock_init(&mdp->lock); in sh_eth_drv_probe()
3288 mdp->pdev = pdev; in sh_eth_drv_probe()
3299 mdp->phy_id = pd->phy; in sh_eth_drv_probe()
3300 mdp->phy_interface = pd->phy_interface; in sh_eth_drv_probe()
3301 mdp->no_ether_link = pd->no_ether_link; in sh_eth_drv_probe()
3302 mdp->ether_link_active_low = pd->ether_link_active_low; in sh_eth_drv_probe()
3306 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; in sh_eth_drv_probe()
3308 mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev); in sh_eth_drv_probe()
3310 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); in sh_eth_drv_probe()
3311 if (!mdp->reg_offset) { in sh_eth_drv_probe()
3313 mdp->cd->register_type); in sh_eth_drv_probe()
3317 sh_eth_set_default_cpu_data(mdp->cd); in sh_eth_drv_probe()
3326 if (mdp->cd->rx_csum) { in sh_eth_drv_probe()
3332 if (mdp->cd->tsu) in sh_eth_drv_probe()
3340 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; in sh_eth_drv_probe()
3350 if (mdp->cd->tsu) { in sh_eth_drv_probe()
3372 mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, in sh_eth_drv_probe()
3374 if (!mdp->tsu_addr) { in sh_eth_drv_probe()
3379 mdp->port = port; in sh_eth_drv_probe()
3384 if (mdp->cd->chip_reset) in sh_eth_drv_probe()
3385 mdp->cd->chip_reset(ndev); in sh_eth_drv_probe()
3388 sh_eth_tsu_init(mdp); in sh_eth_drv_probe()
3392 if (mdp->cd->rmiimode) in sh_eth_drv_probe()
3396 ret = sh_mdio_init(mdp, pd); in sh_eth_drv_probe()
3402 netif_napi_add(ndev, &mdp->napi, sh_eth_poll); in sh_eth_drv_probe()
3409 if (mdp->cd->magic) in sh_eth_drv_probe()
3422 netif_napi_del(&mdp->napi); in sh_eth_drv_probe()
3423 sh_mdio_release(mdp); in sh_eth_drv_probe()
3437 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_drv_remove() local
3440 netif_napi_del(&mdp->napi); in sh_eth_drv_remove()
3441 sh_mdio_release(mdp); in sh_eth_drv_remove()
3450 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_wol_setup() local
3454 napi_disable(&mdp->napi); in sh_eth_wol_setup()
3465 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_wol_restore() local
3468 napi_enable(&mdp->napi); in sh_eth_wol_restore()
3489 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_suspend() local
3497 if (mdp->wol_enabled) in sh_eth_suspend()
3508 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_resume() local
3514 if (mdp->wol_enabled) in sh_eth_resume()