Selected lines from the Airoha EN7581 Ethernet driver (airoha_eth.c)

// SPDX-License-Identifier: GPL-2.0-only
	(((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
	(((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
	(((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
	(((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))

	(((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
	(((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
	(((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
	(((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
	(((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
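
/*
 * The #define names for the macro bodies above were not captured by the
 * search; they appear to be the per-ring TX (split at ring 8) and RX
 * (split at ring 16) register offset helpers. Rings below the split sit
 * in one register bank, the rest in a second bank, with 0x20 bytes of
 * register space per ring. Checking the arithmetic with illustrative
 * ring numbers (not values from the driver):
 *
 *	ring 7:  0x0100 + (7 << 5)         = 0x01e0 (first bank)
 *	ring 8:  0x0b00 + ((8 - 8) << 5)   = 0x0b00 (second bank)
 *	ring 15: 0x0200 + (15 << 5)        = 0x03e0 (first bank)
 *	ring 16: 0x0e00 + ((16 - 16) << 5) = 0x0e00 (second bank)
 */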
#define QDMA_DESC_DROP_MASK	BIT(30)	/* tx: drop - rx: overflow */
	airoha_rr((eth)->fe_regs, (offset))
	airoha_wr((eth)->fe_regs, (offset), (val))
	airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
	airoha_rmw((eth)->fe_regs, (offset), 0, (val))
	airoha_rmw((eth)->fe_regs, (offset), (val), 0)

	airoha_rr((qdma)->regs, (offset))
	airoha_wr((qdma)->regs, (offset), (val))
	airoha_rmw((qdma)->regs, (offset), (mask), (val))
	airoha_rmw((qdma)->regs, (offset), 0, (val))
	airoha_rmw((qdma)->regs, (offset), (val), 0)
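
/*
 * Again, only the macro bodies were captured. Given the call sites later
 * in this excerpt (airoha_fe_rr(), airoha_fe_set(), airoha_fe_clear(),
 * airoha_qdma_wr(), ...), these are presumably the airoha_fe_* wrappers
 * over (eth)->fe_regs and the airoha_qdma_* wrappers over (qdma)->regs.
 * The underlying accessors are not part of the excerpt either; a minimal
 * sketch of what they plausibly look like, assuming plain MMIO:
 */
static u32 airoha_rr(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void airoha_wr(void __iomem *base, u32 offset, u32 val)
{
	writel(val, base + offset);
}

/* clear the bits in @mask, then set the bits in @val */
static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
	val |= airoha_rr(base, offset) & ~mask;
	airoha_wr(base, offset, val);

	return val;
}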

/* airoha_qdma_set_irqmask() */
	if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
		return;

	spin_lock_irqsave(&qdma->irq_lock, flags);

	qdma->irqmask[index] &= ~clear;
	qdma->irqmask[index] |= set;
	airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
	...
	spin_unlock_irqrestore(&qdma->irq_lock, flags);
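
/*
 * The irq enable/disable helpers used elsewhere in this excerpt (e.g.
 * airoha_qdma_irq_enable() in the RX NAPI poll) are presumably thin
 * wrappers around the masked update above, along these lines (a sketch,
 * not verbatim driver code):
 */
static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
				   u32 mask)
{
	airoha_qdma_set_irqmask(qdma, index, 0, mask);
}

static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
				    u32 mask)
{
	airoha_qdma_set_irqmask(qdma, index, mask, 0);
}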

/* airhoa_is_lan_gdm_port() */
	return port->id == 1;

/* airoha_set_macaddr() */
	struct airoha_eth *eth = port->qdma->eth;

/* airoha_set_gdm_port() */
	return -EINVAL;

/* airoha_set_gdm_ports(), error unwind */
	for (i--; i >= 0; i--)
		...

/* airoha_fe_maccr_init() */
	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {

/* airoha_fe_vip_setup() */
	/* ETH->ETH_P_1905 (0x893a) */

/* airoha_fe_set_pse_oq_rsv() */
	all_rsv += (val - orig_val);
	...
	tmp = fq_limit - all_rsv - 0x20;
	...
	tmp = fq_limit - all_rsv - 0x100;

/* airoha_fe_pse_ports_init() */
	for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)

/* airoha_fe_init() */
	/* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
	 * connect other rings to PSE Port0 OQ-0
	 */
	...
	/* NPU Core-3, NPU Bridge Channel-3 */
	...
	/* QDMA LAN, RX Ring-22 */
	...
	/* default aging mode for mbi unlock issue */

/* airoha_qdma_fill_rx_queue() */
	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];

	while (q->queued < q->ndesc - 1) {
		struct airoha_queue_entry *e = &q->entry[q->head];
		struct airoha_qdma_desc *desc = &q->desc[q->head];
		struct page *page;
		int offset;
		u32 val;

		page = page_pool_dev_alloc_frag(q->page_pool, &offset,
						q->buf_size);
		if (!page)
			break;

		q->head = (q->head + 1) % q->ndesc;
		q->queued++;

		e->buf = page_address(page) + offset;
		e->dma_addr = page_pool_get_dma_addr(page) + offset;
		e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);

		dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
					   dir);

		val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
		WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
		val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
		WRITE_ONCE(desc->data, cpu_to_le32(val));
		WRITE_ONCE(desc->msg0, 0);
		WRITE_ONCE(desc->msg1, 0);
		WRITE_ONCE(desc->msg2, 0);
		WRITE_ONCE(desc->msg3, 0);

		...
				FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
	}

/* airoha_qdma_get_gdm_port() */
	u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
	...
		port = sport - 1;
	...
		return -EINVAL;
	...
	return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
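
/*
 * The switch on the source-port field extracted from msg1 was not fully
 * captured: the visible arm derives the GDM port index as sport - 1,
 * unrecognized encodings return -EINVAL, and the final bounds check
 * guards the later eth->ports[] lookup.
 */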

/* airoha_qdma_rx_process() */
	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
	struct airoha_qdma *qdma = q->qdma;
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0];
	...
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct airoha_qdma_desc *desc = &q->desc[q->tail];
		dma_addr_t dma_addr = le32_to_cpu(desc->addr);
		u32 desc_ctrl = le32_to_cpu(desc->ctrl);
		...
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		dma_sync_single_for_cpu(eth->dev, dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size), dir);

		p = airoha_qdma_get_gdm_port(eth, desc);
		if (p < 0 || !eth->ports[p]) {
			page_pool_put_full_page(q->page_pool,
						virt_to_head_page(e->buf),
						true);
			continue;
		}

		skb = napi_build_skb(e->buf, q->buf_size);
		if (!skb) {
			page_pool_put_full_page(q->page_pool,
						virt_to_head_page(e->buf),
						true);
			break;
		}
		...
		skb->dev = eth->ports[p]->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		...
		napi_gro_receive(&q->napi, skb);

/* airoha_qdma_rx_napi_poll() */
		cur = airoha_qdma_rx_process(q, budget - done);
	...
		airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
				       ...
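
/*
 * Only two lines of the poll loop were captured; the usual NAPI shape
 * around them would be roughly the following (a sketch, with the final
 * interrupt mask name assumed rather than taken from the driver):
 *
 *	do {
 *		cur = airoha_qdma_rx_process(q, budget - done);
 *		done += cur;
 *	} while (cur && done < budget);
 *
 *	if (done < budget && napi_complete(napi))
 *		airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
 *				       RX_DONE_INT_MASK);
 */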

/* airoha_qdma_init_rx_queue() */
	struct page_pool_params pp_params = {
		...
		.dev = qdma->eth->dev,
		.napi = &q->napi,
	};
	struct airoha_eth *eth = qdma->eth;
	int qid = q - &qdma->q_rx[0], thr;
	dma_addr_t dma_addr;

	q->buf_size = PAGE_SIZE / 2;
	q->ndesc = ndesc;
	q->qdma = qdma;

	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(q->page_pool)) {
		int err = PTR_ERR(q->page_pool);

		q->page_pool = NULL;
		return err;
	}

	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
				      &dma_addr, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
	...
			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
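
/*
 * Only the .dev and .napi members of pp_params were captured. A plausible
 * full initializer for this kind of RX path (the values below are
 * illustrative assumptions, not the driver's): PP_FLAG_DMA_MAP matches
 * the page_pool_get_dma_addr()/page_pool_get_dma_dir() usage above, while
 * the explicit dma_sync_single_for_device() in airoha_qdma_fill_rx_queue()
 * suggests the pool does not sync for the device itself.
 */
struct page_pool_params pp_params = {
	.order		= 0,
	.pool_size	= 256,
	.nid		= NUMA_NO_NODE,
	.flags		= PP_FLAG_DMA_MAP,
	.dma_dir	= DMA_FROM_DEVICE,
	.dev		= qdma->eth->dev,
	.napi		= &q->napi,
};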

/* airoha_qdma_cleanup_rx_queue() */
	struct airoha_eth *eth = q->qdma->eth;

	while (q->queued) {
		struct airoha_queue_entry *e = &q->entry[q->tail];
		struct page *page = virt_to_head_page(e->buf);

		dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
					page_pool_get_dma_dir(q->page_pool));
		page_pool_put_full_page(q->page_pool, page, false);
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}

/* airoha_qdma_init_rx() */
	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		...
			/* rx-queue not bound to an irq */
		...
		err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
						...
	}

/* airoha_qdma_tx_napi_poll() */
	qdma = irq_q->qdma;
	id = irq_q - &qdma->q_tx_irq[0];
	eth = qdma->eth;

	while (irq_q->queued > 0 && done < budget) {
		u32 qid, last, val = irq_q->q[irq_q->head];
		...
		irq_q->q[irq_q->head] = 0xff; /* mark as done */
		irq_q->head = (irq_q->head + 1) % irq_q->size;
		irq_q->queued--;
		...
		if (qid >= ARRAY_SIZE(qdma->q_tx))
			continue;

		q = &qdma->q_tx[qid];
		if (!q->ndesc)
			continue;

		spin_lock_bh(&q->lock);

		while (q->queued > 0) {
			struct airoha_qdma_desc *desc = &q->desc[q->tail];
			struct airoha_queue_entry *e = &q->entry[q->tail];
			u32 desc_ctrl = le32_to_cpu(desc->ctrl);
			struct sk_buff *skb = e->skb;
			u16 index = q->tail;
			...
			q->tail = (q->tail + 1) % q->ndesc;
			q->queued--;

			dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
					 DMA_TO_DEVICE);

			WRITE_ONCE(desc->msg0, 0);
			WRITE_ONCE(desc->msg1, 0);
			...
			txq = netdev_get_tx_queue(skb->dev, qid);
			if (netif_tx_queue_stopped(txq) &&
			    q->ndesc - q->queued >= q->free_thr)
				netif_tx_wake_queue(txq);
			...
			e->skb = NULL;
			...
		}

		spin_unlock_bh(&q->lock);
	}
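
/*
 * Each 32-bit entry in irq_q->q is a completion record written by the
 * hardware; it apparently encodes the TX ring number (qid) and the index
 * of the last completed descriptor in that ring (last). 0xff doubles as
 * the "empty/consumed" marker: entries are reset to it above once
 * processed, and airoha_qdma_tx_irq_init() below pre-fills the whole
 * queue with it.
 */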

/* airoha_qdma_init_tx_queue() */
	struct airoha_eth *eth = qdma->eth;
	int i, qid = q - &qdma->q_tx[0];
	dma_addr_t dma_addr;

	spin_lock_init(&q->lock);
	q->ndesc = size;
	q->qdma = qdma;
	q->free_thr = 1 + MAX_SKB_FRAGS;

	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
				      &dma_addr, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	for (i = 0; i < q->ndesc; i++) {
		...
		WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
	}
	...
			FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
	...
			FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
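
/*
 * free_thr = 1 + MAX_SKB_FRAGS is the worst-case number of descriptors a
 * single skb can consume (linear head plus every page fragment). The
 * xmit path below stops the queue when fewer than free_thr descriptors
 * remain, and the TX completion path wakes it once at least free_thr are
 * free again, so a maximally fragmented skb always fits.
 */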

/* airoha_qdma_tx_irq_init() */
	int id = irq_q - &qdma->q_tx_irq[0];
	struct airoha_eth *eth = qdma->eth;
	dma_addr_t dma_addr;

	netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
			  airoha_qdma_tx_napi_poll);
	irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
				       &dma_addr, GFP_KERNEL);
	if (!irq_q->q)
		return -ENOMEM;

	memset(irq_q->q, 0xff, size * sizeof(u32));
	irq_q->size = size;
	irq_q->qdma = qdma;

/* airoha_qdma_init_tx() */
	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
		err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
					      ...
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
						...
	}

/* airoha_qdma_cleanup_tx_queue() */
	struct airoha_eth *eth = q->qdma->eth;

	spin_lock_bh(&q->lock);
	while (q->queued) {
		struct airoha_queue_entry *e = &q->entry[q->tail];

		dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(e->skb);
		e->skb = NULL;

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_bh(&q->lock);

/* airoha_qdma_init_hfwd_queues() */
	struct airoha_eth *eth = qdma->eth;
	dma_addr_t dma_addr;
	...
	qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
					      GFP_KERNEL);
	if (!qdma->hfwd.desc)
		return -ENOMEM;
	...
	qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
					   GFP_KERNEL);
	if (!qdma->hfwd.q)
		return -ENOMEM;
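
/*
 * The size computations for the two coherent buffers were not captured.
 * Judging by the field names, these back the hardware forwarding (hfwd)
 * descriptor table and buffer queue that the QDMA engine walks on its
 * own, without per-packet CPU involvement.
 */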

/* airoha_qdma_init_qos() */
	/* fast-tick 25us */

/* airoha_qdma_hw_init() */
	for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
		...
	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;
		...
	}
	...
	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;
		...
	}

/* airoha_irq_handler() */
	u32 intr[ARRAY_SIZE(qdma->irqmask)];

	for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
		...
		intr[i] &= qdma->irqmask[i];
		...
	}
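
	/*
	 * The raw status read before the masked copy, and the ack after
	 * it, were not captured; presumably something like the following
	 * (a sketch, with the register name assumed):
	 *
	 *	intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
	 *	intr[i] &= qdma->irqmask[i];
	 *	airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
	 */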

	if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
		return IRQ_NONE;
	...
	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;
		...
		napi_schedule(&qdma->q_rx[i].napi);
	}
	...
	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
		struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
		...
		irq_q->head = head % irq_q->size;
		irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
		...
		napi_schedule(&qdma->q_tx_irq[i].napi);
	}

/* airoha_qdma_init() */
	int err, id = qdma - &eth->qdma[0];
	const char *res;

	spin_lock_init(&qdma->irq_lock);
	qdma->eth = eth;

	res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
	if (!res)
		return -ENOMEM;

	qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
	if (IS_ERR(qdma->regs))
		return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
				     ...

	qdma->irq = platform_get_irq(pdev, 4 * id);
	if (qdma->irq < 0)
		return qdma->irq;

	err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
			       ...

/* airoha_hw_init() */
	err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts),
					eth->xsi_rsts);
	if (err)
		return err;

	err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;
	...
	err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
	if (err)
		return err;
	...
	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
		err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
		if (err)
			return err;
	}

	set_bit(DEV_STATE_INITIALIZED, &eth->state);

/* airoha_hw_cleanup() */
	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_disable(&qdma->q_rx[i].napi);
		netif_napi_del(&qdma->q_rx[i].napi);
		airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
		if (qdma->q_rx[i].page_pool)
			page_pool_destroy(qdma->q_rx[i].page_pool);
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
		napi_disable(&qdma->q_tx_irq[i].napi);
		netif_napi_del(&qdma->q_tx_irq[i].napi);
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		if (!qdma->q_tx[i].ndesc)
			continue;

		airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
	}

/* airoha_qdma_start_napi() */
	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
		napi_enable(&qdma->q_tx_irq[i].napi);

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		napi_enable(&qdma->q_rx[i].napi);
	}

/* airoha_update_hw_stats() */
	struct airoha_eth *eth = port->qdma->eth;
	u32 val, i = 0;

	spin_lock(&port->stats.lock);
	u64_stats_update_begin(&port->stats.syncp);

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
	port->stats.tx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
	port->stats.tx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
	port->stats.tx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
	port->stats.tx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
	port->stats.tx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
	port->stats.tx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
	port->stats.tx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
	port->stats.tx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
	port->stats.tx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
	port->stats.tx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
	port->stats.rx_ok_pkts += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
	port->stats.rx_ok_pkts += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
	port->stats.rx_ok_bytes += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
	port->stats.rx_ok_bytes += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
	port->stats.rx_drops += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
	port->stats.rx_broadcast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
	port->stats.rx_multicast += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
	port->stats.rx_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
	port->stats.rx_crc_error += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
	port->stats.rx_over_errors += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
	port->stats.rx_fragment += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
	port->stats.rx_jabber += val;

	i = 0;
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
	port->stats.rx_len[i] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
	port->stats.rx_len[i] += ((u64)val << 32);
	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
	port->stats.rx_len[i++] += val;

	val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
	port->stats.rx_len[i++] += val;

	airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
		      ...

	u64_stats_update_end(&port->stats.syncp);
	spin_unlock(&port->stats.lock);
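
/*
 * Every 64-bit MIB counter above is read as a high/low register pair and
 * accumulated as ((u64)hi << 32) + lo. A helper that would collapse the
 * repetition (hypothetical, not part of the driver):
 */
static u64 airoha_fe_rr64(struct airoha_eth *eth, u32 hi_reg, u32 lo_reg)
{
	u32 hi = airoha_fe_rr(eth, hi_reg);
	u32 lo = airoha_fe_rr(eth, lo_reg);

	return ((u64)hi << 32) | lo;
}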

/* airoha_dev_open() */
	struct airoha_qdma *qdma = port->qdma;
	int err;

	err = airoha_set_gdm_ports(qdma->eth, true);
	if (err)
		return err;
	...
		airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
			      ...
	...
		airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
				...

/* airoha_dev_stop() */
	struct airoha_qdma *qdma = port->qdma;
	int err;

	err = airoha_set_gdm_ports(qdma->eth, false);
	if (err)
		return err;

/* airoha_dev_set_macaddr() */
	airoha_set_macaddr(port, dev->dev_addr);

/* airoha_dev_init() */
	airoha_set_macaddr(port, dev->dev_addr);

/* airoha_dev_get_stats64() */
	do {
		start = u64_stats_fetch_begin(&port->stats.syncp);
		storage->rx_packets = port->stats.rx_ok_pkts;
		storage->tx_packets = port->stats.tx_ok_pkts;
		storage->rx_bytes = port->stats.rx_ok_bytes;
		storage->tx_bytes = port->stats.tx_ok_bytes;
		storage->multicast = port->stats.rx_multicast;
		storage->rx_errors = port->stats.rx_errors;
		storage->rx_dropped = port->stats.rx_drops;
		storage->tx_dropped = port->stats.tx_drops;
		storage->rx_crc_errors = port->stats.rx_crc_error;
		storage->rx_over_errors = port->stats.rx_over_errors;
	} while (u64_stats_fetch_retry(&port->stats.syncp, start));

/* airoha_dev_xmit() */
	struct airoha_qdma *qdma = port->qdma;
	u32 nr_frags = 1 + sinfo->nr_frags;
	...
	void *data = skb->data;
	...
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		...
	if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		__be16 csum = cpu_to_be16(sinfo->gso_size);

		tcp_hdr(skb)->check = (__force __sum16)csum;
		...
	}
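
	/*
	 * For TSO the DMA engine apparently takes the MSS in place of the
	 * TCP checksum: gso_size is byte swapped and stored into
	 * tcp_hdr(skb)->check, presumably so the hardware can recompute
	 * the real checksum for each segment it emits.
	 */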

	fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
	...
	q = &qdma->q_tx[qid];
	if (WARN_ON_ONCE(!q->ndesc))
		goto error;

	spin_lock_bh(&q->lock);
	...
	if (q->queued + nr_frags > q->ndesc) {
		...
		spin_unlock_bh(&q->lock);
		return NETDEV_TX_BUSY;
	}

	index = q->head;
	for (i = 0; i < nr_frags; i++) {
		struct airoha_qdma_desc *desc = &q->desc[index];
		struct airoha_queue_entry *e = &q->entry[index];
		skb_frag_t *frag = &sinfo->frags[i];
		...
		addr = dma_map_single(dev->dev.parent, data, len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
			goto error_unmap;

		index = (index + 1) % q->ndesc;
		...
		if (i < nr_frags - 1)
			...
		WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
		WRITE_ONCE(desc->addr, cpu_to_le32(addr));
		...
		WRITE_ONCE(desc->data, cpu_to_le32(val));
		WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
		WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
		WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));

		e->skb = i ? NULL : skb;
		e->dma_addr = addr;
		e->dma_len = len;
		...
	}

	q->head = index;
	q->queued += i;
	...
			FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));

	if (q->ndesc - q->queued < q->free_thr)
		netif_tx_stop_queue(txq);

	spin_unlock_bh(&q->lock);
	...
error_unmap:
	for (i--; i >= 0; i--) {
		index = (q->head + i) % q->ndesc;
		dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
				 q->entry[index].dma_len, DMA_TO_DEVICE);
	}

	spin_unlock_bh(&q->lock);
error:
	dev->stats.tx_dropped++;
	...

/* airoha_ethtool_get_drvinfo() */
	struct airoha_eth *eth = port->qdma->eth;

	strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
	strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));

/* airoha_ethtool_get_mac_stats() */
	do {
		start = u64_stats_fetch_begin(&port->stats.syncp);
		stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
		stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
		stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
	} while (u64_stats_fetch_retry(&port->stats.syncp, start));

/* airoha_ethtool_get_rmon_stats() */
static void
airoha_ethtool_get_rmon_stats(struct net_device *dev,
			      struct ethtool_rmon_stats *stats,
			      const struct ethtool_rmon_hist_range **ranges)
{
	struct airoha_gdm_port *port = netdev_priv(dev);
	struct airoha_hw_stats *hw_stats = &port->stats;
	...
	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
		     ARRAY_SIZE(hw_stats->tx_len) + 1);
	BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
		     ARRAY_SIZE(hw_stats->rx_len) + 1);

	*ranges = airoha_ethtool_rmon_ranges;
	...
	do {
		start = u64_stats_fetch_begin(&port->stats.syncp);
		stats->fragments = hw_stats->rx_fragment;
		stats->jabbers = hw_stats->rx_jabber;
		for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
		     i++) {
			stats->hist[i] = hw_stats->rx_len[i];
			stats->hist_tx[i] = hw_stats->tx_len[i];
		}
	} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
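
/*
 * airoha_ethtool_rmon_ranges is referenced above but was not captured.
 * Given the seven tx_len/rx_len buckets filled in airoha_update_hw_stats()
 * (runt/64, 65-127, 128-255, 256-511, 512-1023, 1024+, long), it is
 * presumably a zero-terminated ethtool_rmon_hist_range array with one
 * entry per bucket; the exact bounds below are an assumption, not the
 * driver's values.
 */
static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  2047 },
	{ 2048, 10239 },
	{ /* sentinel */ },
};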

/* airoha_alloc_gdm_port() */
	err = of_property_read_u32(np, "reg", &id);
	if (err) {
		dev_err(eth->dev, "missing gdm port id\n");
		return -EINVAL;
	}

	index = id - 1;

	if (!id || id > ARRAY_SIZE(eth->ports)) {
		dev_err(eth->dev, "invalid gdm port id: %d\n", id);
		return -EINVAL;
	}

	if (eth->ports[index]) {
		dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
		return -EINVAL;
	}

	dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
				      ...
	if (!dev) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}

	qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
	dev->netdev_ops = &airoha_netdev_ops;
	dev->ethtool_ops = &airoha_ethtool_ops;
	dev->max_mtu = AIROHA_MAX_MTU;
	dev->watchdog_timeo = 5 * HZ;
	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			   ...
	dev->features |= dev->hw_features;
	dev->dev.of_node = np;
	dev->irq = qdma->irq;
	SET_NETDEV_DEV(dev, eth->dev);

	err = of_get_ethdev_address(np, dev);
	if (err) {
		if (err == -EPROBE_DEFER)
			return err;

		eth_hw_addr_random(dev);
		dev_info(eth->dev, "generated random MAC address %pM\n",
			 dev->dev_addr);
	}

	port = netdev_priv(dev);
	u64_stats_init(&port->stats.syncp);
	spin_lock_init(&port->stats.lock);
	port->qdma = qdma;
	port->dev = dev;
	port->id = id;
	eth->ports[index] = port;

/* airoha_probe() */
	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->dev = &pdev->dev;

	err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(eth->dev, "failed configuring DMA mask\n");
		return err;
	}

	eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
	if (IS_ERR(eth->fe_regs))
		return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
				     ...

	eth->rsts[0].id = "fe";
	eth->rsts[1].id = "pdma";
	eth->rsts[2].id = "qdma";
	err = devm_reset_control_bulk_get_exclusive(eth->dev,
						    ARRAY_SIZE(eth->rsts),
						    eth->rsts);
	if (err) {
		dev_err(eth->dev, "failed to get bulk reset lines\n");
		return err;
	}

	eth->xsi_rsts[0].id = "xsi-mac";
	eth->xsi_rsts[1].id = "hsi0-mac";
	eth->xsi_rsts[2].id = "hsi1-mac";
	eth->xsi_rsts[3].id = "hsi-mac";
	eth->xsi_rsts[4].id = "xfp-mac";
	err = devm_reset_control_bulk_get_exclusive(eth->dev,
						    ARRAY_SIZE(eth->xsi_rsts),
						    eth->xsi_rsts);
	if (err) {
		dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
		return err;
	}

	eth->napi_dev = alloc_netdev_dummy(0);
	if (!eth->napi_dev)
		return -ENOMEM;

	eth->napi_dev->threaded = true;
	strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
	...
	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_qdma_start_napi(&eth->qdma[i]);

	for_each_child_of_node(pdev->dev.of_node, np) {
		if (!of_device_is_compatible(np, "airoha,eth-mac"))
			continue;
		...
	}
	...
	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_hw_cleanup(&eth->qdma[i]);

	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
		struct airoha_gdm_port *port = eth->ports[i];

		if (port && port->dev->reg_state == NETREG_REGISTERED)
			unregister_netdev(port->dev);
	}
	free_netdev(eth->napi_dev);
	...

/* airoha_remove() */
	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
		airoha_hw_cleanup(&eth->qdma[i]);

	for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
		struct airoha_gdm_port *port = eth->ports[i];

		if (!port)
			continue;

		airoha_dev_stop(port->dev);
		unregister_netdev(port->dev);
	}
	free_netdev(eth->napi_dev);

/* of_device_id match table */
	{ .compatible = "airoha,en7581-eth" },