Lines Matching +full:ethernet +full:- +full:pse
1 // SPDX-License-Identifier: GPL-2.0-only
490 (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
493 (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
502 (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
507 (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
515 (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
518 (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
524 (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
529 (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
532 (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
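The indexed register macros above place each ring's registers in a 32-byte (1 << 5) window and split the rings across two banks, with the bank boundary at index 8 or 16. A minimal standalone sketch of the same address math, assuming the offsets shown in the listing (the macro and function names here are illustrative):

	#include <assert.h>
	#include <stdio.h>

	#define RX_RING_REG_BASE(_n) \
		(((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))

	int main(void)
	{
		assert(RX_RING_REG_BASE(0)  == 0x0200);
		assert(RX_RING_REG_BASE(15) == 0x03e0); /* last window of bank 0 */
		assert(RX_RING_REG_BASE(16) == 0x0e00); /* bank 1 starts here */
		printf("ring 17 window: 0x%04x\n", RX_RING_REG_BASE(17));
		return 0;
	}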
593 #define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
849 airoha_rr((eth)->fe_regs, (offset))
851 airoha_wr((eth)->fe_regs, (offset), (val))
853 airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
855 airoha_rmw((eth)->fe_regs, (offset), 0, (val))
857 airoha_rmw((eth)->fe_regs, (offset), (val), 0)
860 airoha_rr((qdma)->regs, (offset))
862 airoha_wr((qdma)->regs, (offset), (val))
864 airoha_rmw((qdma)->regs, (offset), (mask), (val))
866 airoha_rmw((qdma)->regs, (offset), 0, (val))
868 airoha_rmw((qdma)->regs, (offset), (val), 0)
875 if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask))) in airoha_qdma_set_irqmask()
878 spin_lock_irqsave(&qdma->irq_lock, flags); in airoha_qdma_set_irqmask()
880 qdma->irqmask[index] &= ~clear; in airoha_qdma_set_irqmask()
881 qdma->irqmask[index] |= set; in airoha_qdma_set_irqmask()
882 airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]); in airoha_qdma_set_irqmask()
888 spin_unlock_irqrestore(&qdma->irq_lock, flags); in airoha_qdma_set_irqmask()
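airoha_qdma_set_irqmask() is a spin_lock_irqsave-protected read-modify-write of the cached per-bank interrupt mask, flushed to REG_INT_ENABLE. The airoha_qdma_irq_enable() calls seen later in the listing are presumably thin wrappers around it; a hedged sketch of their likely shape, not a verbatim quote of the driver:

	static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
					   u32 mask)
	{
		airoha_qdma_set_irqmask(qdma, index, 0, mask);	/* clear none, set mask */
	}

	static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
					    u32 mask)
	{
		airoha_qdma_set_irqmask(qdma, index, mask, 0);	/* clear mask, set none */
	}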
909 return port->id == 1; in airhoa_is_lan_gdm_port()
914 struct airoha_eth *eth = port->qdma->eth; in airoha_set_macaddr()
963 return -EINVAL; in airoha_set_gdm_port()
998 for (i--; i >= 0; i--) in airoha_set_gdm_ports()
1008 for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) { in airoha_fe_maccr_init()
1080 /* ETH->ETH_P_1905 (0x893a) */ in airoha_fe_vip_setup()
1134 all_rsv += (val - orig_val); in airoha_fe_set_pse_oq_rsv()
1141 tmp = fq_limit - all_rsv - 0x20; in airoha_fe_set_pse_oq_rsv()
1146 tmp = fq_limit - all_rsv - 0x100; in airoha_fe_set_pse_oq_rsv()
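airoha_fe_set_pse_oq_rsv() keeps a running total of reserved buffer pages (all_rsv) as per-queue reservations change, then rederives two shared-buffer watermarks from whatever remains of fq_limit. A sketch of that accounting with illustrative names (the threshold register writes are elided from this listing):

	static void recompute_watermarks(u32 fq_limit, u32 all_rsv,
					 u32 *thr_a, u32 *thr_b)
	{
		/* all_rsv was bumped by (val - orig_val) for the changed queue */
		*thr_a = fq_limit - all_rsv - 0x20;
		*thr_b = fq_limit - all_rsv - 0x100;
	}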
1209 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++) in airoha_fe_pse_ports_init()
1323 /* PSE IQ reserve */ in airoha_fe_init()
1348 /* set PSE page size to 128B */ in airoha_fe_init()
1358 /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1 in airoha_fe_init()
1359 * connect other rings to PSE Port0 OQ-0 in airoha_fe_init()
1376 /* NPU Core-3, NPU Bridge Channel-3 */ in airoha_fe_init()
1381 /* QDMA LAN, RX Ring-22 */ in airoha_fe_init()
1412 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); in airoha_qdma_fill_rx_queue()
1413 struct airoha_qdma *qdma = q->qdma; in airoha_qdma_fill_rx_queue()
1414 struct airoha_eth *eth = qdma->eth; in airoha_qdma_fill_rx_queue()
1415 int qid = q - &qdma->q_rx[0]; in airoha_qdma_fill_rx_queue()
1418 while (q->queued < q->ndesc - 1) { in airoha_qdma_fill_rx_queue()
1419 struct airoha_queue_entry *e = &q->entry[q->head]; in airoha_qdma_fill_rx_queue()
1420 struct airoha_qdma_desc *desc = &q->desc[q->head]; in airoha_qdma_fill_rx_queue()
1425 page = page_pool_dev_alloc_frag(q->page_pool, &offset, in airoha_qdma_fill_rx_queue()
1426 q->buf_size); in airoha_qdma_fill_rx_queue()
1430 q->head = (q->head + 1) % q->ndesc; in airoha_qdma_fill_rx_queue()
1431 q->queued++; in airoha_qdma_fill_rx_queue()
1434 e->buf = page_address(page) + offset; in airoha_qdma_fill_rx_queue()
1435 e->dma_addr = page_pool_get_dma_addr(page) + offset; in airoha_qdma_fill_rx_queue()
1436 e->dma_len = SKB_WITH_OVERHEAD(q->buf_size); in airoha_qdma_fill_rx_queue()
1438 dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len, in airoha_qdma_fill_rx_queue()
1441 val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len); in airoha_qdma_fill_rx_queue()
1442 WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); in airoha_qdma_fill_rx_queue()
1443 WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr)); in airoha_qdma_fill_rx_queue()
1444 val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head); in airoha_qdma_fill_rx_queue()
1445 WRITE_ONCE(desc->data, cpu_to_le32(val)); in airoha_qdma_fill_rx_queue()
1446 WRITE_ONCE(desc->msg0, 0); in airoha_qdma_fill_rx_queue()
1447 WRITE_ONCE(desc->msg1, 0); in airoha_qdma_fill_rx_queue()
1448 WRITE_ONCE(desc->msg2, 0); in airoha_qdma_fill_rx_queue()
1449 WRITE_ONCE(desc->msg3, 0); in airoha_qdma_fill_rx_queue()
1453 FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head)); in airoha_qdma_fill_rx_queue()
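The refill loop stops at q->ndesc - 1, so one descriptor always stays unused: with that hole, head == tail unambiguously means "empty" and no separate full/empty flag is needed. A self-contained sketch of the ring discipline, with invented minimal types:

	#include <stdbool.h>

	struct ring {
		unsigned int head;	/* next slot the CPU fills */
		unsigned int tail;	/* next slot the CPU reclaims */
		unsigned int queued;	/* currently filled descriptors */
		unsigned int ndesc;	/* ring size */
	};

	static bool ring_can_fill(const struct ring *r)
	{
		return r->queued < r->ndesc - 1;	/* always leave one hole */
	}

	static void ring_push(struct ring *r)
	{
		r->head = (r->head + 1) % r->ndesc;
		r->queued++;
	}

	static void ring_pop(struct ring *r)
	{
		r->tail = (r->tail + 1) % r->ndesc;
		r->queued--;
	}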
1462 u32 port, sport, msg1 = le32_to_cpu(desc->msg1); in airoha_qdma_get_gdm_port()
1470 port = sport - 1; in airoha_qdma_get_gdm_port()
1473 return -EINVAL; in airoha_qdma_get_gdm_port()
1476 return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port; in airoha_qdma_get_gdm_port()
1481 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); in airoha_qdma_rx_process()
1482 struct airoha_qdma *qdma = q->qdma; in airoha_qdma_rx_process()
1483 struct airoha_eth *eth = qdma->eth; in airoha_qdma_rx_process()
1484 int qid = q - &qdma->q_rx[0]; in airoha_qdma_rx_process()
1488 struct airoha_queue_entry *e = &q->entry[q->tail]; in airoha_qdma_rx_process()
1489 struct airoha_qdma_desc *desc = &q->desc[q->tail]; in airoha_qdma_rx_process()
1490 dma_addr_t dma_addr = le32_to_cpu(desc->addr); in airoha_qdma_rx_process()
1491 u32 desc_ctrl = le32_to_cpu(desc->ctrl); in airoha_qdma_rx_process()
1505 q->tail = (q->tail + 1) % q->ndesc; in airoha_qdma_rx_process()
1506 q->queued--; in airoha_qdma_rx_process()
1508 dma_sync_single_for_cpu(eth->dev, dma_addr, in airoha_qdma_rx_process()
1509 SKB_WITH_OVERHEAD(q->buf_size), dir); in airoha_qdma_rx_process()
1512 if (p < 0 || !eth->ports[p]) { in airoha_qdma_rx_process()
1513 page_pool_put_full_page(q->page_pool, in airoha_qdma_rx_process()
1514 virt_to_head_page(e->buf), in airoha_qdma_rx_process()
1519 skb = napi_build_skb(e->buf, q->buf_size); in airoha_qdma_rx_process()
1521 page_pool_put_full_page(q->page_pool, in airoha_qdma_rx_process()
1522 virt_to_head_page(e->buf), in airoha_qdma_rx_process()
1530 skb->dev = eth->ports[p]->dev; in airoha_qdma_rx_process()
1531 skb->protocol = eth_type_trans(skb, skb->dev); in airoha_qdma_rx_process()
1532 skb->ip_summed = CHECKSUM_UNNECESSARY; in airoha_qdma_rx_process()
1534 napi_gro_receive(&q->napi, skb); in airoha_qdma_rx_process()
1549 cur = airoha_qdma_rx_process(q, budget - done); in airoha_qdma_rx_napi_poll()
1554 airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1, in airoha_qdma_rx_napi_poll()
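The rx poll fragments follow the canonical NAPI contract: consume at most "budget" packets per invocation and re-arm the ring interrupt only after napi_complete_done() accepts completion. A condensed skeleton of that contract, where process_ring() and rearm_irq() are hypothetical stand-ins for airoha_qdma_rx_process() and the airoha_qdma_irq_enable() call above:

	int process_ring(struct napi_struct *napi, int budget);	/* hypothetical */
	void rearm_irq(struct napi_struct *napi);		/* hypothetical */

	static int example_rx_napi_poll(struct napi_struct *napi, int budget)
	{
		int done = 0, cur;

		do {
			cur = process_ring(napi, budget - done);
			done += cur;
		} while (cur && done < budget);

		if (done < budget && napi_complete_done(napi, done))
			rearm_irq(napi);	/* only once polling is truly over */

		return done;
	}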
1570 .dev = qdma->eth->dev, in airoha_qdma_init_rx_queue()
1571 .napi = &q->napi, in airoha_qdma_init_rx_queue()
1573 struct airoha_eth *eth = qdma->eth; in airoha_qdma_init_rx_queue()
1574 int qid = q - &qdma->q_rx[0], thr; in airoha_qdma_init_rx_queue()
1577 q->buf_size = PAGE_SIZE / 2; in airoha_qdma_init_rx_queue()
1578 q->ndesc = ndesc; in airoha_qdma_init_rx_queue()
1579 q->qdma = qdma; in airoha_qdma_init_rx_queue()
1581 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), in airoha_qdma_init_rx_queue()
1583 if (!q->entry) in airoha_qdma_init_rx_queue()
1584 return -ENOMEM; in airoha_qdma_init_rx_queue()
1586 q->page_pool = page_pool_create(&pp_params); in airoha_qdma_init_rx_queue()
1587 if (IS_ERR(q->page_pool)) { in airoha_qdma_init_rx_queue()
1588 int err = PTR_ERR(q->page_pool); in airoha_qdma_init_rx_queue()
1590 q->page_pool = NULL; in airoha_qdma_init_rx_queue()
1594 q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), in airoha_qdma_init_rx_queue()
1596 if (!q->desc) in airoha_qdma_init_rx_queue()
1597 return -ENOMEM; in airoha_qdma_init_rx_queue()
1599 netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll); in airoha_qdma_init_rx_queue()
1610 FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head)); in airoha_qdma_init_rx_queue()
1619 struct airoha_eth *eth = q->qdma->eth; in airoha_qdma_cleanup_rx_queue()
1621 while (q->queued) { in airoha_qdma_cleanup_rx_queue()
1622 struct airoha_queue_entry *e = &q->entry[q->tail]; in airoha_qdma_cleanup_rx_queue()
1623 struct page *page = virt_to_head_page(e->buf); in airoha_qdma_cleanup_rx_queue()
1625 dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len, in airoha_qdma_cleanup_rx_queue()
1626 page_pool_get_dma_dir(q->page_pool)); in airoha_qdma_cleanup_rx_queue()
1627 page_pool_put_full_page(q->page_pool, page, false); in airoha_qdma_cleanup_rx_queue()
1628 q->tail = (q->tail + 1) % q->ndesc; in airoha_qdma_cleanup_rx_queue()
1629 q->queued--; in airoha_qdma_cleanup_rx_queue()
1637 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_qdma_init_rx()
1641 /* rx queue not bound to an irq */ in airoha_qdma_init_rx()
1645 err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma, in airoha_qdma_init_rx()
1663 qdma = irq_q->qdma; in airoha_qdma_tx_napi_poll()
1664 id = irq_q - &qdma->q_tx_irq[0]; in airoha_qdma_tx_napi_poll()
1665 eth = qdma->eth; in airoha_qdma_tx_napi_poll()
1669 head = head % irq_q->size; in airoha_qdma_tx_napi_poll()
1673 u32 qid, val = irq_q->q[head]; in airoha_qdma_tx_napi_poll()
1683 irq_q->q[head] = 0xff; /* mark as done */ in airoha_qdma_tx_napi_poll()
1684 head = (head + 1) % irq_q->size; in airoha_qdma_tx_napi_poll()
1685 irq_queued--; in airoha_qdma_tx_napi_poll()
1689 if (qid >= ARRAY_SIZE(qdma->q_tx)) in airoha_qdma_tx_napi_poll()
1692 q = &qdma->q_tx[qid]; in airoha_qdma_tx_napi_poll()
1693 if (!q->ndesc) in airoha_qdma_tx_napi_poll()
1697 if (index >= q->ndesc) in airoha_qdma_tx_napi_poll()
1700 spin_lock_bh(&q->lock); in airoha_qdma_tx_napi_poll()
1702 if (!q->queued) in airoha_qdma_tx_napi_poll()
1705 desc = &q->desc[index]; in airoha_qdma_tx_napi_poll()
1706 desc_ctrl = le32_to_cpu(desc->ctrl); in airoha_qdma_tx_napi_poll()
1712 e = &q->entry[index]; in airoha_qdma_tx_napi_poll()
1713 skb = e->skb; in airoha_qdma_tx_napi_poll()
1715 dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, in airoha_qdma_tx_napi_poll()
1718 WRITE_ONCE(desc->msg0, 0); in airoha_qdma_tx_napi_poll()
1719 WRITE_ONCE(desc->msg1, 0); in airoha_qdma_tx_napi_poll()
1720 q->queued--; in airoha_qdma_tx_napi_poll()
1722 /* completion ring can report out-of-order indexes if hw QoS in airoha_qdma_tx_napi_poll()
1723 * is enabled and packets with different priority are queued in airoha_qdma_tx_napi_poll()
1724 * to same DMA ring. Take into account possible out-of-order in airoha_qdma_tx_napi_poll()
1727 while (q->tail != q->head && !q->entry[q->tail].dma_addr) in airoha_qdma_tx_napi_poll()
1728 q->tail = (q->tail + 1) % q->ndesc; in airoha_qdma_tx_napi_poll()
1734 txq = netdev_get_tx_queue(skb->dev, queue); in airoha_qdma_tx_napi_poll()
1735 netdev_tx_completed_queue(txq, 1, skb->len); in airoha_qdma_tx_napi_poll()
1737 q->ndesc - q->queued >= q->free_thr) in airoha_qdma_tx_napi_poll()
1743 spin_unlock_bh(&q->lock); in airoha_qdma_tx_napi_poll()
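The "out-of-order" comment above describes a reclaim trick: each completion only clears its own slot (a zeroed dma_addr doubles as the "done" marker), and the tail then walks past every contiguous completed slot. A standalone sketch with invented minimal types:

	struct slot {
		unsigned long long dma_addr;	/* 0 once completed */
	};

	struct txring {
		struct slot *entry;
		unsigned int head, tail, ndesc;
	};

	static void txring_complete(struct txring *q, unsigned int index)
	{
		q->entry[index].dma_addr = 0;	/* this slot is done */

		/* advance tail over any already-completed neighbours */
		while (q->tail != q->head && !q->entry[q->tail].dma_addr)
			q->tail = (q->tail + 1) % q->ndesc;
	}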
1766 struct airoha_eth *eth = qdma->eth; in airoha_qdma_init_tx_queue()
1767 int i, qid = q - &qdma->q_tx[0]; in airoha_qdma_init_tx_queue()
1770 spin_lock_init(&q->lock); in airoha_qdma_init_tx_queue()
1771 q->ndesc = size; in airoha_qdma_init_tx_queue()
1772 q->qdma = qdma; in airoha_qdma_init_tx_queue()
1773 q->free_thr = 1 + MAX_SKB_FRAGS; in airoha_qdma_init_tx_queue()
1775 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), in airoha_qdma_init_tx_queue()
1777 if (!q->entry) in airoha_qdma_init_tx_queue()
1778 return -ENOMEM; in airoha_qdma_init_tx_queue()
1780 q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), in airoha_qdma_init_tx_queue()
1782 if (!q->desc) in airoha_qdma_init_tx_queue()
1783 return -ENOMEM; in airoha_qdma_init_tx_queue()
1785 for (i = 0; i < q->ndesc; i++) { in airoha_qdma_init_tx_queue()
1789 WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val)); in airoha_qdma_init_tx_queue()
1794 FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); in airoha_qdma_init_tx_queue()
1796 FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head)); in airoha_qdma_init_tx_queue()
1804 int id = irq_q - &qdma->q_tx_irq[0]; in airoha_qdma_tx_irq_init()
1805 struct airoha_eth *eth = qdma->eth; in airoha_qdma_tx_irq_init()
1808 netif_napi_add_tx(eth->napi_dev, &irq_q->napi, in airoha_qdma_tx_irq_init()
1810 irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32), in airoha_qdma_tx_irq_init()
1812 if (!irq_q->q) in airoha_qdma_tx_irq_init()
1813 return -ENOMEM; in airoha_qdma_tx_irq_init()
1815 memset(irq_q->q, 0xff, size * sizeof(u32)); in airoha_qdma_tx_irq_init()
1816 irq_q->size = size; in airoha_qdma_tx_irq_init()
1817 irq_q->qdma = qdma; in airoha_qdma_tx_irq_init()
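The completion ring is seeded with 0xff bytes at init, and each consumed entry is rewritten with the 0xff marker ("mark as done" earlier in the listing), so the marker distinguishes fresh hardware reports from slots already handled. A minimal sketch, with an invented helper name:

	static void completion_ring_reset_slot(unsigned int *ring,
					       unsigned int head)
	{
		ring[head] = 0xff;	/* handled; hw overwrites with a new report */
	}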
1832 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { in airoha_qdma_init_tx()
1833 err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma, in airoha_qdma_init_tx()
1839 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { in airoha_qdma_init_tx()
1840 err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma, in airoha_qdma_init_tx()
1851 struct airoha_eth *eth = q->qdma->eth; in airoha_qdma_cleanup_tx_queue()
1853 spin_lock_bh(&q->lock); in airoha_qdma_cleanup_tx_queue()
1854 while (q->queued) { in airoha_qdma_cleanup_tx_queue()
1855 struct airoha_queue_entry *e = &q->entry[q->tail]; in airoha_qdma_cleanup_tx_queue()
1857 dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, in airoha_qdma_cleanup_tx_queue()
1859 dev_kfree_skb_any(e->skb); in airoha_qdma_cleanup_tx_queue()
1860 e->skb = NULL; in airoha_qdma_cleanup_tx_queue()
1862 q->tail = (q->tail + 1) % q->ndesc; in airoha_qdma_cleanup_tx_queue()
1863 q->queued--; in airoha_qdma_cleanup_tx_queue()
1865 spin_unlock_bh(&q->lock); in airoha_qdma_cleanup_tx_queue()
1870 struct airoha_eth *eth = qdma->eth; in airoha_qdma_init_hfwd_queues()
1876 qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr, in airoha_qdma_init_hfwd_queues()
1878 if (!qdma->hfwd.desc) in airoha_qdma_init_hfwd_queues()
1879 return -ENOMEM; in airoha_qdma_init_hfwd_queues()
1884 qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr, in airoha_qdma_init_hfwd_queues()
1886 if (!qdma->hfwd.q) in airoha_qdma_init_hfwd_queues()
1887 return -ENOMEM; in airoha_qdma_init_hfwd_queues()
1929 /* fast-tick 25us */ in airoha_qdma_init_qos()
1963 for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) in airoha_qdma_hw_init()
1972 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { in airoha_qdma_hw_init()
1973 if (!qdma->q_tx[i].ndesc) in airoha_qdma_hw_init()
1998 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_qdma_hw_init()
1999 if (!qdma->q_rx[i].ndesc) in airoha_qdma_hw_init()
2015 u32 intr[ARRAY_SIZE(qdma->irqmask)]; in airoha_irq_handler()
2018 for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) { in airoha_irq_handler()
2020 intr[i] &= qdma->irqmask[i]; in airoha_irq_handler()
2024 if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state)) in airoha_irq_handler()
2031 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_irq_handler()
2032 if (!qdma->q_rx[i].ndesc) in airoha_irq_handler()
2036 napi_schedule(&qdma->q_rx[i].napi); in airoha_irq_handler()
2041 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { in airoha_irq_handler()
2047 napi_schedule(&qdma->q_tx_irq[i].napi); in airoha_irq_handler()
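The hard interrupt handler follows the usual top-half/bottom-half split: latch the per-bank status, AND it with the cached irqmask so only armed sources count, then defer all packet work to NAPI via napi_schedule(). A condensed skeleton, assuming a REG_INT_STATUS(i) register by analogy with REG_INT_ENABLE(i) above (masking details elided):

	static irqreturn_t example_irq_handler(int irq, void *dev_instance)
	{
		struct airoha_qdma *qdma = dev_instance;
		u32 intr[ARRAY_SIZE(qdma->irqmask)];
		int i;

		for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
			intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
			intr[i] &= qdma->irqmask[i];	/* armed sources only */
			airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]); /* ack */
		}

		/* ... mask active sources and napi_schedule() the rings ... */
		return IRQ_HANDLED;
	}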
2058 int err, id = qdma - &eth->qdma[0]; in airoha_qdma_init()
2061 spin_lock_init(&qdma->irq_lock); in airoha_qdma_init()
2062 qdma->eth = eth; in airoha_qdma_init()
2064 res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id); in airoha_qdma_init()
2066 return -ENOMEM; in airoha_qdma_init()
2068 qdma->regs = devm_platform_ioremap_resource_byname(pdev, res); in airoha_qdma_init()
2069 if (IS_ERR(qdma->regs)) in airoha_qdma_init()
2070 return dev_err_probe(eth->dev, PTR_ERR(qdma->regs), in airoha_qdma_init()
2073 qdma->irq = platform_get_irq(pdev, 4 * id); in airoha_qdma_init()
2074 if (qdma->irq < 0) in airoha_qdma_init()
2075 return qdma->irq; in airoha_qdma_init()
2077 err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler, in airoha_qdma_init()
2103 err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), in airoha_hw_init()
2104 eth->xsi_rsts); in airoha_hw_init()
2108 err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts); in airoha_hw_init()
2113 err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts); in airoha_hw_init()
2122 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) { in airoha_hw_init()
2123 err = airoha_qdma_init(pdev, eth, &eth->qdma[i]); in airoha_hw_init()
2128 set_bit(DEV_STATE_INITIALIZED, &eth->state); in airoha_hw_init()
2137 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_hw_cleanup()
2138 if (!qdma->q_rx[i].ndesc) in airoha_hw_cleanup()
2141 napi_disable(&qdma->q_rx[i].napi); in airoha_hw_cleanup()
2142 netif_napi_del(&qdma->q_rx[i].napi); in airoha_hw_cleanup()
2143 airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]); in airoha_hw_cleanup()
2144 if (qdma->q_rx[i].page_pool) in airoha_hw_cleanup()
2145 page_pool_destroy(qdma->q_rx[i].page_pool); in airoha_hw_cleanup()
2148 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { in airoha_hw_cleanup()
2149 napi_disable(&qdma->q_tx_irq[i].napi); in airoha_hw_cleanup()
2150 netif_napi_del(&qdma->q_tx_irq[i].napi); in airoha_hw_cleanup()
2153 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { in airoha_hw_cleanup()
2154 if (!qdma->q_tx[i].ndesc) in airoha_hw_cleanup()
2157 airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); in airoha_hw_cleanup()
2165 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) in airoha_qdma_start_napi()
2166 napi_enable(&qdma->q_tx_irq[i].napi); in airoha_qdma_start_napi()
2168 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_qdma_start_napi()
2169 if (!qdma->q_rx[i].ndesc) in airoha_qdma_start_napi()
2172 napi_enable(&qdma->q_rx[i].napi); in airoha_qdma_start_napi()
2178 struct airoha_eth *eth = port->qdma->eth; in airoha_update_hw_stats()
2181 spin_lock(&port->stats.lock); in airoha_update_hw_stats()
2182 u64_stats_update_begin(&port->stats.syncp); in airoha_update_hw_stats()
2185 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id)); in airoha_update_hw_stats()
2186 port->stats.tx_ok_pkts += ((u64)val << 32); in airoha_update_hw_stats()
2187 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id)); in airoha_update_hw_stats()
2188 port->stats.tx_ok_pkts += val; in airoha_update_hw_stats()
2190 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id)); in airoha_update_hw_stats()
2191 port->stats.tx_ok_bytes += ((u64)val << 32); in airoha_update_hw_stats()
2192 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id)); in airoha_update_hw_stats()
2193 port->stats.tx_ok_bytes += val; in airoha_update_hw_stats()
2195 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id)); in airoha_update_hw_stats()
2196 port->stats.tx_drops += val; in airoha_update_hw_stats()
2198 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id)); in airoha_update_hw_stats()
2199 port->stats.tx_broadcast += val; in airoha_update_hw_stats()
2201 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id)); in airoha_update_hw_stats()
2202 port->stats.tx_multicast += val; in airoha_update_hw_stats()
2204 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id)); in airoha_update_hw_stats()
2205 port->stats.tx_len[i] += val; in airoha_update_hw_stats()
2207 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id)); in airoha_update_hw_stats()
2208 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
2209 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id)); in airoha_update_hw_stats()
2210 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
2212 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id)); in airoha_update_hw_stats()
2213 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
2214 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id)); in airoha_update_hw_stats()
2215 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
2217 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id)); in airoha_update_hw_stats()
2218 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
2219 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id)); in airoha_update_hw_stats()
2220 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
2222 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id)); in airoha_update_hw_stats()
2223 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
2224 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id)); in airoha_update_hw_stats()
2225 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
2227 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id)); in airoha_update_hw_stats()
2228 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
2229 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id)); in airoha_update_hw_stats()
2230 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
2232 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id)); in airoha_update_hw_stats()
2233 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
2234 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id)); in airoha_update_hw_stats()
2235 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
2237 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id)); in airoha_update_hw_stats()
2238 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
2241 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id)); in airoha_update_hw_stats()
2242 port->stats.rx_ok_pkts += ((u64)val << 32); in airoha_update_hw_stats()
2243 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id)); in airoha_update_hw_stats()
2244 port->stats.rx_ok_pkts += val; in airoha_update_hw_stats()
2246 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id)); in airoha_update_hw_stats()
2247 port->stats.rx_ok_bytes += ((u64)val << 32); in airoha_update_hw_stats()
2248 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id)); in airoha_update_hw_stats()
2249 port->stats.rx_ok_bytes += val; in airoha_update_hw_stats()
2251 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id)); in airoha_update_hw_stats()
2252 port->stats.rx_drops += val; in airoha_update_hw_stats()
2254 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id)); in airoha_update_hw_stats()
2255 port->stats.rx_broadcast += val; in airoha_update_hw_stats()
2257 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id)); in airoha_update_hw_stats()
2258 port->stats.rx_multicast += val; in airoha_update_hw_stats()
2260 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id)); in airoha_update_hw_stats()
2261 port->stats.rx_errors += val; in airoha_update_hw_stats()
2263 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id)); in airoha_update_hw_stats()
2264 port->stats.rx_crc_error += val; in airoha_update_hw_stats()
2266 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id)); in airoha_update_hw_stats()
2267 port->stats.rx_over_errors += val; in airoha_update_hw_stats()
2269 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id)); in airoha_update_hw_stats()
2270 port->stats.rx_fragment += val; in airoha_update_hw_stats()
2272 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id)); in airoha_update_hw_stats()
2273 port->stats.rx_jabber += val; in airoha_update_hw_stats()
2276 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id)); in airoha_update_hw_stats()
2277 port->stats.rx_len[i] += val; in airoha_update_hw_stats()
2279 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id)); in airoha_update_hw_stats()
2280 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
2281 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id)); in airoha_update_hw_stats()
2282 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
2284 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id)); in airoha_update_hw_stats()
2285 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
2286 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id)); in airoha_update_hw_stats()
2287 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
2289 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id)); in airoha_update_hw_stats()
2290 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
2291 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id)); in airoha_update_hw_stats()
2292 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
2294 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id)); in airoha_update_hw_stats()
2295 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
2296 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id)); in airoha_update_hw_stats()
2297 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
2299 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id)); in airoha_update_hw_stats()
2300 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
2301 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id)); in airoha_update_hw_stats()
2302 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
2304 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id)); in airoha_update_hw_stats()
2305 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
2306 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id)); in airoha_update_hw_stats()
2307 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
2309 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id)); in airoha_update_hw_stats()
2310 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
2313 airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id), in airoha_update_hw_stats()
2316 u64_stats_update_end(&port->stats.syncp); in airoha_update_hw_stats()
2317 spin_unlock(&port->stats.lock); in airoha_update_hw_stats()
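Each 64-bit MIB counter is exposed as an H/L register pair: the code reads the high word, shifts it up, and adds the low word, then clears the MIB block via REG_FE_GDM_MIB_CLEAR so the next pass accumulates deltas. The repeated pattern could be captured by a helper of roughly this shape (name and argument order are illustrative):

	static u64 airoha_fe_rr64(struct airoha_eth *eth, u32 hi_reg, u32 lo_reg)
	{
		u64 hi = airoha_fe_rr(eth, hi_reg);
		u32 lo = airoha_fe_rr(eth, lo_reg);

		return (hi << 32) | lo;
	}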
2323 struct airoha_qdma *qdma = port->qdma; in airoha_dev_open()
2327 err = airoha_set_gdm_ports(qdma->eth, true); in airoha_dev_open()
2332 airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id), in airoha_dev_open()
2335 airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id), in airoha_dev_open()
2348 struct airoha_qdma *qdma = port->qdma; in airoha_dev_stop()
2352 err = airoha_set_gdm_ports(qdma->eth, false); in airoha_dev_stop()
2360 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { in airoha_dev_stop()
2361 if (!qdma->q_tx[i].ndesc) in airoha_dev_stop()
2364 airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); in airoha_dev_stop()
2380 airoha_set_macaddr(port, dev->dev_addr); in airoha_dev_set_macaddr()
2389 airoha_set_macaddr(port, dev->dev_addr); in airoha_dev_init()
2402 start = u64_stats_fetch_begin(&port->stats.syncp); in airoha_dev_get_stats64()
2403 storage->rx_packets = port->stats.rx_ok_pkts; in airoha_dev_get_stats64()
2404 storage->tx_packets = port->stats.tx_ok_pkts; in airoha_dev_get_stats64()
2405 storage->rx_bytes = port->stats.rx_ok_bytes; in airoha_dev_get_stats64()
2406 storage->tx_bytes = port->stats.tx_ok_bytes; in airoha_dev_get_stats64()
2407 storage->multicast = port->stats.rx_multicast; in airoha_dev_get_stats64()
2408 storage->rx_errors = port->stats.rx_errors; in airoha_dev_get_stats64()
2409 storage->rx_dropped = port->stats.rx_drops; in airoha_dev_get_stats64()
2410 storage->tx_dropped = port->stats.tx_drops; in airoha_dev_get_stats64()
2411 storage->rx_crc_errors = port->stats.rx_crc_error; in airoha_dev_get_stats64()
2412 storage->rx_over_errors = port->stats.rx_over_errors; in airoha_dev_get_stats64()
2413 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); in airoha_dev_get_stats64()
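Note the division of labour in the stats locking: u64_stats_sync only protects readers from torn 64-bit loads on 32-bit hosts, it does not serialize writers, which is why airoha_update_hw_stats() additionally takes stats.lock around its update_begin/update_end window. The writer-side bracket, for reference:

	static void example_stats_writer(struct airoha_gdm_port *port, u32 val)
	{
		spin_lock(&port->stats.lock);		/* serializes writers */
		u64_stats_update_begin(&port->stats.syncp);
		port->stats.rx_ok_pkts += val;		/* ... and the rest */
		u64_stats_update_end(&port->stats.syncp);
		spin_unlock(&port->stats.lock);
	}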
2423 struct airoha_qdma *qdma = port->qdma; in airoha_dev_xmit()
2424 u32 nr_frags = 1 + sinfo->nr_frags; in airoha_dev_xmit()
2427 void *data = skb->data; in airoha_dev_xmit()
2431 if (skb->ip_summed == CHECKSUM_PARTIAL) in airoha_dev_xmit()
2441 if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { in airoha_dev_xmit()
2442 __be16 csum = cpu_to_be16(sinfo->gso_size); in airoha_dev_xmit()
2444 tcp_hdr(skb)->check = (__force __sum16)csum; in airoha_dev_xmit()
2449 fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id; in airoha_dev_xmit()
2453 q = &qdma->q_tx[qid]; in airoha_dev_xmit()
2454 if (WARN_ON_ONCE(!q->ndesc)) in airoha_dev_xmit()
2457 spin_lock_bh(&q->lock); in airoha_dev_xmit()
2460 if (q->queued + nr_frags > q->ndesc) { in airoha_dev_xmit()
2463 spin_unlock_bh(&q->lock); in airoha_dev_xmit()
2467 index = q->head; in airoha_dev_xmit()
2469 struct airoha_qdma_desc *desc = &q->desc[index]; in airoha_dev_xmit()
2470 struct airoha_queue_entry *e = &q->entry[index]; in airoha_dev_xmit()
2471 skb_frag_t *frag = &sinfo->frags[i]; in airoha_dev_xmit()
2475 addr = dma_map_single(dev->dev.parent, data, len, in airoha_dev_xmit()
2477 if (unlikely(dma_mapping_error(dev->dev.parent, addr))) in airoha_dev_xmit()
2480 index = (index + 1) % q->ndesc; in airoha_dev_xmit()
2483 if (i < nr_frags - 1) in airoha_dev_xmit()
2485 WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); in airoha_dev_xmit()
2486 WRITE_ONCE(desc->addr, cpu_to_le32(addr)); in airoha_dev_xmit()
2488 WRITE_ONCE(desc->data, cpu_to_le32(val)); in airoha_dev_xmit()
2489 WRITE_ONCE(desc->msg0, cpu_to_le32(msg0)); in airoha_dev_xmit()
2490 WRITE_ONCE(desc->msg1, cpu_to_le32(msg1)); in airoha_dev_xmit()
2491 WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff)); in airoha_dev_xmit()
2493 e->skb = i ? NULL : skb; in airoha_dev_xmit()
2494 e->dma_addr = addr; in airoha_dev_xmit()
2495 e->dma_len = len; in airoha_dev_xmit()
2501 q->head = index; in airoha_dev_xmit()
2502 q->queued += i; in airoha_dev_xmit()
2505 netdev_tx_sent_queue(txq, skb->len); in airoha_dev_xmit()
2510 FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); in airoha_dev_xmit()
2512 if (q->ndesc - q->queued < q->free_thr) in airoha_dev_xmit()
2515 spin_unlock_bh(&q->lock); in airoha_dev_xmit()
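free_thr is initialized to 1 + MAX_SKB_FRAGS in airoha_qdma_init_tx_queue(): the worst-case skb needs one descriptor for the head plus one per fragment. The xmit path stops the queue when fewer than that remain, and the tx completion path wakes it once the headroom is back, so a full ring never has to fail an skb mid-mapping. The predicate both sides share, sketched with an illustrative name:

	static bool airoha_txq_low_on_desc(const struct airoha_queue *q)
	{
		return q->ndesc - q->queued < q->free_thr;
	}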
2520 for (i--; i >= 0; i--) { in airoha_dev_xmit()
2521 index = (q->head + i) % q->ndesc; in airoha_dev_xmit()
2522 dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr, in airoha_dev_xmit()
2523 q->entry[index].dma_len, DMA_TO_DEVICE); in airoha_dev_xmit()
2526 spin_unlock_bh(&q->lock); in airoha_dev_xmit()
2529 dev->stats.tx_dropped++; in airoha_dev_xmit()
2538 struct airoha_eth *eth = port->qdma->eth; in airoha_ethtool_get_drvinfo()
2540 strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver)); in airoha_ethtool_get_drvinfo()
2541 strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info)); in airoha_ethtool_get_drvinfo()
2552 start = u64_stats_fetch_begin(&port->stats.syncp); in airoha_ethtool_get_mac_stats()
2553 stats->MulticastFramesXmittedOK = port->stats.tx_multicast; in airoha_ethtool_get_mac_stats()
2554 stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast; in airoha_ethtool_get_mac_stats()
2555 stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast; in airoha_ethtool_get_mac_stats()
2556 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); in airoha_ethtool_get_mac_stats()
2576 struct airoha_hw_stats *hw_stats = &port->stats; in airoha_ethtool_get_rmon_stats()
2580 ARRAY_SIZE(hw_stats->tx_len) + 1); in airoha_ethtool_get_rmon_stats()
2582 ARRAY_SIZE(hw_stats->rx_len) + 1); in airoha_ethtool_get_rmon_stats()
2589 start = u64_stats_fetch_begin(&port->stats.syncp); in airoha_ethtool_get_rmon_stats()
2590 stats->fragments = hw_stats->rx_fragment; in airoha_ethtool_get_rmon_stats()
2591 stats->jabbers = hw_stats->rx_jabber; in airoha_ethtool_get_rmon_stats()
2592 for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1; in airoha_ethtool_get_rmon_stats()
2594 stats->hist[i] = hw_stats->rx_len[i]; in airoha_ethtool_get_rmon_stats()
2595 stats->hist_tx[i] = hw_stats->tx_len[i]; in airoha_ethtool_get_rmon_stats()
2597 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); in airoha_ethtool_get_rmon_stats()
2625 dev_err(eth->dev, "missing gdm port id\n"); in airoha_alloc_gdm_port()
2626 return -EINVAL; in airoha_alloc_gdm_port()
2630 index = id - 1; in airoha_alloc_gdm_port()
2632 if (!id || id > ARRAY_SIZE(eth->ports)) { in airoha_alloc_gdm_port()
2633 dev_err(eth->dev, "invalid gdm port id: %d\n", id); in airoha_alloc_gdm_port()
2634 return -EINVAL; in airoha_alloc_gdm_port()
2637 if (eth->ports[index]) { in airoha_alloc_gdm_port()
2638 dev_err(eth->dev, "duplicate gdm port id: %d\n", id); in airoha_alloc_gdm_port()
2639 return -EINVAL; in airoha_alloc_gdm_port()
2642 dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port), in airoha_alloc_gdm_port()
2645 dev_err(eth->dev, "alloc_etherdev failed\n"); in airoha_alloc_gdm_port()
2646 return -ENOMEM; in airoha_alloc_gdm_port()
2649 qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA]; in airoha_alloc_gdm_port()
2650 dev->netdev_ops = &airoha_netdev_ops; in airoha_alloc_gdm_port()
2651 dev->ethtool_ops = &airoha_ethtool_ops; in airoha_alloc_gdm_port()
2652 dev->max_mtu = AIROHA_MAX_MTU; in airoha_alloc_gdm_port()
2653 dev->watchdog_timeo = 5 * HZ; in airoha_alloc_gdm_port()
2654 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | in airoha_alloc_gdm_port()
2657 dev->features |= dev->hw_features; in airoha_alloc_gdm_port()
2658 dev->dev.of_node = np; in airoha_alloc_gdm_port()
2659 dev->irq = qdma->irq; in airoha_alloc_gdm_port()
2660 SET_NETDEV_DEV(dev, eth->dev); in airoha_alloc_gdm_port()
2664 if (err == -EPROBE_DEFER) in airoha_alloc_gdm_port()
2668 dev_info(eth->dev, "generated random MAC address %pM\n", in airoha_alloc_gdm_port()
2669 dev->dev_addr); in airoha_alloc_gdm_port()
2673 u64_stats_init(&port->stats.syncp); in airoha_alloc_gdm_port()
2674 spin_lock_init(&port->stats.lock); in airoha_alloc_gdm_port()
2675 port->qdma = qdma; in airoha_alloc_gdm_port()
2676 port->dev = dev; in airoha_alloc_gdm_port()
2677 port->id = id; in airoha_alloc_gdm_port()
2678 eth->ports[index] = port; in airoha_alloc_gdm_port()
2689 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); in airoha_probe()
2691 return -ENOMEM; in airoha_probe()
2693 eth->dev = &pdev->dev; in airoha_probe()
2695 err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32)); in airoha_probe()
2697 dev_err(eth->dev, "failed configuring DMA mask\n"); in airoha_probe()
2701 eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe"); in airoha_probe()
2702 if (IS_ERR(eth->fe_regs)) in airoha_probe()
2703 return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs), in airoha_probe()
2706 eth->rsts[0].id = "fe"; in airoha_probe()
2707 eth->rsts[1].id = "pdma"; in airoha_probe()
2708 eth->rsts[2].id = "qdma"; in airoha_probe()
2709 err = devm_reset_control_bulk_get_exclusive(eth->dev, in airoha_probe()
2710 ARRAY_SIZE(eth->rsts), in airoha_probe()
2711 eth->rsts); in airoha_probe()
2713 dev_err(eth->dev, "failed to get bulk reset lines\n"); in airoha_probe()
2717 eth->xsi_rsts[0].id = "xsi-mac"; in airoha_probe()
2718 eth->xsi_rsts[1].id = "hsi0-mac"; in airoha_probe()
2719 eth->xsi_rsts[2].id = "hsi1-mac"; in airoha_probe()
2720 eth->xsi_rsts[3].id = "hsi-mac"; in airoha_probe()
2721 eth->xsi_rsts[4].id = "xfp-mac"; in airoha_probe()
2722 err = devm_reset_control_bulk_get_exclusive(eth->dev, in airoha_probe()
2723 ARRAY_SIZE(eth->xsi_rsts), in airoha_probe()
2724 eth->xsi_rsts); in airoha_probe()
2726 dev_err(eth->dev, "failed to get bulk xsi reset lines\n"); in airoha_probe()
2730 eth->napi_dev = alloc_netdev_dummy(0); in airoha_probe()
2731 if (!eth->napi_dev) in airoha_probe()
2732 return -ENOMEM; in airoha_probe()
2735 eth->napi_dev->threaded = true; in airoha_probe()
2736 strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name)); in airoha_probe()
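	/* Note on the lines above: a single dummy netdev hosts every NAPI
	 * context, decoupling qdma ring polling from the per-GDM netdevs;
	 * ->threaded = true moves each poll loop into its own kernel thread
	 * instead of running in softirq context.
	 */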
2743 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) in airoha_probe()
2744 airoha_qdma_start_napi(&eth->qdma[i]); in airoha_probe()
2746 for_each_child_of_node(pdev->dev.of_node, np) { in airoha_probe()
2747 if (!of_device_is_compatible(np, "airoha,eth-mac")) in airoha_probe()
2763 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) in airoha_probe()
2764 airoha_hw_cleanup(&eth->qdma[i]); in airoha_probe()
2766 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { in airoha_probe()
2767 struct airoha_gdm_port *port = eth->ports[i]; in airoha_probe()
2769 if (port && port->dev->reg_state == NETREG_REGISTERED) in airoha_probe()
2770 unregister_netdev(port->dev); in airoha_probe()
2772 free_netdev(eth->napi_dev); in airoha_probe()
2783 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) in airoha_remove()
2784 airoha_hw_cleanup(&eth->qdma[i]); in airoha_remove()
2786 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { in airoha_remove()
2787 struct airoha_gdm_port *port = eth->ports[i]; in airoha_remove()
2792 airoha_dev_stop(port->dev); in airoha_remove()
2793 unregister_netdev(port->dev); in airoha_remove()
2795 free_netdev(eth->napi_dev); in airoha_remove()
2801 { .compatible = "airoha,en7581-eth" },
2818 MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");