Lines Matching +full:qdma +full:- +full:error

1 // SPDX-License-Identifier: GPL-2.0-only
41 struct airoha_qdma *qdma = irq_bank->qdma; in airoha_qdma_set_irqmask() local
42 int bank = irq_bank - &qdma->irq_banks[0]; in airoha_qdma_set_irqmask()
45 if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask))) in airoha_qdma_set_irqmask()
48 spin_lock_irqsave(&irq_bank->irq_lock, flags); in airoha_qdma_set_irqmask()
50 irq_bank->irqmask[index] &= ~clear; in airoha_qdma_set_irqmask()
51 irq_bank->irqmask[index] |= set; in airoha_qdma_set_irqmask()
52 airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index), in airoha_qdma_set_irqmask()
53 irq_bank->irqmask[index]); in airoha_qdma_set_irqmask()
57 airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index)); in airoha_qdma_set_irqmask()
59 spin_unlock_irqrestore(&irq_bank->irq_lock, flags); in airoha_qdma_set_irqmask()
76 struct airoha_eth *eth = port->qdma->eth; in airoha_set_macaddr()
107 struct airoha_eth *eth = port->qdma->eth; in airoha_set_vip_for_gdm_port()
110 switch (port->id) { in airoha_set_vip_for_gdm_port()
138 for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) in airoha_fe_maccr_init()
203 /* ETH->ETH_P_1905 (0x893a) */ in airoha_fe_vip_setup()
257 all_rsv += (val - orig_val); in airoha_fe_set_pse_oq_rsv()
264 tmp = fq_limit - all_rsv - 0x20; in airoha_fe_set_pse_oq_rsv()
269 tmp = fq_limit - all_rsv - 0x100; in airoha_fe_set_pse_oq_rsv()
332 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++) in airoha_fe_pse_ports_init()
481 /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1 in airoha_fe_init()
482 * connect other rings to PSE Port0 OQ-0 in airoha_fe_init()
499 /* NPU Core-3, NPU Bridge Channel-3 */ in airoha_fe_init()
504 /* QDMA LAN, RX Ring-22 */ in airoha_fe_init()
554 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); in airoha_qdma_fill_rx_queue()
555 struct airoha_qdma *qdma = q->qdma; in airoha_qdma_fill_rx_queue() local
556 struct airoha_eth *eth = qdma->eth; in airoha_qdma_fill_rx_queue()
557 int qid = q - &qdma->q_rx[0]; in airoha_qdma_fill_rx_queue()
560 while (q->queued < q->ndesc - 1) { in airoha_qdma_fill_rx_queue()
561 struct airoha_queue_entry *e = &q->entry[q->head]; in airoha_qdma_fill_rx_queue()
562 struct airoha_qdma_desc *desc = &q->desc[q->head]; in airoha_qdma_fill_rx_queue()
567 page = page_pool_dev_alloc_frag(q->page_pool, &offset, in airoha_qdma_fill_rx_queue()
568 q->buf_size); in airoha_qdma_fill_rx_queue()
572 q->head = (q->head + 1) % q->ndesc; in airoha_qdma_fill_rx_queue()
573 q->queued++; in airoha_qdma_fill_rx_queue()
576 e->buf = page_address(page) + offset; in airoha_qdma_fill_rx_queue()
577 e->dma_addr = page_pool_get_dma_addr(page) + offset; in airoha_qdma_fill_rx_queue()
578 e->dma_len = SKB_WITH_OVERHEAD(q->buf_size); in airoha_qdma_fill_rx_queue()
580 dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len, in airoha_qdma_fill_rx_queue()
583 val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len); in airoha_qdma_fill_rx_queue()
584 WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); in airoha_qdma_fill_rx_queue()
585 WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr)); in airoha_qdma_fill_rx_queue()
586 val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head); in airoha_qdma_fill_rx_queue()
587 WRITE_ONCE(desc->data, cpu_to_le32(val)); in airoha_qdma_fill_rx_queue()
588 WRITE_ONCE(desc->msg0, 0); in airoha_qdma_fill_rx_queue()
589 WRITE_ONCE(desc->msg1, 0); in airoha_qdma_fill_rx_queue()
590 WRITE_ONCE(desc->msg2, 0); in airoha_qdma_fill_rx_queue()
591 WRITE_ONCE(desc->msg3, 0); in airoha_qdma_fill_rx_queue()
593 airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), in airoha_qdma_fill_rx_queue()
595 FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head)); in airoha_qdma_fill_rx_queue()
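The refill loop above is the producer half of a single-producer/single-consumer ring: software fills descriptors at q->head, deliberately stops one short of the ring size, then publishes the new head to the hardware through RX_RING_CPU_IDX_MASK. A minimal stand-alone sketch of that invariant (struct and helper names are hypothetical, not from the driver):

    struct ring {
            unsigned int head;      /* next slot software will fill */
            unsigned int tail;      /* next slot hardware will complete */
            unsigned int queued;    /* slots currently owned by hw */
            unsigned int ndesc;     /* ring size */
    };

    static void ring_refill(struct ring *r)
    {
            /* keep one descriptor unused so "full" can never be
             * confused with "empty" when head wraps onto tail
             */
            while (r->queued < r->ndesc - 1) {
                    /* ...allocate a buffer, program descriptor r->head... */
                    r->head = (r->head + 1) % r->ndesc;
                    r->queued++;
            }
            /* finally publish r->head to the hw CPU index register */
    }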
604 u32 port, sport, msg1 = le32_to_cpu(desc->msg1); in airoha_qdma_get_gdm_port()
612 port = sport - 1; in airoha_qdma_get_gdm_port()
615 return -EINVAL; in airoha_qdma_get_gdm_port()
618 return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port; in airoha_qdma_get_gdm_port()
623 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); in airoha_qdma_rx_process()
624 struct airoha_qdma *qdma = q->qdma; in airoha_qdma_rx_process() local
625 struct airoha_eth *eth = qdma->eth; in airoha_qdma_rx_process()
626 int qid = q - &qdma->q_rx[0]; in airoha_qdma_rx_process()
630 struct airoha_queue_entry *e = &q->entry[q->tail]; in airoha_qdma_rx_process()
631 struct airoha_qdma_desc *desc = &q->desc[q->tail]; in airoha_qdma_rx_process()
632 u32 hash, reason, msg1 = le32_to_cpu(desc->msg1); in airoha_qdma_rx_process()
633 struct page *page = virt_to_head_page(e->buf); in airoha_qdma_rx_process()
634 u32 desc_ctrl = le32_to_cpu(desc->ctrl); in airoha_qdma_rx_process()
641 q->tail = (q->tail + 1) % q->ndesc; in airoha_qdma_rx_process()
642 q->queued--; in airoha_qdma_rx_process()
644 dma_sync_single_for_cpu(eth->dev, e->dma_addr, in airoha_qdma_rx_process()
645 SKB_WITH_OVERHEAD(q->buf_size), dir); in airoha_qdma_rx_process()
648 data_len = q->skb ? q->buf_size in airoha_qdma_rx_process()
649 : SKB_WITH_OVERHEAD(q->buf_size); in airoha_qdma_rx_process()
654 if (p < 0 || !eth->ports[p]) in airoha_qdma_rx_process()
657 port = eth->ports[p]; in airoha_qdma_rx_process()
658 if (!q->skb) { /* first buffer */ in airoha_qdma_rx_process()
659 q->skb = napi_build_skb(e->buf, q->buf_size); in airoha_qdma_rx_process()
660 if (!q->skb) in airoha_qdma_rx_process()
663 __skb_put(q->skb, len); in airoha_qdma_rx_process()
664 skb_mark_for_recycle(q->skb); in airoha_qdma_rx_process()
665 q->skb->dev = port->dev; in airoha_qdma_rx_process()
666 q->skb->protocol = eth_type_trans(q->skb, port->dev); in airoha_qdma_rx_process()
667 q->skb->ip_summed = CHECKSUM_UNNECESSARY; in airoha_qdma_rx_process()
668 skb_record_rx_queue(q->skb, qid); in airoha_qdma_rx_process()
670 struct skb_shared_info *shinfo = skb_shinfo(q->skb); in airoha_qdma_rx_process()
671 int nr_frags = shinfo->nr_frags; in airoha_qdma_rx_process()
673 if (nr_frags >= ARRAY_SIZE(shinfo->frags)) in airoha_qdma_rx_process()
676 skb_add_rx_frag(q->skb, nr_frags, page, in airoha_qdma_rx_process()
677 e->buf - page_address(page), len, in airoha_qdma_rx_process()
678 q->buf_size); in airoha_qdma_rx_process()
684 if (netdev_uses_dsa(port->dev)) { in airoha_qdma_rx_process()
691 le32_to_cpu(desc->msg0)); in airoha_qdma_rx_process()
693 if (sptag < ARRAY_SIZE(port->dsa_meta) && in airoha_qdma_rx_process()
694 port->dsa_meta[sptag]) in airoha_qdma_rx_process()
695 skb_dst_set_noref(q->skb, in airoha_qdma_rx_process()
696 &port->dsa_meta[sptag]->dst); in airoha_qdma_rx_process()
701 skb_set_hash(q->skb, jhash_1word(hash, 0), in airoha_qdma_rx_process()
706 airoha_ppe_check_skb(eth->ppe, q->skb, hash); in airoha_qdma_rx_process()
709 napi_gro_receive(&q->napi, q->skb); in airoha_qdma_rx_process()
710 q->skb = NULL; in airoha_qdma_rx_process()
713 if (q->skb) { in airoha_qdma_rx_process()
714 dev_kfree_skb(q->skb); in airoha_qdma_rx_process()
715 q->skb = NULL; in airoha_qdma_rx_process()
717 page_pool_put_full_page(q->page_pool, page, true); in airoha_qdma_rx_process()
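Because the search only shows matching lines, the shape of the scatter-gather path is easy to miss: the first completed buffer becomes the skb head via napi_build_skb(), each follow-up buffer is appended as a page fragment, and the skb is handed to GRO only once a descriptor without the "more" bit closes the frame. A condensed reconstruction, with error paths dropped and frame_done standing in for the elided ctrl-bit test:

    if (!q->skb) {                          /* first buffer */
            q->skb = napi_build_skb(e->buf, q->buf_size);
            __skb_put(q->skb, len);
            skb_mark_for_recycle(q->skb);   /* enable page_pool recycling */
    } else {                                /* follow-up buffer */
            skb_add_rx_frag(q->skb, skb_shinfo(q->skb)->nr_frags, page,
                            e->buf - page_address(page), len, q->buf_size);
    }

    if (frame_done) {                       /* no "more" bit in desc->ctrl */
            napi_gro_receive(&q->napi, q->skb);
            q->skb = NULL;
    }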
731 cur = airoha_qdma_rx_process(q, budget - done); in airoha_qdma_rx_napi_poll()
736 struct airoha_qdma *qdma = q->qdma; in airoha_qdma_rx_napi_poll() local
737 int i, qid = q - &qdma->q_rx[0]; in airoha_qdma_rx_napi_poll()
741 for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) { in airoha_qdma_rx_napi_poll()
745 airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg, in airoha_qdma_rx_napi_poll()
754 struct airoha_qdma *qdma, int ndesc) in airoha_qdma_init_rx_queue() argument
763 .dev = qdma->eth->dev, in airoha_qdma_init_rx_queue()
764 .napi = &q->napi, in airoha_qdma_init_rx_queue()
766 struct airoha_eth *eth = qdma->eth; in airoha_qdma_init_rx_queue()
767 int qid = q - &qdma->q_rx[0], thr; in airoha_qdma_init_rx_queue()
770 q->buf_size = PAGE_SIZE / 2; in airoha_qdma_init_rx_queue()
771 q->ndesc = ndesc; in airoha_qdma_init_rx_queue()
772 q->qdma = qdma; in airoha_qdma_init_rx_queue()
774 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), in airoha_qdma_init_rx_queue()
776 if (!q->entry) in airoha_qdma_init_rx_queue()
777 return -ENOMEM; in airoha_qdma_init_rx_queue()
779 q->page_pool = page_pool_create(&pp_params); in airoha_qdma_init_rx_queue()
780 if (IS_ERR(q->page_pool)) { in airoha_qdma_init_rx_queue()
781 int err = PTR_ERR(q->page_pool); in airoha_qdma_init_rx_queue()
783 q->page_pool = NULL; in airoha_qdma_init_rx_queue()
787 q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), in airoha_qdma_init_rx_queue()
789 if (!q->desc) in airoha_qdma_init_rx_queue()
790 return -ENOMEM; in airoha_qdma_init_rx_queue()
792 netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll); in airoha_qdma_init_rx_queue()
794 airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr); in airoha_qdma_init_rx_queue()
795 airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), in airoha_qdma_init_rx_queue()
800 airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK, in airoha_qdma_init_rx_queue()
802 airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK, in airoha_qdma_init_rx_queue()
803 FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head)); in airoha_qdma_init_rx_queue()
804 airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK); in airoha_qdma_init_rx_queue()
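The search elides most of the pp_params initializer declared around line 755. For a receive pool where the page_pool both DMA-maps pages and syncs them for the device, the initializer plausibly looks like the sketch below; only .dev and .napi are visible in the listing, the remaining fields and values are illustrative assumptions:

    struct page_pool_params pp_params = {
            .order          = 0,                    /* single pages */
            .pool_size      = 256,                  /* illustrative */
            .flags          = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
            .dma_dir        = DMA_FROM_DEVICE,
            .max_len        = PAGE_SIZE,
            .nid            = NUMA_NO_NODE,
            .dev            = qdma->eth->dev,
            .napi           = &q->napi,
    };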
813 struct airoha_eth *eth = q->qdma->eth; in airoha_qdma_cleanup_rx_queue()
815 while (q->queued) { in airoha_qdma_cleanup_rx_queue()
816 struct airoha_queue_entry *e = &q->entry[q->tail]; in airoha_qdma_cleanup_rx_queue()
817 struct page *page = virt_to_head_page(e->buf); in airoha_qdma_cleanup_rx_queue()
819 dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len, in airoha_qdma_cleanup_rx_queue()
820 page_pool_get_dma_dir(q->page_pool)); in airoha_qdma_cleanup_rx_queue()
821 page_pool_put_full_page(q->page_pool, page, false); in airoha_qdma_cleanup_rx_queue()
822 q->tail = (q->tail + 1) % q->ndesc; in airoha_qdma_cleanup_rx_queue()
823 q->queued--; in airoha_qdma_cleanup_rx_queue()
827 static int airoha_qdma_init_rx(struct airoha_qdma *qdma) in airoha_qdma_init_rx() argument
831 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_qdma_init_rx()
835 /* rx-queue not bound to irq */ in airoha_qdma_init_rx()
839 err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma, in airoha_qdma_init_rx()
852 struct airoha_qdma *qdma; in airoha_qdma_tx_napi_poll() local
857 qdma = irq_q->qdma; in airoha_qdma_tx_napi_poll()
858 id = irq_q - &qdma->q_tx_irq[0]; in airoha_qdma_tx_napi_poll()
859 eth = qdma->eth; in airoha_qdma_tx_napi_poll()
861 status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id)); in airoha_qdma_tx_napi_poll()
863 head = head % irq_q->size; in airoha_qdma_tx_napi_poll()
867 u32 qid, val = irq_q->q[head]; in airoha_qdma_tx_napi_poll()
877 irq_q->q[head] = 0xff; /* mark as done */ in airoha_qdma_tx_napi_poll()
878 head = (head + 1) % irq_q->size; in airoha_qdma_tx_napi_poll()
879 irq_queued--; in airoha_qdma_tx_napi_poll()
883 if (qid >= ARRAY_SIZE(qdma->q_tx)) in airoha_qdma_tx_napi_poll()
886 q = &qdma->q_tx[qid]; in airoha_qdma_tx_napi_poll()
887 if (!q->ndesc) in airoha_qdma_tx_napi_poll()
891 if (index >= q->ndesc) in airoha_qdma_tx_napi_poll()
894 spin_lock_bh(&q->lock); in airoha_qdma_tx_napi_poll()
896 if (!q->queued) in airoha_qdma_tx_napi_poll()
899 desc = &q->desc[index]; in airoha_qdma_tx_napi_poll()
900 desc_ctrl = le32_to_cpu(desc->ctrl); in airoha_qdma_tx_napi_poll()
906 e = &q->entry[index]; in airoha_qdma_tx_napi_poll()
907 skb = e->skb; in airoha_qdma_tx_napi_poll()
909 dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, in airoha_qdma_tx_napi_poll()
912 WRITE_ONCE(desc->msg0, 0); in airoha_qdma_tx_napi_poll()
913 WRITE_ONCE(desc->msg1, 0); in airoha_qdma_tx_napi_poll()
914 q->queued--; in airoha_qdma_tx_napi_poll()
916 /* completion ring can report out-of-order indexes if hw QoS in airoha_qdma_tx_napi_poll()
918 * to same DMA ring. Take into account possible out-of-order in airoha_qdma_tx_napi_poll()
921 while (q->tail != q->head && !q->entry[q->tail].dma_addr) in airoha_qdma_tx_napi_poll()
922 q->tail = (q->tail + 1) % q->ndesc; in airoha_qdma_tx_napi_poll()
928 txq = netdev_get_tx_queue(skb->dev, queue); in airoha_qdma_tx_napi_poll()
929 netdev_tx_completed_queue(txq, 1, skb->len); in airoha_qdma_tx_napi_poll()
931 q->ndesc - q->queued >= q->free_thr) in airoha_qdma_tx_napi_poll()
937 spin_unlock_bh(&q->lock); in airoha_qdma_tx_napi_poll()
944 airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id), in airoha_qdma_tx_napi_poll()
946 airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id), in airoha_qdma_tx_napi_poll()
951 airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0, in airoha_qdma_tx_napi_poll()
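The comment at lines 916-918 explains the trickiest part of this poll loop: when several hardware QoS queues feed one DMA ring, completions may arrive out of order, so a slot is freed by its reported index rather than at the tail, and (on lines the search elided) e->dma_addr is presumably zeroed to mark the slot done; the tail then only sweeps forward across already-completed slots. A stand-alone model of that reclaim, with hypothetical names:

    struct slot { dma_addr_t dma_addr; };

    struct cring {
            struct slot *slot;
            unsigned int head, tail, queued, ndesc;
    };

    static void cring_complete(struct cring *r, unsigned int index)
    {
            r->slot[index].dma_addr = 0;    /* done, possibly not at tail */
            r->queued--;

            /* advance tail over every contiguously completed slot */
            while (r->tail != r->head && !r->slot[r->tail].dma_addr)
                    r->tail = (r->tail + 1) % r->ndesc;
    }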
958 struct airoha_qdma *qdma, int size) in airoha_qdma_init_tx_queue() argument
960 struct airoha_eth *eth = qdma->eth; in airoha_qdma_init_tx_queue()
961 int i, qid = q - &qdma->q_tx[0]; in airoha_qdma_init_tx_queue()
964 spin_lock_init(&q->lock); in airoha_qdma_init_tx_queue()
965 q->ndesc = size; in airoha_qdma_init_tx_queue()
966 q->qdma = qdma; in airoha_qdma_init_tx_queue()
967 q->free_thr = 1 + MAX_SKB_FRAGS; in airoha_qdma_init_tx_queue()
969 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), in airoha_qdma_init_tx_queue()
971 if (!q->entry) in airoha_qdma_init_tx_queue()
972 return -ENOMEM; in airoha_qdma_init_tx_queue()
974 q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), in airoha_qdma_init_tx_queue()
976 if (!q->desc) in airoha_qdma_init_tx_queue()
977 return -ENOMEM; in airoha_qdma_init_tx_queue()
979 for (i = 0; i < q->ndesc; i++) { in airoha_qdma_init_tx_queue()
983 WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val)); in airoha_qdma_init_tx_queue()
987 airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid), in airoha_qdma_init_tx_queue()
990 airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr); in airoha_qdma_init_tx_queue()
991 airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, in airoha_qdma_init_tx_queue()
992 FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); in airoha_qdma_init_tx_queue()
993 airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK, in airoha_qdma_init_tx_queue()
994 FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head)); in airoha_qdma_init_tx_queue()
1000 struct airoha_qdma *qdma, int size) in airoha_qdma_tx_irq_init() argument
1002 int id = irq_q - &qdma->q_tx_irq[0]; in airoha_qdma_tx_irq_init()
1003 struct airoha_eth *eth = qdma->eth; in airoha_qdma_tx_irq_init()
1006 netif_napi_add_tx(eth->napi_dev, &irq_q->napi, in airoha_qdma_tx_irq_init()
1008 irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32), in airoha_qdma_tx_irq_init()
1010 if (!irq_q->q) in airoha_qdma_tx_irq_init()
1011 return -ENOMEM; in airoha_qdma_tx_irq_init()
1013 memset(irq_q->q, 0xff, size * sizeof(u32)); in airoha_qdma_tx_irq_init()
1014 irq_q->size = size; in airoha_qdma_tx_irq_init()
1015 irq_q->qdma = qdma; in airoha_qdma_tx_irq_init()
1017 airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr); in airoha_qdma_tx_irq_init()
1018 airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK, in airoha_qdma_tx_irq_init()
1020 airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK, in airoha_qdma_tx_irq_init()
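The completion ("IRQ") ring initialized here is a DMA-coherent array of u32 slots that the hardware fills with packed queue/descriptor ids. The memset to 0xff at line 1013 pre-marks every slot as unused, and the poll loop writes 0xff back after consuming an entry (line 877), so the sentinel distinguishes fresh hardware writes from slots already handled. A hedged sketch of that handshake (the exact sentinel compare sits on elided lines):

    u32 val = irq_q->q[head];

    if (slot_unused(val))           /* hypothetical: still at sentinel */
            break;

    /* decode qid/index from val, reclaim the tx descriptor, then: */
    irq_q->q[head] = 0xff;          /* hand the slot back, as on line 877 */
    head = (head + 1) % irq_q->size;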
1026 static int airoha_qdma_init_tx(struct airoha_qdma *qdma) in airoha_qdma_init_tx() argument
1030 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { in airoha_qdma_init_tx()
1031 err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma, in airoha_qdma_init_tx()
1037 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { in airoha_qdma_init_tx()
1038 err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma, in airoha_qdma_init_tx()
1049 struct airoha_eth *eth = q->qdma->eth; in airoha_qdma_cleanup_tx_queue()
1051 spin_lock_bh(&q->lock); in airoha_qdma_cleanup_tx_queue()
1052 while (q->queued) { in airoha_qdma_cleanup_tx_queue()
1053 struct airoha_queue_entry *e = &q->entry[q->tail]; in airoha_qdma_cleanup_tx_queue()
1055 dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, in airoha_qdma_cleanup_tx_queue()
1057 dev_kfree_skb_any(e->skb); in airoha_qdma_cleanup_tx_queue()
1058 e->skb = NULL; in airoha_qdma_cleanup_tx_queue()
1060 q->tail = (q->tail + 1) % q->ndesc; in airoha_qdma_cleanup_tx_queue()
1061 q->queued--; in airoha_qdma_cleanup_tx_queue()
1063 spin_unlock_bh(&q->lock); in airoha_qdma_cleanup_tx_queue()
1066 static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma) in airoha_qdma_init_hfwd_queues() argument
1069 struct airoha_eth *eth = qdma->eth; in airoha_qdma_init_hfwd_queues()
1070 int id = qdma - &eth->qdma[0]; in airoha_qdma_init_hfwd_queues()
1075 name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id); in airoha_qdma_init_hfwd_queues()
1077 return -ENOMEM; in airoha_qdma_init_hfwd_queues()
1080 index = of_property_match_string(eth->dev->of_node, in airoha_qdma_init_hfwd_queues()
1081 "memory-region-names", name); in airoha_qdma_init_hfwd_queues()
1089 np = of_parse_phandle(eth->dev->of_node, "memory-region", in airoha_qdma_init_hfwd_queues()
1092 return -ENODEV; in airoha_qdma_init_hfwd_queues()
1096 dma_addr = rmem->base; in airoha_qdma_init_hfwd_queues()
1100 num_desc = div_u64(rmem->size, buf_size); in airoha_qdma_init_hfwd_queues()
1103 if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, in airoha_qdma_init_hfwd_queues()
1105 return -ENOMEM; in airoha_qdma_init_hfwd_queues()
1108 airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr); in airoha_qdma_init_hfwd_queues()
1111 if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL)) in airoha_qdma_init_hfwd_queues()
1112 return -ENOMEM; in airoha_qdma_init_hfwd_queues()
1114 airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr); in airoha_qdma_init_hfwd_queues()
1116 airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG, in airoha_qdma_init_hfwd_queues()
1119 airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK, in airoha_qdma_init_hfwd_queues()
1121 airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG, in airoha_qdma_init_hfwd_queues()
1129 30 * USEC_PER_MSEC, true, qdma, in airoha_qdma_init_hfwd_queues()
1133 static void airoha_qdma_init_qos(struct airoha_qdma *qdma) in airoha_qdma_init_qos() argument
1135 airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK); in airoha_qdma_init_qos()
1136 airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK); in airoha_qdma_init_qos()
1138 airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG, in airoha_qdma_init_qos()
1141 airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG, in airoha_qdma_init_qos()
1145 airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG, in airoha_qdma_init_qos()
1148 airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG, in airoha_qdma_init_qos()
1153 airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK); in airoha_qdma_init_qos()
1154 /* fast-tick 25us */ in airoha_qdma_init_qos()
1155 airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK, in airoha_qdma_init_qos()
1157 airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK, in airoha_qdma_init_qos()
1160 airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK); in airoha_qdma_init_qos()
1161 airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK, in airoha_qdma_init_qos()
1163 airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, in airoha_qdma_init_qos()
1167 airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK); in airoha_qdma_init_qos()
1168 airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG, in airoha_qdma_init_qos()
1170 airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK, in airoha_qdma_init_qos()
1172 airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, in airoha_qdma_init_qos()
1176 airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK); in airoha_qdma_init_qos()
1177 airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK, in airoha_qdma_init_qos()
1179 airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK, in airoha_qdma_init_qos()
1183 static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma) in airoha_qdma_init_qos_stats() argument
1188 /* Tx-cpu transferred count */ in airoha_qdma_init_qos_stats()
1189 airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0); in airoha_qdma_init_qos_stats()
1190 airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1), in airoha_qdma_init_qos_stats()
1194 /* Tx-fwd transferred count */ in airoha_qdma_init_qos_stats()
1195 airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0); in airoha_qdma_init_qos_stats()
1196 airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1), in airoha_qdma_init_qos_stats()
1204 static int airoha_qdma_hw_init(struct airoha_qdma *qdma) in airoha_qdma_hw_init() argument
1208 for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) { in airoha_qdma_hw_init()
1210 airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff); in airoha_qdma_hw_init()
1212 airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0, in airoha_qdma_hw_init()
1214 airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1, in airoha_qdma_hw_init()
1216 airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2, in airoha_qdma_hw_init()
1218 airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3, in airoha_qdma_hw_init()
1222 airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0, in airoha_qdma_hw_init()
1224 airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4, in airoha_qdma_hw_init()
1228 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { in airoha_qdma_hw_init()
1229 if (!qdma->q_tx[i].ndesc) in airoha_qdma_hw_init()
1233 airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i), in airoha_qdma_hw_init()
1236 airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i), in airoha_qdma_hw_init()
1240 airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG, in airoha_qdma_hw_init()
1250 airoha_qdma_init_qos(qdma); in airoha_qdma_hw_init()
1252 /* disable qdma rx delay interrupt */ in airoha_qdma_hw_init()
1253 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_qdma_hw_init()
1254 if (!qdma->q_rx[i].ndesc) in airoha_qdma_hw_init()
1257 airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i), in airoha_qdma_hw_init()
1261 airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG, in airoha_qdma_hw_init()
1263 airoha_qdma_init_qos_stats(qdma); in airoha_qdma_hw_init()
1271 struct airoha_qdma *qdma = irq_bank->qdma; in airoha_irq_handler() local
1273 u32 intr[ARRAY_SIZE(irq_bank->irqmask)]; in airoha_irq_handler()
1277 intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i)); in airoha_irq_handler()
1278 intr[i] &= irq_bank->irqmask[i]; in airoha_irq_handler()
1279 airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]); in airoha_irq_handler()
1282 if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state)) in airoha_irq_handler()
1297 for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_irq_handler()
1298 if (!qdma->q_rx[i].ndesc) in airoha_irq_handler()
1302 napi_schedule(&qdma->q_rx[i].napi); in airoha_irq_handler()
1306 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) { in airoha_irq_handler()
1312 napi_schedule(&qdma->q_tx_irq[i].napi); in airoha_irq_handler()
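The handler follows the usual masked-status pattern: read the raw status, AND it with the bank's software irqmask so only armed sources are serviced, acknowledge by writing the masked value back (assuming REG_INT_STATUS is write-1-to-clear), then kick NAPI for every ring whose bit survived. In fragment form:

    u32 status = airoha_qdma_rr(qdma, REG_INT_STATUS(i));

    status &= irq_bank->irqmask[i];                  /* only armed bits */
    airoha_qdma_wr(qdma, REG_INT_STATUS(i), status); /* ack, assumed W1C */

    if (status & rx_ring_bit)                        /* hypothetical mask */
            napi_schedule(&qdma->q_rx[ring].napi);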
1320 struct airoha_qdma *qdma) in airoha_qdma_init_irq_banks() argument
1322 struct airoha_eth *eth = qdma->eth; in airoha_qdma_init_irq_banks()
1323 int i, id = qdma - &eth->qdma[0]; in airoha_qdma_init_irq_banks()
1325 for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) { in airoha_qdma_init_irq_banks()
1326 struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i]; in airoha_qdma_init_irq_banks()
1330 spin_lock_init(&irq_bank->irq_lock); in airoha_qdma_init_irq_banks()
1331 irq_bank->qdma = qdma; in airoha_qdma_init_irq_banks()
1333 irq_bank->irq = platform_get_irq(pdev, irq_index); in airoha_qdma_init_irq_banks()
1334 if (irq_bank->irq < 0) in airoha_qdma_init_irq_banks()
1335 return irq_bank->irq; in airoha_qdma_init_irq_banks()
1337 name = devm_kasprintf(eth->dev, GFP_KERNEL, in airoha_qdma_init_irq_banks()
1340 return -ENOMEM; in airoha_qdma_init_irq_banks()
1342 err = devm_request_irq(eth->dev, irq_bank->irq, in airoha_qdma_init_irq_banks()
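The arguments of the devm_request_irq() call are elided by the search. A plausible completion is sketched below; only the kernel signature devm_request_irq(dev, irq, handler, irqflags, devname, dev_id) is certain, while the flags and dev_id choice are assumptions (the handler dereferencing irq_bank suggests dev_id is the bank itself):

    err = devm_request_irq(eth->dev, irq_bank->irq, airoha_irq_handler,
                           IRQF_SHARED, name, irq_bank);  /* flags assumed */
    if (err)
            return err;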
1354 struct airoha_qdma *qdma) in airoha_qdma_init() argument
1356 int err, id = qdma - &eth->qdma[0]; in airoha_qdma_init()
1359 qdma->eth = eth; in airoha_qdma_init()
1360 res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id); in airoha_qdma_init()
1362 return -ENOMEM; in airoha_qdma_init()
1364 qdma->regs = devm_platform_ioremap_resource_byname(pdev, res); in airoha_qdma_init()
1365 if (IS_ERR(qdma->regs)) in airoha_qdma_init()
1366 return dev_err_probe(eth->dev, PTR_ERR(qdma->regs), in airoha_qdma_init()
1367 "failed to iomap qdma%d regs\n", id); in airoha_qdma_init()
1369 err = airoha_qdma_init_irq_banks(pdev, qdma); in airoha_qdma_init()
1373 err = airoha_qdma_init_rx(qdma); in airoha_qdma_init()
1377 err = airoha_qdma_init_tx(qdma); in airoha_qdma_init()
1381 err = airoha_qdma_init_hfwd_queues(qdma); in airoha_qdma_init()
1385 return airoha_qdma_hw_init(qdma); in airoha_qdma_init()
1394 err = reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), in airoha_hw_init()
1395 eth->xsi_rsts); in airoha_hw_init()
1399 err = reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts); in airoha_hw_init()
1404 err = reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts); in airoha_hw_init()
1413 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) { in airoha_hw_init()
1414 err = airoha_qdma_init(pdev, eth, &eth->qdma[i]); in airoha_hw_init()
1423 set_bit(DEV_STATE_INITIALIZED, &eth->state); in airoha_hw_init()
1428 static void airoha_hw_cleanup(struct airoha_qdma *qdma) in airoha_hw_cleanup() argument
1432 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_hw_cleanup()
1433 if (!qdma->q_rx[i].ndesc) in airoha_hw_cleanup()
1436 netif_napi_del(&qdma->q_rx[i].napi); in airoha_hw_cleanup()
1437 airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]); in airoha_hw_cleanup()
1438 if (qdma->q_rx[i].page_pool) in airoha_hw_cleanup()
1439 page_pool_destroy(qdma->q_rx[i].page_pool); in airoha_hw_cleanup()
1442 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) in airoha_hw_cleanup()
1443 netif_napi_del(&qdma->q_tx_irq[i].napi); in airoha_hw_cleanup()
1445 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { in airoha_hw_cleanup()
1446 if (!qdma->q_tx[i].ndesc) in airoha_hw_cleanup()
1449 airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); in airoha_hw_cleanup()
1453 static void airoha_qdma_start_napi(struct airoha_qdma *qdma) in airoha_qdma_start_napi() argument
1457 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) in airoha_qdma_start_napi()
1458 napi_enable(&qdma->q_tx_irq[i].napi); in airoha_qdma_start_napi()
1460 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_qdma_start_napi()
1461 if (!qdma->q_rx[i].ndesc) in airoha_qdma_start_napi()
1464 napi_enable(&qdma->q_rx[i].napi); in airoha_qdma_start_napi()
1468 static void airoha_qdma_stop_napi(struct airoha_qdma *qdma) in airoha_qdma_stop_napi() argument
1472 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) in airoha_qdma_stop_napi()
1473 napi_disable(&qdma->q_tx_irq[i].napi); in airoha_qdma_stop_napi()
1475 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_qdma_stop_napi()
1476 if (!qdma->q_rx[i].ndesc) in airoha_qdma_stop_napi()
1479 napi_disable(&qdma->q_rx[i].napi); in airoha_qdma_stop_napi()
1485 struct airoha_eth *eth = port->qdma->eth; in airoha_update_hw_stats()
1488 spin_lock(&port->stats.lock); in airoha_update_hw_stats()
1489 u64_stats_update_begin(&port->stats.syncp); in airoha_update_hw_stats()
1492 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id)); in airoha_update_hw_stats()
1493 port->stats.tx_ok_pkts += ((u64)val << 32); in airoha_update_hw_stats()
1494 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id)); in airoha_update_hw_stats()
1495 port->stats.tx_ok_pkts += val; in airoha_update_hw_stats()
1497 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id)); in airoha_update_hw_stats()
1498 port->stats.tx_ok_bytes += ((u64)val << 32); in airoha_update_hw_stats()
1499 val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id)); in airoha_update_hw_stats()
1500 port->stats.tx_ok_bytes += val; in airoha_update_hw_stats()
1502 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id)); in airoha_update_hw_stats()
1503 port->stats.tx_drops += val; in airoha_update_hw_stats()
1505 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id)); in airoha_update_hw_stats()
1506 port->stats.tx_broadcast += val; in airoha_update_hw_stats()
1508 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id)); in airoha_update_hw_stats()
1509 port->stats.tx_multicast += val; in airoha_update_hw_stats()
1511 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id)); in airoha_update_hw_stats()
1512 port->stats.tx_len[i] += val; in airoha_update_hw_stats()
1514 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id)); in airoha_update_hw_stats()
1515 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1516 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id)); in airoha_update_hw_stats()
1517 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1519 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id)); in airoha_update_hw_stats()
1520 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1521 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id)); in airoha_update_hw_stats()
1522 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1524 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id)); in airoha_update_hw_stats()
1525 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1526 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id)); in airoha_update_hw_stats()
1527 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1529 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id)); in airoha_update_hw_stats()
1530 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1531 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id)); in airoha_update_hw_stats()
1532 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1534 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id)); in airoha_update_hw_stats()
1535 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1536 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id)); in airoha_update_hw_stats()
1537 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1539 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id)); in airoha_update_hw_stats()
1540 port->stats.tx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1541 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id)); in airoha_update_hw_stats()
1542 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1544 val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id)); in airoha_update_hw_stats()
1545 port->stats.tx_len[i++] += val; in airoha_update_hw_stats()
1548 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id)); in airoha_update_hw_stats()
1549 port->stats.rx_ok_pkts += ((u64)val << 32); in airoha_update_hw_stats()
1550 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id)); in airoha_update_hw_stats()
1551 port->stats.rx_ok_pkts += val; in airoha_update_hw_stats()
1553 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id)); in airoha_update_hw_stats()
1554 port->stats.rx_ok_bytes += ((u64)val << 32); in airoha_update_hw_stats()
1555 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id)); in airoha_update_hw_stats()
1556 port->stats.rx_ok_bytes += val; in airoha_update_hw_stats()
1558 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id)); in airoha_update_hw_stats()
1559 port->stats.rx_drops += val; in airoha_update_hw_stats()
1561 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id)); in airoha_update_hw_stats()
1562 port->stats.rx_broadcast += val; in airoha_update_hw_stats()
1564 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id)); in airoha_update_hw_stats()
1565 port->stats.rx_multicast += val; in airoha_update_hw_stats()
1567 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id)); in airoha_update_hw_stats()
1568 port->stats.rx_errors += val; in airoha_update_hw_stats()
1570 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id)); in airoha_update_hw_stats()
1571 port->stats.rx_crc_error += val; in airoha_update_hw_stats()
1573 val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id)); in airoha_update_hw_stats()
1574 port->stats.rx_over_errors += val; in airoha_update_hw_stats()
1576 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id)); in airoha_update_hw_stats()
1577 port->stats.rx_fragment += val; in airoha_update_hw_stats()
1579 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id)); in airoha_update_hw_stats()
1580 port->stats.rx_jabber += val; in airoha_update_hw_stats()
1583 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id)); in airoha_update_hw_stats()
1584 port->stats.rx_len[i] += val; in airoha_update_hw_stats()
1586 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id)); in airoha_update_hw_stats()
1587 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1588 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id)); in airoha_update_hw_stats()
1589 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1591 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id)); in airoha_update_hw_stats()
1592 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1593 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id)); in airoha_update_hw_stats()
1594 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1596 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id)); in airoha_update_hw_stats()
1597 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1598 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id)); in airoha_update_hw_stats()
1599 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1601 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id)); in airoha_update_hw_stats()
1602 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1603 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id)); in airoha_update_hw_stats()
1604 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1606 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id)); in airoha_update_hw_stats()
1607 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1608 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id)); in airoha_update_hw_stats()
1609 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1611 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id)); in airoha_update_hw_stats()
1612 port->stats.rx_len[i] += ((u64)val << 32); in airoha_update_hw_stats()
1613 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id)); in airoha_update_hw_stats()
1614 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1616 val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id)); in airoha_update_hw_stats()
1617 port->stats.rx_len[i++] += val; in airoha_update_hw_stats()
1620 airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id), in airoha_update_hw_stats()
1623 u64_stats_update_end(&port->stats.syncp); in airoha_update_hw_stats()
1624 spin_unlock(&port->stats.lock); in airoha_update_hw_stats()
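Every 64-bit MIB counter above is split across an _H/_L register pair, accumulated as ((u64)val << 32) plus the low word, and the closing REG_FE_GDM_MIB_CLEAR write resets the hardware counters so the software totals accumulate monotonic deltas. The repeated pairs could be condensed with a helper along these lines (hypothetical, not in the driver):

    static u64 airoha_fe_rr64(struct airoha_eth *eth, u32 reg_h, u32 reg_l)
    {
            u64 val = (u64)airoha_fe_rr(eth, reg_h) << 32;

            return val + airoha_fe_rr(eth, reg_l);
    }

    /* e.g. port->stats.tx_ok_pkts +=
     *        airoha_fe_rr64(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id),
     *                       REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
     */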
1629 int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN; in airoha_dev_open()
1631 struct airoha_qdma *qdma = port->qdma; in airoha_dev_open() local
1639 airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id), in airoha_dev_open()
1642 airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id), in airoha_dev_open()
1645 airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id), in airoha_dev_open()
1650 airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG, in airoha_dev_open()
1653 atomic_inc(&qdma->users); in airoha_dev_open()
1661 struct airoha_qdma *qdma = port->qdma; in airoha_dev_stop() local
1669 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) in airoha_dev_stop()
1672 if (atomic_dec_and_test(&qdma->users)) { in airoha_dev_stop()
1673 airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG, in airoha_dev_stop()
1677 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { in airoha_dev_stop()
1678 if (!qdma->q_tx[i].ndesc) in airoha_dev_stop()
1681 airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); in airoha_dev_stop()
1697 airoha_set_macaddr(port, dev->dev_addr); in airoha_dev_set_macaddr()
1704 u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4; in airhoha_set_gdm2_loopback()
1705 struct airoha_eth *eth = port->qdma->eth; in airhoha_set_gdm2_loopback()
1706 u32 chan = port->id == 3 ? 4 : 0; in airhoha_set_gdm2_loopback()
1727 if (port->id == 3) { in airhoha_set_gdm2_loopback()
1755 struct airoha_eth *eth = port->qdma->eth; in airoha_dev_init()
1758 airoha_set_macaddr(port, dev->dev_addr); in airoha_dev_init()
1760 switch (port->id) { in airoha_dev_init()
1764 if (!eth->ports[1]) in airoha_dev_init()
1775 airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port); in airoha_dev_init()
1788 start = u64_stats_fetch_begin(&port->stats.syncp); in airoha_dev_get_stats64()
1789 storage->rx_packets = port->stats.rx_ok_pkts; in airoha_dev_get_stats64()
1790 storage->tx_packets = port->stats.tx_ok_pkts; in airoha_dev_get_stats64()
1791 storage->rx_bytes = port->stats.rx_ok_bytes; in airoha_dev_get_stats64()
1792 storage->tx_bytes = port->stats.tx_ok_bytes; in airoha_dev_get_stats64()
1793 storage->multicast = port->stats.rx_multicast; in airoha_dev_get_stats64()
1794 storage->rx_errors = port->stats.rx_errors; in airoha_dev_get_stats64()
1795 storage->rx_dropped = port->stats.rx_drops; in airoha_dev_get_stats64()
1796 storage->tx_dropped = port->stats.tx_drops; in airoha_dev_get_stats64()
1797 storage->rx_crc_errors = port->stats.rx_crc_error; in airoha_dev_get_stats64()
1798 storage->rx_over_errors = port->stats.rx_over_errors; in airoha_dev_get_stats64()
1799 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); in airoha_dev_get_stats64()
1805 struct airoha_eth *eth = port->qdma->eth; in airoha_dev_change_mtu()
1808 airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id), in airoha_dev_change_mtu()
1811 WRITE_ONCE(dev->mtu, mtu); in airoha_dev_change_mtu()
1826 channel = netdev_uses_dsa(dev) ? skb_get_queue_mapping(skb) : port->id; in airoha_dev_select_queue()
1828 queue = (skb->priority - 1) % AIROHA_NUM_QOS_QUEUES; /* QoS queue */ in airoha_dev_select_queue()
1831 return queue < dev->num_tx_queues ? queue : 0; in airoha_dev_select_queue()
1844 if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK) in airoha_get_dsa_tag()
1850 ehdr = (struct ethhdr *)skb->data; in airoha_get_dsa_tag()
1851 tag = be16_to_cpu(ehdr->h_proto); in airoha_get_dsa_tag()
1856 ehdr->h_proto = cpu_to_be16(ETH_P_8021Q); in airoha_get_dsa_tag()
1860 ehdr->h_proto = cpu_to_be16(ETH_P_8021AD); in airoha_get_dsa_tag()
1867 memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN); in airoha_get_dsa_tag()
1882 struct airoha_qdma *qdma = port->qdma; in airoha_dev_xmit() local
1891 qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx); in airoha_dev_xmit()
1899 if (skb->ip_summed == CHECKSUM_PARTIAL) in airoha_dev_xmit()
1907 goto error; in airoha_dev_xmit()
1909 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | in airoha_dev_xmit()
1911 __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size); in airoha_dev_xmit()
1913 tcp_hdr(skb)->check = (__force __sum16)csum; in airoha_dev_xmit()
1918 fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id; in airoha_dev_xmit()
1922 q = &qdma->q_tx[qid]; in airoha_dev_xmit()
1923 if (WARN_ON_ONCE(!q->ndesc)) in airoha_dev_xmit()
1924 goto error; in airoha_dev_xmit()
1926 spin_lock_bh(&q->lock); in airoha_dev_xmit()
1929 nr_frags = 1 + skb_shinfo(skb)->nr_frags; in airoha_dev_xmit()
1931 if (q->queued + nr_frags > q->ndesc) { in airoha_dev_xmit()
1934 spin_unlock_bh(&q->lock); in airoha_dev_xmit()
1939 data = skb->data; in airoha_dev_xmit()
1940 index = q->head; in airoha_dev_xmit()
1943 struct airoha_qdma_desc *desc = &q->desc[index]; in airoha_dev_xmit()
1944 struct airoha_queue_entry *e = &q->entry[index]; in airoha_dev_xmit()
1945 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in airoha_dev_xmit()
1949 addr = dma_map_single(dev->dev.parent, data, len, in airoha_dev_xmit()
1951 if (unlikely(dma_mapping_error(dev->dev.parent, addr))) in airoha_dev_xmit()
1954 index = (index + 1) % q->ndesc; in airoha_dev_xmit()
1957 if (i < nr_frags - 1) in airoha_dev_xmit()
1959 WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); in airoha_dev_xmit()
1960 WRITE_ONCE(desc->addr, cpu_to_le32(addr)); in airoha_dev_xmit()
1962 WRITE_ONCE(desc->data, cpu_to_le32(val)); in airoha_dev_xmit()
1963 WRITE_ONCE(desc->msg0, cpu_to_le32(msg0)); in airoha_dev_xmit()
1964 WRITE_ONCE(desc->msg1, cpu_to_le32(msg1)); in airoha_dev_xmit()
1965 WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff)); in airoha_dev_xmit()
1967 e->skb = i ? NULL : skb; in airoha_dev_xmit()
1968 e->dma_addr = addr; in airoha_dev_xmit()
1969 e->dma_len = len; in airoha_dev_xmit()
1975 q->head = index; in airoha_dev_xmit()
1976 q->queued += i; in airoha_dev_xmit()
1979 netdev_tx_sent_queue(txq, skb->len); in airoha_dev_xmit()
1982 airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), in airoha_dev_xmit()
1984 FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); in airoha_dev_xmit()
1986 if (q->ndesc - q->queued < q->free_thr) in airoha_dev_xmit()
1989 spin_unlock_bh(&q->lock); in airoha_dev_xmit()
1994 for (i--; i >= 0; i--) { in airoha_dev_xmit()
1995 index = (q->head + i) % q->ndesc; in airoha_dev_xmit()
1996 dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr, in airoha_dev_xmit()
1997 q->entry[index].dma_len, DMA_TO_DEVICE); in airoha_dev_xmit()
2000 spin_unlock_bh(&q->lock); in airoha_dev_xmit()
2001 error: in airoha_dev_xmit()
2003 dev->stats.tx_dropped++; in airoha_dev_xmit()
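Two details of the xmit path deserve spelling out. First, q->free_thr = 1 + MAX_SKB_FRAGS (line 967) sizes the stop/wake hysteresis so the queue is paused only when a worst-case skb could no longer fit, and the tx poll loop wakes it with the mirrored test on line 931. Second, the unmap loop at lines 1994-1997 walks back over exactly the i fragments that mapped successfully before the DMA mapping failure. A sketch of the stop/wake pairing, assuming netif_tx_stop_queue()/netif_tx_wake_queue() on the elided lines:

    /* xmit side: stop before a worst-case (1 + MAX_SKB_FRAGS) skb
     * could overflow the ring
     */
    if (q->ndesc - q->queued < q->free_thr)
            netif_tx_stop_queue(txq);

    /* completion side: wake once such an skb fits again */
    if (netif_tx_queue_stopped(txq) &&
        q->ndesc - q->queued >= q->free_thr)
            netif_tx_wake_queue(txq);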
2012 struct airoha_eth *eth = port->qdma->eth; in airoha_ethtool_get_drvinfo()
2014 strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver)); in airoha_ethtool_get_drvinfo()
2015 strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info)); in airoha_ethtool_get_drvinfo()
2026 start = u64_stats_fetch_begin(&port->stats.syncp); in airoha_ethtool_get_mac_stats()
2027 stats->MulticastFramesXmittedOK = port->stats.tx_multicast; in airoha_ethtool_get_mac_stats()
2028 stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast; in airoha_ethtool_get_mac_stats()
2029 stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast; in airoha_ethtool_get_mac_stats()
2030 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); in airoha_ethtool_get_mac_stats()
2050 struct airoha_hw_stats *hw_stats = &port->stats; in airoha_ethtool_get_rmon_stats()
2054 ARRAY_SIZE(hw_stats->tx_len) + 1); in airoha_ethtool_get_rmon_stats()
2056 ARRAY_SIZE(hw_stats->rx_len) + 1); in airoha_ethtool_get_rmon_stats()
2063 start = u64_stats_fetch_begin(&port->stats.syncp); in airoha_ethtool_get_rmon_stats()
2064 stats->fragments = hw_stats->rx_fragment; in airoha_ethtool_get_rmon_stats()
2065 stats->jabbers = hw_stats->rx_jabber; in airoha_ethtool_get_rmon_stats()
2066 for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1; in airoha_ethtool_get_rmon_stats()
2068 stats->hist[i] = hw_stats->rx_len[i]; in airoha_ethtool_get_rmon_stats()
2069 stats->hist_tx[i] = hw_stats->tx_len[i]; in airoha_ethtool_get_rmon_stats()
2071 } while (u64_stats_fetch_retry(&port->stats.syncp, start)); in airoha_ethtool_get_rmon_stats()
2081 airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel), in airoha_qdma_set_chan_tx_sched()
2088 airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG, in airoha_qdma_set_chan_tx_sched()
2096 true, port->qdma, in airoha_qdma_set_chan_tx_sched()
2102 airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3), in airoha_qdma_set_chan_tx_sched()
2122 struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params; in airoha_qdma_set_tx_ets_sched()
2127 if (p->bands > AIROHA_NUM_QOS_QUEUES) in airoha_qdma_set_tx_ets_sched()
2128 return -EINVAL; in airoha_qdma_set_tx_ets_sched()
2130 for (i = 0; i < p->bands; i++) { in airoha_qdma_set_tx_ets_sched()
2131 if (!p->quanta[i]) in airoha_qdma_set_tx_ets_sched()
2136 if (nstrict == AIROHA_NUM_QOS_QUEUES - 1) in airoha_qdma_set_tx_ets_sched()
2137 return -EINVAL; in airoha_qdma_set_tx_ets_sched()
2144 if (p->priomap[p->bands - i - 1] != i) in airoha_qdma_set_tx_ets_sched()
2145 return -EINVAL; in airoha_qdma_set_tx_ets_sched()
2148 for (i = 0; i < p->bands - nstrict; i++) { in airoha_qdma_set_tx_ets_sched()
2149 if (p->priomap[i] != nstrict + i) in airoha_qdma_set_tx_ets_sched()
2150 return -EINVAL; in airoha_qdma_set_tx_ets_sched()
2152 w[i] = p->weights[nstrict + i]; in airoha_qdma_set_tx_ets_sched()
2157 else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1) in airoha_qdma_set_tx_ets_sched()
2168 u64 cpu_tx_packets = airoha_qdma_rr(port->qdma, in airoha_qdma_get_tx_ets_stats()
2170 u64 fwd_tx_packets = airoha_qdma_rr(port->qdma, in airoha_qdma_get_tx_ets_stats()
2172 u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) + in airoha_qdma_get_tx_ets_stats()
2173 (fwd_tx_packets - port->fwd_tx_packets); in airoha_qdma_get_tx_ets_stats()
2174 _bstats_update(opt->stats.bstats, 0, tx_packets); in airoha_qdma_get_tx_ets_stats()
2176 port->cpu_tx_packets = cpu_tx_packets; in airoha_qdma_get_tx_ets_stats()
2177 port->fwd_tx_packets = fwd_tx_packets; in airoha_qdma_get_tx_ets_stats()
2187 if (opt->parent == TC_H_ROOT) in airoha_tc_setup_qdisc_ets()
2188 return -EINVAL; in airoha_tc_setup_qdisc_ets()
2190 channel = TC_H_MAJ(opt->handle) >> 16; in airoha_tc_setup_qdisc_ets()
2193 switch (opt->command) { in airoha_tc_setup_qdisc_ets()
2202 return -EOPNOTSUPP; in airoha_tc_setup_qdisc_ets()
2206 static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id, in airoha_qdma_get_rl_param() argument
2215 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); in airoha_qdma_get_rl_param()
2218 USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma, in airoha_qdma_get_rl_param()
2220 return -ETIMEDOUT; in airoha_qdma_get_rl_param()
2222 *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr)); in airoha_qdma_get_rl_param()
2224 *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr)); in airoha_qdma_get_rl_param()
2229 static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id, in airoha_qdma_set_rl_param() argument
2239 airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val); in airoha_qdma_set_rl_param()
2240 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); in airoha_qdma_set_rl_param()
2245 qdma, REG_TRTCM_CFG_PARAM(addr)); in airoha_qdma_set_rl_param()
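Both accessors implement a small indirect-register protocol: a write stages its payload in REG_TRTCM_DATA_LOW, the command word goes to REG_TRTCM_CFG_PARAM, and the caller polls that same register until the hardware flags completion; reads then fetch the result from REG_TRTCM_DATA_LOW/HIGH. A condensed model (the done-bit mask name is assumed):

    static int trtcm_xfer(struct airoha_qdma *qdma, u32 addr, u32 cmd,
                          const u32 *payload)
    {
            u32 val;

            if (payload)    /* write: stage data before the command */
                    airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), *payload);

            airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), cmd);

            /* poll until hw reports the transfer done (mask assumed) */
            return read_poll_timeout(airoha_qdma_rr, val,
                                     val & TRTCM_PARAM_RW_DONE_MASK,
                                     USEC_PER_MSEC, 10 * USEC_PER_MSEC,
                                     true, qdma, REG_TRTCM_CFG_PARAM(addr));
    }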
2248 static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id, in airoha_qdma_set_rl_config() argument
2254 err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE, in airoha_qdma_set_rl_config()
2261 return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE, in airoha_qdma_set_rl_config()
2265 static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma, in airoha_qdma_set_rl_token_bucket() argument
2272 err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG, in airoha_qdma_set_rl_token_bucket()
2277 val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG); in airoha_qdma_set_rl_token_bucket()
2282 return -EINVAL; in airoha_qdma_set_rl_token_bucket()
2286 return -EINVAL; in airoha_qdma_set_rl_token_bucket()
2294 err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG, in airoha_qdma_set_rl_token_bucket()
2304 return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG, in airoha_qdma_set_rl_token_bucket()
2308 static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id, in airoha_qdma_init_rl_config() argument
2316 err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG, in airoha_qdma_init_rl_config()
2321 return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG, in airoha_qdma_init_rl_config()
2325 static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel, in airoha_qdma_get_trtcm_param() argument
2336 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); in airoha_qdma_get_trtcm_param()
2340 qdma, REG_TRTCM_CFG_PARAM(addr))) in airoha_qdma_get_trtcm_param()
2341 return -ETIMEDOUT; in airoha_qdma_get_trtcm_param()
2343 *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr)); in airoha_qdma_get_trtcm_param()
2345 *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr)); in airoha_qdma_get_trtcm_param()
2350 static int airoha_qdma_set_trtcm_param(struct airoha_qdma *qdma, int channel, in airoha_qdma_set_trtcm_param() argument
2361 airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val); in airoha_qdma_set_trtcm_param()
2362 airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config); in airoha_qdma_set_trtcm_param()
2367 qdma, REG_TRTCM_CFG_PARAM(addr)); in airoha_qdma_set_trtcm_param()
2370 static int airoha_qdma_set_trtcm_config(struct airoha_qdma *qdma, int channel, in airoha_qdma_set_trtcm_config() argument
2376 if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE, in airoha_qdma_set_trtcm_config()
2378 return -EINVAL; in airoha_qdma_set_trtcm_config()
2382 return airoha_qdma_set_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE, in airoha_qdma_set_trtcm_config()
2386 static int airoha_qdma_set_trtcm_token_bucket(struct airoha_qdma *qdma, in airoha_qdma_set_trtcm_token_bucket() argument
2394 if (airoha_qdma_get_trtcm_param(qdma, channel, addr, TRTCM_MISC_MODE, in airoha_qdma_set_trtcm_token_bucket()
2396 return -EINVAL; in airoha_qdma_set_trtcm_token_bucket()
2398 val = airoha_qdma_rr(qdma, addr); in airoha_qdma_set_trtcm_token_bucket()
2403 return -EINVAL; in airoha_qdma_set_trtcm_token_bucket()
2407 return -EINVAL; in airoha_qdma_set_trtcm_token_bucket()
2415 err = airoha_qdma_set_trtcm_param(qdma, channel, addr, in airoha_qdma_set_trtcm_token_bucket()
2423 return airoha_qdma_set_trtcm_param(qdma, channel, addr, in airoha_qdma_set_trtcm_token_bucket()
2435 err = airoha_qdma_set_trtcm_config(port->qdma, channel, in airoha_qdma_set_tx_rate_limit()
2441 err = airoha_qdma_set_trtcm_token_bucket(port->qdma, channel, in airoha_qdma_set_tx_rate_limit()
2454 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; in airoha_tc_htb_alloc_leaf_queue()
2455 u32 rate = div_u64(opt->rate, 1000) << 3; /* kbps */ in airoha_tc_htb_alloc_leaf_queue()
2456 struct net_device *dev = port->dev; in airoha_tc_htb_alloc_leaf_queue()
2457 int num_tx_queues = dev->real_num_tx_queues; in airoha_tc_htb_alloc_leaf_queue()
2460 if (opt->parent_classid != TC_HTB_CLASSID_ROOT) { in airoha_tc_htb_alloc_leaf_queue()
2461 NL_SET_ERR_MSG_MOD(opt->extack, "invalid parent classid"); in airoha_tc_htb_alloc_leaf_queue()
2462 return -EINVAL; in airoha_tc_htb_alloc_leaf_queue()
2465 err = airoha_qdma_set_tx_rate_limit(port, channel, rate, opt->quantum); in airoha_tc_htb_alloc_leaf_queue()
2467 NL_SET_ERR_MSG_MOD(opt->extack, in airoha_tc_htb_alloc_leaf_queue()
2472 if (opt->command == TC_HTB_NODE_MODIFY) in airoha_tc_htb_alloc_leaf_queue()
2477 airoha_qdma_set_tx_rate_limit(port, channel, 0, opt->quantum); in airoha_tc_htb_alloc_leaf_queue()
2478 NL_SET_ERR_MSG_MOD(opt->extack, in airoha_tc_htb_alloc_leaf_queue()
2483 set_bit(channel, port->qos_sq_bmap); in airoha_tc_htb_alloc_leaf_queue()
2484 opt->qid = AIROHA_NUM_TX_RING + channel; in airoha_tc_htb_alloc_leaf_queue()
2493 struct airoha_qdma *qdma = port->qdma; in airoha_qdma_set_rx_meter() local
2496 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) { in airoha_qdma_set_rx_meter()
2499 if (!qdma->q_rx[i].ndesc) in airoha_qdma_set_rx_meter()
2502 err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type); in airoha_qdma_set_rx_meter()
2506 err = airoha_qdma_set_rl_token_bucket(qdma, i, rate, in airoha_qdma_set_rx_meter()
2517 const struct flow_action *actions = &f->rule->action; in airoha_tc_matchall_act_validate()
2521 NL_SET_ERR_MSG_MOD(f->common.extack, in airoha_tc_matchall_act_validate()
2523 return -EINVAL; in airoha_tc_matchall_act_validate()
2527 NL_SET_ERR_MSG_MOD(f->common.extack, in airoha_tc_matchall_act_validate()
2529 return -EOPNOTSUPP; in airoha_tc_matchall_act_validate()
2532 act = &actions->entries[0]; in airoha_tc_matchall_act_validate()
2533 if (act->id != FLOW_ACTION_POLICE) { in airoha_tc_matchall_act_validate()
2534 NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action"); in airoha_tc_matchall_act_validate()
2535 return -EOPNOTSUPP; in airoha_tc_matchall_act_validate()
2538 if (act->police.exceed.act_id != FLOW_ACTION_DROP) { in airoha_tc_matchall_act_validate()
2539 NL_SET_ERR_MSG_MOD(f->common.extack, in airoha_tc_matchall_act_validate()
2541 return -EOPNOTSUPP; in airoha_tc_matchall_act_validate()
2544 if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) { in airoha_tc_matchall_act_validate()
2545 NL_SET_ERR_MSG_MOD(f->common.extack, in airoha_tc_matchall_act_validate()
2547 return -EOPNOTSUPP; in airoha_tc_matchall_act_validate()
2550 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT && in airoha_tc_matchall_act_validate()
2552 NL_SET_ERR_MSG_MOD(f->common.extack, in airoha_tc_matchall_act_validate()
2554 return -EOPNOTSUPP; in airoha_tc_matchall_act_validate()
2557 if (act->police.peakrate_bytes_ps || act->police.avrate || in airoha_tc_matchall_act_validate()
2558 act->police.overhead || act->police.mtu) { in airoha_tc_matchall_act_validate()
2559 NL_SET_ERR_MSG_MOD(f->common.extack, in airoha_tc_matchall_act_validate()
2561 return -EOPNOTSUPP; in airoha_tc_matchall_act_validate()
2574 switch (f->command) { in airoha_dev_tc_matchall()
2583 act = &f->rule->action.entries[0]; in airoha_dev_tc_matchall()
2584 if (act->police.rate_pkt_ps) { in airoha_dev_tc_matchall()
2585 rate = act->police.rate_pkt_ps; in airoha_dev_tc_matchall()
2586 bucket_size = act->police.burst_pkt; in airoha_dev_tc_matchall()
2589 rate = div_u64(act->police.rate_bytes_ps, 1000); in airoha_dev_tc_matchall()
2591 bucket_size = act->police.burst; in airoha_dev_tc_matchall()
2599 return -EOPNOTSUPP; in airoha_dev_tc_matchall()
2609 return -EOPNOTSUPP; in airoha_dev_setup_tc_block_cb()
2617 return -EOPNOTSUPP; in airoha_dev_setup_tc_block_cb()
2628 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) in airoha_dev_setup_tc_block()
2629 return -EOPNOTSUPP; in airoha_dev_setup_tc_block()
2631 f->driver_block_list = &block_cb_list; in airoha_dev_setup_tc_block()
2632 switch (f->command) { in airoha_dev_setup_tc_block()
2634 block_cb = flow_block_cb_lookup(f->block, cb, port->dev); in airoha_dev_setup_tc_block()
2639 block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL); in airoha_dev_setup_tc_block()
2645 list_add_tail(&block_cb->driver_list, &block_cb_list); in airoha_dev_setup_tc_block()
2648 block_cb = flow_block_cb_lookup(f->block, cb, port->dev); in airoha_dev_setup_tc_block()
2650 return -ENOENT; in airoha_dev_setup_tc_block()
2654 list_del(&block_cb->driver_list); in airoha_dev_setup_tc_block()
2658 return -EOPNOTSUPP; in airoha_dev_setup_tc_block()
2664 struct net_device *dev = port->dev; in airoha_tc_remove_htb_queue()
2666 netif_set_real_num_tx_queues(dev, dev->real_num_tx_queues - 1); in airoha_tc_remove_htb_queue()
2668 clear_bit(queue, port->qos_sq_bmap); in airoha_tc_remove_htb_queue()
2674 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; in airoha_tc_htb_delete_leaf_queue()
2676 if (!test_bit(channel, port->qos_sq_bmap)) { in airoha_tc_htb_delete_leaf_queue()
2677 NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id"); in airoha_tc_htb_delete_leaf_queue()
2678 return -EINVAL; in airoha_tc_htb_delete_leaf_queue()
2690 for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS) in airoha_tc_htb_destroy()
2699 u32 channel = TC_H_MIN(opt->classid) % AIROHA_NUM_QOS_CHANNELS; in airoha_tc_get_htb_get_leaf_queue()
2701 if (!test_bit(channel, port->qos_sq_bmap)) { in airoha_tc_get_htb_get_leaf_queue()
2702 NL_SET_ERR_MSG_MOD(opt->extack, "invalid queue id"); in airoha_tc_get_htb_get_leaf_queue()
2703 return -EINVAL; in airoha_tc_get_htb_get_leaf_queue()
2706 opt->qid = AIROHA_NUM_TX_RING + channel; in airoha_tc_get_htb_get_leaf_queue()
2714 switch (opt->command) { in airoha_tc_setup_qdisc_htb()
2729 return -EOPNOTSUPP; in airoha_tc_setup_qdisc_htb()
2749 return -EOPNOTSUPP; in airoha_dev_tc_setup()
2775 for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) { in airoha_metadata_dst_alloc()
2781 return -ENOMEM; in airoha_metadata_dst_alloc()
2783 md_dst->u.port_info.port_id = i; in airoha_metadata_dst_alloc()
2784 port->dsa_meta[i] = md_dst; in airoha_metadata_dst_alloc()
2794 for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) { in airoha_metadata_dst_free()
2795 if (!port->dsa_meta[i]) in airoha_metadata_dst_free()
2798 metadata_dst_free(port->dsa_meta[i]); in airoha_metadata_dst_free()
2807 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { in airoha_is_valid_gdm_port()
2808 if (eth->ports[i] == port) in airoha_is_valid_gdm_port()
2820 struct airoha_qdma *qdma; in airoha_alloc_gdm_port() local
2826 dev_err(eth->dev, "missing gdm port id\n"); in airoha_alloc_gdm_port()
2827 return -EINVAL; in airoha_alloc_gdm_port()
2831 p = id - 1; in airoha_alloc_gdm_port()
2833 if (!id || id > ARRAY_SIZE(eth->ports)) { in airoha_alloc_gdm_port()
2834 dev_err(eth->dev, "invalid gdm port id: %d\n", id); in airoha_alloc_gdm_port()
2835 return -EINVAL; in airoha_alloc_gdm_port()
2838 if (eth->ports[p]) { in airoha_alloc_gdm_port()
2839 dev_err(eth->dev, "duplicate gdm port id: %d\n", id); in airoha_alloc_gdm_port()
2840 return -EINVAL; in airoha_alloc_gdm_port()
2843 dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port), in airoha_alloc_gdm_port()
2847 dev_err(eth->dev, "alloc_etherdev failed\n"); in airoha_alloc_gdm_port()
2848 return -ENOMEM; in airoha_alloc_gdm_port()
2851 qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA]; in airoha_alloc_gdm_port()
2852 dev->netdev_ops = &airoha_netdev_ops; in airoha_alloc_gdm_port()
2853 dev->ethtool_ops = &airoha_ethtool_ops; in airoha_alloc_gdm_port()
2854 dev->max_mtu = AIROHA_MAX_MTU; in airoha_alloc_gdm_port()
2855 dev->watchdog_timeo = 5 * HZ; in airoha_alloc_gdm_port()
2856 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | in airoha_alloc_gdm_port()
2860 dev->features |= dev->hw_features; in airoha_alloc_gdm_port()
2861 dev->vlan_features = dev->hw_features; in airoha_alloc_gdm_port()
2862 dev->dev.of_node = np; in airoha_alloc_gdm_port()
2863 dev->irq = qdma->irq_banks[0].irq; in airoha_alloc_gdm_port()
2864 SET_NETDEV_DEV(dev, eth->dev); in airoha_alloc_gdm_port()
2873 if (err == -EPROBE_DEFER) in airoha_alloc_gdm_port()
2877 dev_info(eth->dev, "generated random MAC address %pM\n", in airoha_alloc_gdm_port()
2878 dev->dev_addr); in airoha_alloc_gdm_port()
2882 u64_stats_init(&port->stats.syncp); in airoha_alloc_gdm_port()
2883 spin_lock_init(&port->stats.lock); in airoha_alloc_gdm_port()
2884 port->qdma = qdma; in airoha_alloc_gdm_port()
2885 port->dev = dev; in airoha_alloc_gdm_port()
2886 port->id = id; in airoha_alloc_gdm_port()
2887 eth->ports[p] = port; in airoha_alloc_gdm_port()
2910 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); in airoha_probe()
2912 return -ENOMEM; in airoha_probe()
2914 eth->dev = &pdev->dev; in airoha_probe()
2916 err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32)); in airoha_probe()
2918 dev_err(eth->dev, "failed configuring DMA mask\n"); in airoha_probe()
2922 eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe"); in airoha_probe()
2923 if (IS_ERR(eth->fe_regs)) in airoha_probe()
2924 return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs), in airoha_probe()
2927 eth->rsts[0].id = "fe"; in airoha_probe()
2928 eth->rsts[1].id = "pdma"; in airoha_probe()
2929 eth->rsts[2].id = "qdma"; in airoha_probe()
2930 err = devm_reset_control_bulk_get_exclusive(eth->dev, in airoha_probe()
2931 ARRAY_SIZE(eth->rsts), in airoha_probe()
2932 eth->rsts); in airoha_probe()
2934 dev_err(eth->dev, "failed to get bulk reset lines\n"); in airoha_probe()
2938 eth->xsi_rsts[0].id = "xsi-mac"; in airoha_probe()
2939 eth->xsi_rsts[1].id = "hsi0-mac"; in airoha_probe()
2940 eth->xsi_rsts[2].id = "hsi1-mac"; in airoha_probe()
2941 eth->xsi_rsts[3].id = "hsi-mac"; in airoha_probe()
2942 eth->xsi_rsts[4].id = "xfp-mac"; in airoha_probe()
2943 err = devm_reset_control_bulk_get_exclusive(eth->dev, in airoha_probe()
2944 ARRAY_SIZE(eth->xsi_rsts), in airoha_probe()
2945 eth->xsi_rsts); in airoha_probe()
2947 dev_err(eth->dev, "failed to get bulk xsi reset lines\n"); in airoha_probe()
2951 eth->napi_dev = alloc_netdev_dummy(0); in airoha_probe()
2952 if (!eth->napi_dev) in airoha_probe()
2953 return -ENOMEM; in airoha_probe()
2956 eth->napi_dev->threaded = true; in airoha_probe()
2957 strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name)); in airoha_probe()
2964 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) in airoha_probe()
2965 airoha_qdma_start_napi(&eth->qdma[i]); in airoha_probe()
2968 for_each_child_of_node(pdev->dev.of_node, np) { in airoha_probe()
2969 if (!of_device_is_compatible(np, "airoha,eth-mac")) in airoha_probe()
2985 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) in airoha_probe()
2986 airoha_qdma_stop_napi(&eth->qdma[i]); in airoha_probe()
2988 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) in airoha_probe()
2989 airoha_hw_cleanup(&eth->qdma[i]); in airoha_probe()
2991 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { in airoha_probe()
2992 struct airoha_gdm_port *port = eth->ports[i]; in airoha_probe()
2994 if (port && port->dev->reg_state == NETREG_REGISTERED) { in airoha_probe()
2995 unregister_netdev(port->dev); in airoha_probe()
2999 free_netdev(eth->napi_dev); in airoha_probe()
3010 for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) { in airoha_remove()
3011 airoha_qdma_stop_napi(&eth->qdma[i]); in airoha_remove()
3012 airoha_hw_cleanup(&eth->qdma[i]); in airoha_remove()
3015 for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { in airoha_remove()
3016 struct airoha_gdm_port *port = eth->ports[i]; in airoha_remove()
3021 airoha_dev_stop(port->dev); in airoha_remove()
3022 unregister_netdev(port->dev); in airoha_remove()
3025 free_netdev(eth->napi_dev); in airoha_remove()
3032 { .compatible = "airoha,en7581-eth" },