Lines Matching +full:t +full:- +full:head (matches from the MediaTek mt76 DMA ring code; each hit lists the source line number, the matched line, and the enclosing function)
1 // SPDX-License-Identifier: ISC
6 #include <linux/dma-mapping.h>
15 if ((_q)->flags & MT_QFLAG_WED) \
16 _val = mtk_wed_device_reg_read((_q)->wed, \
17 ((_q)->wed_regs + \
20 _val = readl(&(_q)->regs->_field); \
26 if ((_q)->flags & MT_QFLAG_WED) \
27 mtk_wed_device_reg_write((_q)->wed, \
28 ((_q)->wed_regs + _offset), \
31 writel(_val, &(_q)->regs->_field); \
36 #define Q_READ(_q, _field) readl(&(_q)->regs->_field)
37 #define Q_WRITE(_q, _field, _val) writel(_val, &(_q)->regs->_field)
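The two pairs above are the WED-aware and plain-MMIO variants of the same ring accessors; a minimal usage sketch, reusing only the (queue, field) combinations that appear later in this listing:

    /* Read how far the hardware has consumed the ring, then publish the
     * CPU fill level; the WED variants route the same access through
     * mtk_wed_device_reg_read()/_write() at q->wed_regs + offset.
     */
    u32 hw_idx = Q_READ(q, dma_idx);
    Q_WRITE(q, cpu_idx, q->head);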
44 struct mt76_txwi_cache *t; in mt76_alloc_txwi() local
49 size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t)); in mt76_alloc_txwi()
54 addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size, in mt76_alloc_txwi()
56 if (unlikely(dma_mapping_error(dev->dma_dev, addr))) { in mt76_alloc_txwi()
61 t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size); in mt76_alloc_txwi()
62 t->dma_addr = addr; in mt76_alloc_txwi()
64 return t; in mt76_alloc_txwi()
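mt76_alloc_txwi() carves the hardware TXWI and its bookkeeping struct out of one allocation; a comment-only sketch of that layout (the kzalloc and the mapping-error cleanup are not among the matched lines, so they are assumed here):

    /*
     * size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t))
     *
     *   txwi ..................... txwi_size bytes, DMA-mapped, given to hw
     *   txwi + drv->txwi_size .... struct mt76_txwi_cache *t, CPU bookkeeping
     *
     * t->dma_addr keeps the mapping so mt76_free_pending_txwi() can unmap
     * it later; on dma_mapping_error() the buffer is presumably freed and
     * NULL returned.
     */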
70 struct mt76_txwi_cache *t; in mt76_alloc_rxwi() local
72 t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC); in mt76_alloc_rxwi()
73 if (!t) in mt76_alloc_rxwi()
76 t->ptr = NULL; in mt76_alloc_rxwi()
77 return t; in mt76_alloc_rxwi()
83 struct mt76_txwi_cache *t = NULL; in __mt76_get_txwi() local
85 spin_lock(&dev->lock); in __mt76_get_txwi()
86 if (!list_empty(&dev->txwi_cache)) { in __mt76_get_txwi()
87 t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache, in __mt76_get_txwi()
89 list_del(&t->list); in __mt76_get_txwi()
91 spin_unlock(&dev->lock); in __mt76_get_txwi()
93 return t; in __mt76_get_txwi()
99 struct mt76_txwi_cache *t = NULL; in __mt76_get_rxwi() local
101 spin_lock_bh(&dev->wed_lock); in __mt76_get_rxwi()
102 if (!list_empty(&dev->rxwi_cache)) { in __mt76_get_rxwi()
103 t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache, in __mt76_get_rxwi()
105 list_del(&t->list); in __mt76_get_rxwi()
107 spin_unlock_bh(&dev->wed_lock); in __mt76_get_rxwi()
109 return t; in __mt76_get_rxwi()
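Both __mt76_get_* helpers pop a recycled entry off a per-device free list; the locking split is worth noting:

    /*
     * txwi cache: dev->lock,     spin_lock()/spin_unlock()
     * rxwi cache: dev->wed_lock, spin_lock_bh()/spin_unlock_bh()
     *
     * Both return NULL when their list is empty, in which case the
     * mt76_get_txwi()/mt76_get_rxwi() wrappers below fall back to a fresh
     * allocation (the fallback call itself is truncated in the matches).
     */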
115 struct mt76_txwi_cache *t = __mt76_get_txwi(dev); in mt76_get_txwi() local
117 if (t) in mt76_get_txwi()
118 return t; in mt76_get_txwi()
126 struct mt76_txwi_cache *t = __mt76_get_rxwi(dev); in mt76_get_rxwi() local
128 if (t) in mt76_get_rxwi()
129 return t; in mt76_get_rxwi()
136 mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) in mt76_put_txwi() argument
138 if (!t) in mt76_put_txwi()
141 spin_lock(&dev->lock); in mt76_put_txwi()
142 list_add(&t->list, &dev->txwi_cache); in mt76_put_txwi()
143 spin_unlock(&dev->lock); in mt76_put_txwi()
148 mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) in mt76_put_rxwi() argument
150 if (!t) in mt76_put_rxwi()
153 spin_lock_bh(&dev->wed_lock); in mt76_put_rxwi()
154 list_add(&t->list, &dev->rxwi_cache); in mt76_put_rxwi()
155 spin_unlock_bh(&dev->wed_lock); in mt76_put_rxwi()
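The put side never frees, it only pushes the entry back for reuse:

    /*
     * NULL-tolerant: callers may hand back the result of a failed get
     * without checking. list_add() prepends, so the caches behave LIFO and
     * recently used entries are handed out first.
     */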
162 struct mt76_txwi_cache *t; in mt76_free_pending_txwi() local
165 while ((t = __mt76_get_txwi(dev)) != NULL) { in mt76_free_pending_txwi()
166 dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size, in mt76_free_pending_txwi()
168 kfree(mt76_get_txwi_ptr(dev, t)); in mt76_free_pending_txwi()
176 struct mt76_txwi_cache *t; in mt76_free_pending_rxwi() local
179 while ((t = __mt76_get_rxwi(dev)) != NULL) { in mt76_free_pending_rxwi()
180 if (t->ptr) in mt76_free_pending_rxwi()
181 mt76_put_page_pool_buf(t->ptr, false); in mt76_free_pending_rxwi()
182 kfree(t); in mt76_free_pending_rxwi()
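The *_free_pending helpers drain those caches at teardown; the DMA direction is truncated in the matched txwi line and assumed here to be the transmit direction:

    /*
     * txwi: dma_unmap_single() the buffer recorded in t->dma_addr, then
     *       kfree() the combined allocation via mt76_get_txwi_ptr(), which
     *       points back at the start of the mt76_alloc_txwi() buffer.
     * rxwi: return any attached page-pool buffer (t->ptr), then kfree(t).
     */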
191 Q_WRITE(q, desc_base, q->desc_dma); in mt76_dma_sync_idx()
192 if (q->flags & MT_QFLAG_WED_RRO_EN) in mt76_dma_sync_idx()
193 Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc); in mt76_dma_sync_idx()
195 Q_WRITE(q, ring_size, q->ndesc); in mt76_dma_sync_idx()
196 q->head = Q_READ(q, dma_idx); in mt76_dma_sync_idx()
197 q->tail = q->head; in mt76_dma_sync_idx()
203 if (!q || !q->ndesc) in __mt76_dma_queue_reset()
210 for (i = 0; i < q->ndesc; i++) in __mt76_dma_queue_reset()
211 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in __mt76_dma_queue_reset()
230 struct mt76_queue_entry *entry = &q->entry[q->head]; in mt76_dma_add_rx_buf()
233 int idx = q->head; in mt76_dma_add_rx_buf()
240 rro_desc = (struct mt76_wed_rro_desc *)q->desc; in mt76_dma_add_rx_buf()
241 data = &rro_desc[q->head]; in mt76_dma_add_rx_buf()
245 desc = &q->desc[q->head]; in mt76_dma_add_rx_buf()
248 buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32); in mt76_dma_add_rx_buf()
254 return -ENOMEM; in mt76_dma_add_rx_buf()
256 rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr); in mt76_dma_add_rx_buf()
259 return -ENOMEM; in mt76_dma_add_rx_buf()
266 WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr)); in mt76_dma_add_rx_buf()
267 WRITE_ONCE(desc->buf1, cpu_to_le32(buf1)); in mt76_dma_add_rx_buf()
268 WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); in mt76_dma_add_rx_buf()
269 WRITE_ONCE(desc->info, 0); in mt76_dma_add_rx_buf()
272 entry->dma_addr[0] = buf->addr; in mt76_dma_add_rx_buf()
273 entry->dma_len[0] = buf->len; in mt76_dma_add_rx_buf()
274 entry->txwi = txwi; in mt76_dma_add_rx_buf()
275 entry->buf = data; in mt76_dma_add_rx_buf()
276 entry->wcid = 0xffff; in mt76_dma_add_rx_buf()
277 entry->skip_buf1 = true; in mt76_dma_add_rx_buf()
278 q->head = (q->head + 1) % q->ndesc; in mt76_dma_add_rx_buf()
279 q->queued++; in mt76_dma_add_rx_buf()
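mt76_dma_add_rx_buf() publishes one receive buffer to the hardware; a summary of the steps visible above:

    /*
     * 1. Pick the entry and descriptor at q->head.
     * 2. WED RRO rings write a struct mt76_wed_rro_desc instead of the
     *    normal struct mt76_desc.
     * 3. WED RX rings allocate an rx token via mt76_rx_token_consume();
     *    either allocation failure unwinds with -ENOMEM.
     * 4. buf1 carries the upper address bits (MT_DMA_CTL_SDP0_H) for
     *    DMA addresses above 32 bits.
     * 5. buf0/buf1/ctrl/info are published with WRITE_ONCE(), and the
     *    address, length, txwi and wcid are mirrored into q->entry[] for
     *    the cleanup path.
     * 6. q->head advances modulo q->ndesc and q->queued is bumped.
     */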
291 int i, idx = -1; in mt76_dma_add_buf()
295 q->entry[q->head].txwi = DMA_DUMMY_DATA; in mt76_dma_add_buf()
296 q->entry[q->head].skip_buf0 = true; in mt76_dma_add_buf()
302 idx = q->head; in mt76_dma_add_buf()
303 next = (q->head + 1) % q->ndesc; in mt76_dma_add_buf()
305 desc = &q->desc[idx]; in mt76_dma_add_buf()
306 entry = &q->entry[idx]; in mt76_dma_add_buf()
309 entry->skip_buf0 = true; in mt76_dma_add_buf()
310 entry->skip_buf1 = i == nbufs - 1; in mt76_dma_add_buf()
312 entry->dma_addr[0] = buf[0].addr; in mt76_dma_add_buf()
313 entry->dma_len[0] = buf[0].len; in mt76_dma_add_buf()
319 if (i < nbufs - 1) { in mt76_dma_add_buf()
320 entry->dma_addr[1] = buf[1].addr; in mt76_dma_add_buf()
321 entry->dma_len[1] = buf[1].len; in mt76_dma_add_buf()
329 entry->skip_buf1 = true; in mt76_dma_add_buf()
332 if (i == nbufs - 1) in mt76_dma_add_buf()
334 else if (i == nbufs - 2) in mt76_dma_add_buf()
337 WRITE_ONCE(desc->buf0, cpu_to_le32(buf0)); in mt76_dma_add_buf()
338 WRITE_ONCE(desc->buf1, cpu_to_le32(buf1)); in mt76_dma_add_buf()
339 WRITE_ONCE(desc->info, cpu_to_le32(info)); in mt76_dma_add_buf()
340 WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); in mt76_dma_add_buf()
342 q->head = next; in mt76_dma_add_buf()
343 q->queued++; in mt76_dma_add_buf()
346 q->entry[idx].txwi = txwi; in mt76_dma_add_buf()
347 q->entry[idx].skb = skb; in mt76_dma_add_buf()
348 q->entry[idx].wcid = 0xffff; in mt76_dma_add_buf()
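mt76_dma_add_buf() is the transmit-side equivalent and packs up to two buffers per descriptor, which is why the loop consumes buf[] two at a time:

    /*
     * skip_buf0/skip_buf1 tell mt76_dma_tx_cleanup_idx() which halves of
     * the descriptor actually need dma_unmap_single(); DMA_DUMMY_DATA in
     * entry->txwi marks a placeholder that cleanup later resets to NULL.
     * The i == nbufs - 1 / i == nbufs - 2 checks set the "last segment"
     * control bits for buf0/buf1 (the flag names are truncated in the
     * matches), and txwi/skb/wcid are recorded only on the final
     * descriptor index so TX completion can find them again.
     */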
357 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_tx_cleanup_idx()
359 if (!e->skip_buf0) in mt76_dma_tx_cleanup_idx()
360 dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0], in mt76_dma_tx_cleanup_idx()
363 if (!e->skip_buf1) in mt76_dma_tx_cleanup_idx()
364 dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1], in mt76_dma_tx_cleanup_idx()
367 if (e->txwi == DMA_DUMMY_DATA) in mt76_dma_tx_cleanup_idx()
368 e->txwi = NULL; in mt76_dma_tx_cleanup_idx()
378 Q_WRITE(q, cpu_idx, q->head); in mt76_dma_kick_queue()
387 if (!q || !q->ndesc) in mt76_dma_tx_cleanup()
390 spin_lock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
392 last = -1; in mt76_dma_tx_cleanup()
396 while (q->queued > 0 && q->tail != last) { in mt76_dma_tx_cleanup()
397 mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); in mt76_dma_tx_cleanup()
401 if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE)) in mt76_dma_tx_cleanup()
405 if (!flush && q->tail == last) in mt76_dma_tx_cleanup()
408 spin_unlock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
411 spin_lock_bh(&q->lock); in mt76_dma_tx_cleanup()
414 spin_unlock_bh(&q->lock); in mt76_dma_tx_cleanup()
417 if (!q->queued) in mt76_dma_tx_cleanup()
418 wake_up(&dev->tx_wait); in mt76_dma_tx_cleanup()
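TX completion walks the ring from q->tail toward the index the hardware reports; a condensed sketch of the loop around the matched lines (the dma_idx reads and the skb completion call are truncated in the matches and filled in here as assumptions):

    /* flush == true reclaims every queued slot unconditionally. */
    last = flush ? -1 : Q_READ(q, dma_idx);
    while (q->queued > 0 && q->tail != last) {
        mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
        /* ... complete entry.skb and recycle entry.txwi with
         * mt76_put_txwi() unless MT_DRV_TXWI_NO_FREE is set ... */
        if (!flush && q->tail == last)
            last = Q_READ(q, dma_idx);   /* chase new completions */
    }
    if (!q->queued)
        wake_up(&dev->tx_wait);          /* unblock TX waiters */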
425 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_get_buf()
426 struct mt76_desc *desc = &q->desc[idx]; in mt76_dma_get_buf()
428 void *buf = e->buf; in mt76_dma_get_buf()
433 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl)); in mt76_dma_get_buf()
439 desc_info = le32_to_cpu(desc->info); in mt76_dma_get_buf()
443 buf1 = le32_to_cpu(desc->buf1); in mt76_dma_get_buf()
448 struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token); in mt76_dma_get_buf() local
450 if (!t) in mt76_dma_get_buf()
453 dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, in mt76_dma_get_buf()
454 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
455 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
457 buf = t->ptr; in mt76_dma_get_buf()
458 t->dma_addr = 0; in mt76_dma_get_buf()
459 t->ptr = NULL; in mt76_dma_get_buf()
461 mt76_put_rxwi(dev, t); in mt76_dma_get_buf()
465 dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0], in mt76_dma_get_buf()
466 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
467 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
471 e->buf = NULL; in mt76_dma_get_buf()
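mt76_dma_get_buf() recovers the CPU-side buffer behind a completed RX descriptor, via one of two paths:

    /*
     * - WED RX rings: the rx token stored in the descriptor is resolved
     *   back to its mt76_txwi_cache entry with mt76_rx_token_release();
     *   the data pointer is taken from t->ptr, the buffer is synced for
     *   CPU access, and the emptied entry goes back through
     *   mt76_put_rxwi().
     * - normal rings: the buffer is simply e->buf, synced with
     *   dma_sync_single_for_cpu() in the page pool's DMA direction.
     * In both cases e->buf is cleared so the slot can be refilled.
     */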
479 int idx = q->tail; in mt76_dma_dequeue()
482 if (!q->queued) in mt76_dma_dequeue()
490 q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_dequeue()
491 else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) in mt76_dma_dequeue()
495 q->tail = (q->tail + 1) % q->ndesc; in mt76_dma_dequeue()
496 q->queued--; in mt76_dma_dequeue()
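mt76_dma_dequeue() decides whether the slot at q->tail may be reclaimed; the DMA_DONE bit is the hand-off marker between hardware and driver:

    /*
     * - flush: force MT_DMA_CTL_DMA_DONE so every queued slot is drained,
     *   which pairs with __mt76_dma_queue_reset() pre-setting the bit on
     *   all descriptors.
     * - normal operation: bail out unless the hardware has set DMA_DONE.
     * A consumed slot advances q->tail modulo q->ndesc and drops q->queued.
     */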
508 if (test_bit(MT76_MCU_RESET, &dev->phy.state)) in mt76_dma_tx_queue_skb_raw()
511 if (q->queued + 1 >= q->ndesc - 1) in mt76_dma_tx_queue_skb_raw()
514 addr = dma_map_single(dev->dma_dev, skb->data, skb->len, in mt76_dma_tx_queue_skb_raw()
516 if (unlikely(dma_mapping_error(dev->dma_dev, addr))) in mt76_dma_tx_queue_skb_raw()
520 buf.len = skb->len; in mt76_dma_tx_queue_skb_raw()
522 spin_lock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
525 spin_unlock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
531 return -ENOMEM; in mt76_dma_tx_queue_skb_raw()
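mt76_dma_tx_queue_skb_raw() is the single-buffer path (typically MCU messages); the add/kick calls between taking and releasing q->lock are truncated in the matches:

    /*
     * Map skb->data linearly, queue it as one mt76_queue_buf under
     * q->lock, then kick the ring. The function bails out with -ENOMEM
     * (presumably freeing the skb) if an MCU reset is in flight, the ring
     * is full, or the DMA mapping fails.
     */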
545 struct mt76_dev *dev = phy->dev; in mt76_dma_tx_queue_skb()
547 int len, n = 0, ret = -ENOMEM; in mt76_dma_tx_queue_skb()
548 struct mt76_txwi_cache *t; in mt76_dma_tx_queue_skb() local
553 if (test_bit(MT76_RESET, &phy->state)) in mt76_dma_tx_queue_skb()
556 t = mt76_get_txwi(dev); in mt76_dma_tx_queue_skb()
557 if (!t) in mt76_dma_tx_queue_skb()
560 txwi = mt76_get_txwi_ptr(dev, t); in mt76_dma_tx_queue_skb()
562 skb->prev = skb->next = NULL; in mt76_dma_tx_queue_skb()
563 if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS) in mt76_dma_tx_queue_skb()
567 addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE); in mt76_dma_tx_queue_skb()
568 if (unlikely(dma_mapping_error(dev->dma_dev, addr))) in mt76_dma_tx_queue_skb()
571 tx_info.buf[n].addr = t->dma_addr; in mt76_dma_tx_queue_skb()
572 tx_info.buf[n++].len = dev->drv->txwi_size; in mt76_dma_tx_queue_skb()
580 addr = dma_map_single(dev->dma_dev, iter->data, iter->len, in mt76_dma_tx_queue_skb()
582 if (unlikely(dma_mapping_error(dev->dma_dev, addr))) in mt76_dma_tx_queue_skb()
586 tx_info.buf[n++].len = iter->len; in mt76_dma_tx_queue_skb()
590 if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) { in mt76_dma_tx_queue_skb()
591 ret = -ENOMEM; in mt76_dma_tx_queue_skb()
595 dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size, in mt76_dma_tx_queue_skb()
597 ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info); in mt76_dma_tx_queue_skb()
598 dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size, in mt76_dma_tx_queue_skb()
604 tx_info.info, tx_info.skb, t); in mt76_dma_tx_queue_skb()
607 for (n--; n > 0; n--) in mt76_dma_tx_queue_skb()
608 dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr, in mt76_dma_tx_queue_skb()
615 struct mt76_phy *phy = hw->priv; in mt76_dma_tx_queue_skb()
617 if (tx_info.skb == phy->test.tx_skb) in mt76_dma_tx_queue_skb()
618 phy->test.tx_done--; in mt76_dma_tx_queue_skb()
622 mt76_put_txwi(dev, t); in mt76_dma_tx_queue_skb()
627 spin_lock_bh(&dev->rx_lock); in mt76_dma_tx_queue_skb()
629 spin_unlock_bh(&dev->rx_lock); in mt76_dma_tx_queue_skb()
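mt76_dma_tx_queue_skb() is the data-path transmit routine; buf[0] is always the TXWI, followed by the skb and its fragments:

    /*
     * - A TXWI is taken from the cache (mt76_get_txwi()) and mapped as
     *   tx_info.buf[0]; skb->data and each fragment follow as buf[1..n].
     * - dev->drv->tx_prepare_skb() fills the TXWI between the
     *   dma_sync_single_for_cpu()/_for_device() pair so the device sees a
     *   coherent TXWI.
     * - The error path unmaps buffers n-1..1 (index 0 stays mapped because
     *   the TXWI is cached), returns the TXWI with mt76_put_txwi(), fixes
     *   up testmode counters, and reports the failed skb back to mac80211
     *   under dev->rx_lock (the status call itself is truncated here).
     */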
638 int len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_fill_buf()
641 if (!q->ndesc) in mt76_dma_rx_fill_buf()
644 while (q->queued < q->ndesc - 1) { in mt76_dma_rx_fill_buf()
654 buf = mt76_get_page_pool_buf(q, &offset, q->buf_size); in mt76_dma_rx_fill_buf()
659 dir = page_pool_get_dma_dir(q->page_pool); in mt76_dma_rx_fill_buf()
660 dma_sync_single_for_device(dev->dma_dev, addr, len, dir); in mt76_dma_rx_fill_buf()
662 qbuf.addr = addr + q->buf_offset; in mt76_dma_rx_fill_buf()
664 qbuf.len = len - q->buf_offset; in mt76_dma_rx_fill_buf()
684 if (!q->ndesc) in mt76_dma_rx_fill()
687 spin_lock_bh(&q->lock); in mt76_dma_rx_fill()
689 spin_unlock_bh(&q->lock); in mt76_dma_rx_fill()
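The RX refill loop keeps the ring topped up from the queue's page pool; mt76_dma_rx_fill() is just the q->lock wrapper around mt76_dma_rx_fill_buf():

    /*
     * While q->queued < q->ndesc - 1: take a buffer with
     * mt76_get_page_pool_buf(), dma_sync_single_for_device() it in the
     * pool's DMA direction, and queue it at addr + q->buf_offset with
     * length len - q->buf_offset (the actual mt76_dma_add_rx_buf() call is
     * truncated in the matches).
     */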
701 spin_lock_init(&q->lock); in mt76_dma_alloc_queue()
702 spin_lock_init(&q->cleanup_lock); in mt76_dma_alloc_queue()
704 q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; in mt76_dma_alloc_queue()
705 q->ndesc = n_desc; in mt76_dma_alloc_queue()
706 q->buf_size = bufsize; in mt76_dma_alloc_queue()
707 q->hw_idx = idx; in mt76_dma_alloc_queue()
711 q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size, in mt76_dma_alloc_queue()
712 &q->desc_dma, GFP_KERNEL); in mt76_dma_alloc_queue()
713 if (!q->desc) in mt76_dma_alloc_queue()
714 return -ENOMEM; in mt76_dma_alloc_queue()
720 rro_desc = (struct mt76_wed_rro_desc *)q->desc; in mt76_dma_alloc_queue()
721 for (i = 0; i < q->ndesc; i++) { in mt76_dma_alloc_queue()
725 cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1; in mt76_dma_alloc_queue()
729 size = q->ndesc * sizeof(*q->entry); in mt76_dma_alloc_queue()
730 q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); in mt76_dma_alloc_queue()
731 if (!q->entry) in mt76_dma_alloc_queue()
732 return -ENOMEM; in mt76_dma_alloc_queue()
742 if (mtk_wed_device_active(&dev->mmio.wed)) { in mt76_dma_alloc_queue()
743 if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) || in mt76_dma_alloc_queue()
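mt76_dma_alloc_queue() sets up one ring:

    /*
     * - per-queue q->lock / q->cleanup_lock,
     * - register window at dev->mmio.regs + ring_base + idx * MT_RING_SIZE,
     * - a coherent descriptor array (dmam_alloc_coherent) plus a
     *   q->entry[] shadow array (devm_kzalloc),
     * - WED RRO indirect rings reinterpret the descriptor memory as
     *   struct mt76_wed_rro_desc and pre-seed each command's magic_cnt,
     * - when a WED device is active, the ring may additionally be handed
     *   over to WED (that branch is truncated in the matches).
     */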
759 if (!q->ndesc) in mt76_dma_rx_cleanup()
763 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
765 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
774 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
775 if (q->rx_head) { in mt76_dma_rx_cleanup()
776 dev_kfree_skb(q->rx_head); in mt76_dma_rx_cleanup()
777 q->rx_head = NULL; in mt76_dma_rx_cleanup()
780 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
786 struct mt76_queue *q = &dev->q_rx[qid]; in mt76_dma_rx_reset()
788 if (!q->ndesc) in mt76_dma_rx_reset()
794 for (i = 0; i < q->ndesc; i++) in mt76_dma_rx_reset()
795 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_rx_reset()
806 if (mtk_wed_device_active(&dev->mmio.wed) && in mt76_dma_rx_reset()
818 struct sk_buff *skb = q->rx_head; in mt76_add_fragment()
820 int nr_frags = shinfo->nr_frags; in mt76_add_fragment()
822 if (nr_frags < ARRAY_SIZE(shinfo->frags)) { in mt76_add_fragment()
824 int offset = data - page_address(page) + q->buf_offset; in mt76_add_fragment()
826 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); in mt76_add_fragment()
834 q->rx_head = NULL; in mt76_add_fragment()
835 if (nr_frags < ARRAY_SIZE(shinfo->frags)) in mt76_add_fragment()
836 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_add_fragment()
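mt76_add_fragment() reassembles frames that span several RX buffers:

    /*
     * While "more" fragments are pending, each new buffer is attached to
     * q->rx_head as a page frag, bounded by the skb_shinfo frags array.
     * When the last fragment arrives the assembled skb is handed to
     * dev->drv->rx_skb(); if the frag array overflowed, the skb is
     * presumably dropped instead (that branch is not matched).
     */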
862 if (q->tail == dma_idx) in mt76_dma_rx_process()
865 if (q->tail == dma_idx) in mt76_dma_rx_process()
877 if (q->rx_head) in mt76_dma_rx_process()
878 data_len = q->buf_size; in mt76_dma_rx_process()
880 data_len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_process()
882 if (data_len < len + q->buf_offset) { in mt76_dma_rx_process()
883 dev_kfree_skb(q->rx_head); in mt76_dma_rx_process()
884 q->rx_head = NULL; in mt76_dma_rx_process()
888 if (q->rx_head) { in mt76_dma_rx_process()
894 if (!more && dev->drv->rx_check && in mt76_dma_rx_process()
895 !(dev->drv->rx_check(dev, data, len))) in mt76_dma_rx_process()
898 skb = napi_build_skb(data, q->buf_size); in mt76_dma_rx_process()
902 skb_reserve(skb, q->buf_offset); in mt76_dma_rx_process()
905 *(u32 *)skb->cb = info; in mt76_dma_rx_process()
911 q->rx_head = skb; in mt76_dma_rx_process()
915 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_dma_rx_process()
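mt76_dma_rx_process() is the NAPI receive loop; per iteration, up to the budget:

    /*
     * - stop once q->tail catches up with the hardware index,
     * - pull the buffer (mt76_dma_get_buf()), drop oversized frames
     *   (len + q->buf_offset > data_len) together with any partial
     *   q->rx_head,
     * - append to an in-progress q->rx_head fragment chain, or
     * - run the optional dev->drv->rx_check() filter, wrap the buffer with
     *   napi_build_skb(), skb_reserve(q->buf_offset), stash the descriptor
     *   info word in skb->cb, then either park the skb in q->rx_head (more
     *   fragments follow) or hand it to dev->drv->rx_skb().
     */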
931 dev = mt76_priv(napi->dev); in mt76_dma_rx_poll()
932 qid = napi - dev->napi; in mt76_dma_rx_poll()
937 cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done); in mt76_dma_rx_poll()
945 dev->drv->rx_poll_complete(dev, qid); in mt76_dma_rx_poll()
958 dev->napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *)); in mt76_dma_init()
959 if (!dev->napi_dev) in mt76_dma_init()
960 return -ENOMEM; in mt76_dma_init()
965 priv = netdev_priv(dev->napi_dev); in mt76_dma_init()
968 dev->tx_napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *)); in mt76_dma_init()
969 if (!dev->tx_napi_dev) { in mt76_dma_init()
970 free_netdev(dev->napi_dev); in mt76_dma_init()
971 return -ENOMEM; in mt76_dma_init()
973 priv = netdev_priv(dev->tx_napi_dev); in mt76_dma_init()
976 snprintf(dev->napi_dev->name, sizeof(dev->napi_dev->name), "%s", in mt76_dma_init()
977 wiphy_name(dev->hw->wiphy)); in mt76_dma_init()
978 dev->napi_dev->threaded = 1; in mt76_dma_init()
979 init_completion(&dev->mmio.wed_reset); in mt76_dma_init()
980 init_completion(&dev->mmio.wed_reset_complete); in mt76_dma_init()
983 netif_napi_add(dev->napi_dev, &dev->napi[i], poll); in mt76_dma_init()
984 mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false); in mt76_dma_init()
985 napi_enable(&dev->napi[i]); in mt76_dma_init()
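mt76_dma_init() wires up NAPI; the dummy netdevs exist only so each napi context has a parent device whose private area can carry the mt76_dev pointer:

    /*
     * alloc_netdev_dummy(sizeof(struct mt76_dev *)) is sized to hold that
     * back-pointer, which mt76_dma_rx_poll() recovers via
     * mt76_priv(napi->dev) (the store into netdev_priv() is truncated in
     * the matches). Each RX queue gets netif_napi_add(), an initial
     * mt76_dma_rx_fill_buf(), and napi_enable(); RX NAPI runs threaded.
     */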
1005 dev->queue_ops = &mt76_dma_ops; in mt76_dma_attach()
1013 mt76_worker_disable(&dev->tx_worker); in mt76_dma_cleanup()
1014 napi_disable(&dev->tx_napi); in mt76_dma_cleanup()
1015 netif_napi_del(&dev->tx_napi); in mt76_dma_cleanup()
1017 for (i = 0; i < ARRAY_SIZE(dev->phys); i++) { in mt76_dma_cleanup()
1018 struct mt76_phy *phy = dev->phys[i]; in mt76_dma_cleanup()
1024 for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++) in mt76_dma_cleanup()
1025 mt76_dma_tx_cleanup(dev, phy->q_tx[j], true); in mt76_dma_cleanup()
1028 for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++) in mt76_dma_cleanup()
1029 mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true); in mt76_dma_cleanup()
1032 struct mt76_queue *q = &dev->q_rx[i]; in mt76_dma_cleanup()
1034 if (mtk_wed_device_active(&dev->mmio.wed) && in mt76_dma_cleanup()
1038 netif_napi_del(&dev->napi[i]); in mt76_dma_cleanup()
1041 page_pool_destroy(q->page_pool); in mt76_dma_cleanup()
1044 if (mtk_wed_device_active(&dev->mmio.wed)) in mt76_dma_cleanup()
1045 mtk_wed_device_detach(&dev->mmio.wed); in mt76_dma_cleanup()
1047 if (mtk_wed_device_active(&dev->mmio.wed_hif2)) in mt76_dma_cleanup()
1048 mtk_wed_device_detach(&dev->mmio.wed_hif2); in mt76_dma_cleanup()
1052 free_netdev(dev->napi_dev); in mt76_dma_cleanup()
1053 free_netdev(dev->tx_napi_dev); in mt76_dma_cleanup()
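mt76_dma_cleanup() tears everything down in roughly the reverse order:

    /*
     * Stop the tx worker and TX NAPI, flush every per-phy TX queue and MCU
     * queue with mt76_dma_tx_cleanup(dev, q, true), then for each RX queue
     * delete its NAPI, clean the ring and destroy its page pool (WED
     * RRO-owned rings appear to be skipped), detach any active WED
     * devices, and finally free the two dummy netdevs. The pending
     * txwi/rxwi caches are presumably drained here as well via the
     * *_free_pending helpers above (those calls are not among the matched
     * lines).
     */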