// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#if defined(__FreeBSD__)
#include <linux/cache.h>
#include <net/page_pool.h>
#endif
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

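/*
 * Queue register accessors: rings owned by the MediaTek WED (Wireless
 * Ethernet Dispatch) block are accessed through the WED register window
 * via mtk_wed_device_reg_read()/write(); all other rings use plain MMIO
 * accesses on the queue register block.
 */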
#define Q_READ(_q, _field) ({						\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read((_q)->wed,		\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_q, _field, _val) do {					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write((_q)->wed,			\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_q, _field)		readl(&(_q)->regs->_field)
#define Q_WRITE(_q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

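/* Allocate a TXWI buffer followed by its cache entry and map the TXWI
 * area for DMA to the device.
 */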
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
		kfree(txwi);
		return NULL;
	}

	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->ptr = NULL;
	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			mt76_put_page_pool_buf(t->ptr, false);
		kfree(t);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(q, desc_base, q->desc_dma);
	if (q->flags & MT_QFLAG_WED_RRO_EN)
		Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
	else
		Q_WRITE(q, ring_size, q->ndesc);
	q->head = Q_READ(q, dma_idx);
	q->tail = q->head;
}

void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
			    bool reset_idx)
{
	if (!q || !q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		/* clear descriptors */
		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	if (reset_idx) {
		Q_WRITE(q, cpu_idx, 0);
		Q_WRITE(q, dma_idx, 0);
	}
	mt76_dma_sync_idx(dev, q);
}

void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	__mt76_dma_queue_reset(dev, q, true);
}

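/* Add a single RX buffer to the ring. WED RRO indication rings only use
 * their preallocated descriptor memory; WED RX data rings additionally
 * register the buffer with an RX token so it can be matched when WED
 * hands it back.
 */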
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
{
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
	struct mt76_desc *desc;
	int idx = q->head;
	u32 buf1 = 0, ctrl;
	int rx_token;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		data = &rro_desc[q->head];
		goto done;
	}

	desc = &q->desc[q->head];
	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
#endif

	if (mt76_queue_is_wed_rx(q)) {
		txwi = mt76_get_rxwi(dev);
		if (!txwi)
			return -ENOMEM;

		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
		if (rx_token < 0) {
			mt76_put_rxwi(dev, txwi);
			return -ENOMEM;
		}

		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
		ctrl |= MT_DMA_CTL_TO_HOST;
	}

	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);

done:
	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
	entry->buf = data;
	entry->wcid = 0xffff;
	entry->skip_buf1 = true;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

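/* Queue scatter buffers for transmission; each hardware descriptor
 * carries up to two buffer pointers.
 */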
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	int i, idx = -1;
	u32 ctrl, next;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		next = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		info |= FIELD_PREP(MT_DMA_CTL_SDP0_H, buf[0].addr >> 32);
#endif
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			info |= FIELD_PREP(MT_DMA_CTL_SDP1_H,
					   buf[1].addr >> 32);
#endif
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->head = next;
		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(q, cpu_idx, q->head);
}

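/* Reclaim completed TX entries up to the hardware DMA index, or all
 * pending entries when flushing, releasing their TXWI buffers.
 */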
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	u32 ctrl, desc_info, buf1;
	void *buf = e->buf;

	if (mt76_queue_is_wed_rro_ind(q))
		goto done;

	ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
	if (len) {
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	desc_info = le32_to_cpu(desc->info);
	if (info)
		*info = desc_info;

	buf1 = le32_to_cpu(desc->buf1);
	mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);

	if (mt76_queue_is_wed_rx(q)) {
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);
		if (drop)
			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
	} else {
		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));
	}

done:
	e->buf = NULL;
	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (mt76_queue_is_wed_rro_data(q))
		return NULL;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		if (flush)
			q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
		else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
			return NULL;
	}

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		goto error;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

static int
mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	if (test_bit(MT76_RESET, &phy->state))
		goto free_skb;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	spin_lock_bh(&dev->rx_lock);
	ieee80211_tx_status_ext(hw, &status);
	spin_unlock_bh(&dev->rx_lock);

	return ret;
}

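/* Refill the RX ring with page-pool buffers. Callers are expected to
 * serialize access; mt76_dma_rx_fill() below is the q->lock-holding
 * wrapper used at runtime.
 */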
static int
mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct)
{
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0;

	if (!q->ndesc)
		return 0;

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf = {};
		enum dma_data_direction dir;
		dma_addr_t addr;
		int offset;
		void *buf = NULL;

		if (mt76_queue_is_wed_rro_ind(q))
			goto done;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			break;

		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		dir = page_pool_get_dma_dir(q->page_pool);
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

		qbuf.addr = addr + q->buf_offset;
done:
		qbuf.len = len - q->buf_offset;
		qbuf.skip_unmap = false;
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
			mt76_put_page_pool_buf(buf, allow_direct);
			break;
		}
		frames++;
	}

	if (frames || mt76_queue_is_wed_rx(q))
		mt76_dma_kick_queue(dev, q);

	return frames;
}

int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct)
{
	int frames;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);
	frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
	spin_unlock_bh(&q->lock);

	return frames;
}

static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

#if defined(__linux__)
	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
#elif defined(__FreeBSD__)
	q->regs = (void *)((u8 *)dev->mmio.regs + ring_base + idx * MT_RING_SIZE);
#endif
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
					    : sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;
		int i;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		for (i = 0; i < q->ndesc; i++) {
			struct mt76_wed_rro_ind *cmd;

			cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
			cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
		}
	}

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_create_page_pool(dev, q);
	if (ret)
		return ret;

	ret = mt76_wed_dma_setup(dev, q, false);
	if (ret)
		return ret;

	if (mtk_wed_device_active(&dev->mmio.wed)) {
		if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
		    mt76_queue_is_wed_tx_free(q))
			return 0;
	}

	mt76_dma_queue_reset(dev, q);

	return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	do {
		spin_lock_bh(&q->lock);
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		spin_unlock_bh(&q->lock);

		if (!buf)
			break;

		if (!mt76_queue_is_wed_rro(q))
			mt76_put_page_pool_buf(buf, false);
	} while (1);

	spin_lock_bh(&q->lock);
	if (q->rx_head) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
	}

	spin_unlock_bh(&q->lock);
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	if (!q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	mt76_dma_rx_cleanup(dev, q);

	/* reset WED rx queues */
	mt76_wed_dma_setup(dev, q, true);

	if (mt76_queue_is_wed_tx_free(q))
		return;

	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    mt76_queue_is_wed_rro(q))
		return;

	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill_buf(dev, q, false);
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info, bool allow_direct)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
#if defined(__linux__)
		int offset = data - page_address(page) + q->buf_offset;
#elif defined(__FreeBSD__)
		int offset = (u8 *)data - (u8 *)page_address(page) + q->buf_offset;
#endif

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		mt76_put_page_pool_buf(data, allow_direct);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}

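/* NAPI RX processing: dequeue completed buffers, reassemble fragmented
 * frames, hand them to the driver via rx_skb() and refill the ring.
 */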
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool allow_direct = !mt76_queue_is_wed_rx(q);
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    mt76_queue_is_wed_tx_free(q)) {
		dma_idx = Q_READ(q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info,
					  allow_direct);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = napi_build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);
		skb_mark_for_recycle(skb);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		mt76_put_page_pool_buf(data, allow_direct);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = mt76_priv(napi->dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	struct mt76_dev **priv;
	int i;

	dev->napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
	if (!dev->napi_dev)
		return -ENOMEM;

	/* napi_dev private data points to mt76_dev parent, so, mt76_dev
	 * can be retrieved given napi_dev
	 */
	priv = netdev_priv(dev->napi_dev);
	*priv = dev;

	dev->tx_napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
	if (!dev->tx_napi_dev) {
		free_netdev(dev->napi_dev);
		return -ENOMEM;
	}
	priv = netdev_priv(dev->tx_napi_dev);
	*priv = dev;

	snprintf(dev->napi_dev->name, sizeof(dev->napi_dev->name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev->threaded = 1;
	init_completion(&dev->mmio.wed_reset);
	init_completion(&dev->mmio.wed_reset_complete);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    mt76_queue_is_wed_rro(q))
			continue;

		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, q);

		page_pool_destroy(q->page_pool);
	}

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);

	if (mtk_wed_device_active(&dev->mmio.wed_hif2))
		mtk_wed_device_detach(&dev->mmio.wed_hif2);

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);
	free_netdev(dev->napi_dev);
	free_netdev(dev->tx_napi_dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);