xref: /linux/drivers/net/wireless/mediatek/mt76/dma.c (revision f5db8841ebe59dbdf07fda797c88ccb51e0c893d)
// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

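/*
 * Queue register access helpers. When a queue is owned by the WED
 * (Wireless Ethernet Dispatch) block, its ring registers are not mapped
 * into host MMIO space and have to be accessed through the WED driver;
 * otherwise a plain readl()/writel() on the queue register block is used.
 */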
#define Q_READ(_q, _field) ({						\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read((_q)->wed,		\
					       ((_q)->wed_regs +	\
					        _offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_q, _field, _val)	do {				\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write((_q)->wed,			\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_q, _field)		readl(&(_q)->regs->_field)
#define Q_WRITE(_q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

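/*
 * Allocate a TX descriptor buffer (txwi) together with its tracking
 * struct: the txwi data sits at the start of the allocation and the
 * struct mt76_txwi_cache metadata is placed right behind it, so a
 * single kzalloc() covers both. Only the txwi area itself is DMA-mapped,
 * and the mapping stays alive for as long as the entry is cached.
 */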
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
		kfree(txwi);
		return NULL;
	}

	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->ptr = NULL;
	return t;
}

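/*
 * txwi/rxwi entries are kept on per-device free lists rather than being
 * freed after use. Note the locking split: the TX cache is protected by
 * dev->lock, the RX (WED) cache by dev->wed_lock with BH disabled.
 *
 * Typical usage from a driver (sketch only):
 *
 *	struct mt76_txwi_cache *t = mt76_get_rxwi(dev);
 *
 *	if (!t)
 *		return -ENOMEM;
 *	...
 *	mt76_put_rxwi(dev, t);
 */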
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			mt76_put_page_pool_buf(t->ptr, false);
		kfree(t);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);

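/*
 * Re-program the descriptor ring base and size into the queue registers
 * and resynchronize the software head/tail indices with the hardware
 * dma_idx. For WED RRO rings (hardware RX reordering offload) the
 * MT_DMA_RRO_EN flag is or-ed into the ring size register.
 */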
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(q, desc_base, q->desc_dma);
	if (q->flags & MT_QFLAG_WED_RRO_EN)
		Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
	else
		Q_WRITE(q, ring_size, q->ndesc);
	q->head = Q_READ(q, dma_idx);
	q->tail = q->head;
}

static void
__mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
		       bool reset_idx)
{
	if (!q || !q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		/* clear descriptors */
		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	if (reset_idx) {
		Q_WRITE(q, cpu_idx, 0);
		Q_WRITE(q, dma_idx, 0);
	}
	mt76_dma_sync_idx(dev, q);
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	__mt76_dma_queue_reset(dev, q, true);
}

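/*
 * Attach a single RX buffer to the descriptor at q->head. For WED RRO
 * indication rings the "buffer" is the in-ring descriptor itself, so no
 * DMA address gets programmed. For WED RX rings the buffer is first
 * registered with the RX token table, and it is the token id, not a
 * host pointer, that later identifies the buffer on completion.
 */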
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
{
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
	struct mt76_desc *desc;
	int idx = q->head;
	u32 buf1 = 0, ctrl;
	int rx_token;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		data = &rro_desc[q->head];
		goto done;
	}

	desc = &q->desc[q->head];
	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
#endif

	if (mt76_queue_is_wed_rx(q)) {
		txwi = mt76_get_rxwi(dev);
		if (!txwi)
			return -ENOMEM;

		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
		if (rx_token < 0) {
			mt76_put_rxwi(dev, txwi);
			return -ENOMEM;
		}

		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
		ctrl |= MT_DMA_CTL_TO_HOST;
	}

	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);

done:
	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
	entry->buf = data;
	entry->wcid = 0xffff;
	entry->skip_buf1 = true;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

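/*
 * Queue up to nbufs scatter buffers for TX. Each hardware descriptor
 * carries two buffer pointers, which is why the loop below consumes the
 * buf array in pairs (i += 2); with an odd nbufs the final descriptor
 * only uses buf0. skb and txwi ownership is recorded on the last entry
 * so that TX completion can free them.
 */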
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	int i, idx = -1;
	u32 ctrl, next;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		next = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		info |= FIELD_PREP(MT_DMA_CTL_SDP0_H, buf[0].addr >> 32);
#endif
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			info |= FIELD_PREP(MT_DMA_CTL_SDP1_H,
					   buf[1].addr >> 32);
#endif
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->head = next;
		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

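/*
 * Publish new descriptors to the hardware. The wmb() orders the
 * WRITE_ONCE() descriptor updates above against the cpu_idx register
 * write, so the DMA engine never sees the new index before the
 * descriptors themselves are visible in memory.
 */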
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(q, cpu_idx, q->head);
}

static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

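/*
 * Detach and return the buffer behind descriptor idx. On WED RX rings
 * the descriptor only holds a token; the host buffer is looked up (and
 * released) through the token table, and its rxwi entry goes back to
 * the cache.
 */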
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	u32 ctrl, desc_info, buf1;
	void *buf = e->buf;

	if (mt76_queue_is_wed_rro_ind(q))
		goto done;

	ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
	if (len) {
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	desc_info = le32_to_cpu(desc->info);
	if (info)
		*info = desc_info;

	buf1 = le32_to_cpu(desc->buf1);
	mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);

	if (mt76_queue_is_wed_rx(q)) {
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
				SKB_WITH_OVERHEAD(q->buf_size),
				page_pool_get_dma_dir(q->page_pool));

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);
		if (drop)
			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
	} else {
		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
				SKB_WITH_OVERHEAD(q->buf_size),
				page_pool_get_dma_dir(q->page_pool));
	}

done:
	e->buf = NULL;
	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (mt76_queue_is_wed_rro_data(q))
		return NULL;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		if (flush)
			q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
		else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
			return NULL;
	}

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

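/*
 * Transmit a raw skb (typically MCU commands) as a single, linear DMA
 * buffer, without a txwi. The skb is consumed on error.
 */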
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		goto error;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

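/*
 * Main data-path TX: build the buffer list as
 * [txwi, skb head, frag0, frag1, ...], let the driver fill the txwi via
 * tx_prepare_skb(), then hand everything to mt76_dma_add_buf(). Since
 * each descriptor takes two buffers, the ring-space check needs
 * (nbuf + 1) / 2 descriptors. Note the unmap loop on the error path
 * stops before index 0: that slot is the long-lived txwi mapping, which
 * is returned to the cache instead. On failure the skb status is still
 * reported to mac80211 so accounting stays consistent.
 */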
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	if (test_bit(MT76_RESET, &dev->phy.state))
		goto free_skb;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	spin_lock_bh(&dev->rx_lock);
	ieee80211_tx_status_ext(hw, &status);
	spin_unlock_bh(&dev->rx_lock);

	return ret;
}

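/*
 * Refill an RX ring from the queue's page pool until it is full. Each
 * buffer is synced for the device before being handed over. WED RRO
 * indication rings are backed by the descriptor memory itself and need
 * no buffer allocation. The queue is kicked once at the end rather than
 * per buffer.
 */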
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		 bool allow_direct)
{
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf = {};
		enum dma_data_direction dir;
		dma_addr_t addr;
		int offset;
		void *buf = NULL;

		if (mt76_queue_is_wed_rro_ind(q))
			goto done;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			break;

		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		dir = page_pool_get_dma_dir(q->page_pool);
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

		qbuf.addr = addr + q->buf_offset;
done:
		qbuf.len = len - q->buf_offset;
		qbuf.skip_unmap = false;
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
			mt76_put_page_pool_buf(buf, allow_direct);
			break;
		}
		frames++;
	}

	if (frames || mt76_queue_is_wed_rx(q))
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

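/*
 * Register a queue with the WED block. q->flags is saved and restored
 * around the setup calls because some ring types must temporarily drop
 * MT_QFLAG_WED (or be reset) before the WED driver takes over. Returns
 * 0 when WED is inactive for the queue.
 */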
int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	int ret = 0, type, ring;
	u16 flags;

	if (!q || !q->ndesc)
		return -EINVAL;

	flags = q->flags;
	if (!q->wed || !mtk_wed_device_active(q->wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
						   reset);
		if (!ret)
			q->wed_regs = q->wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q, false);

		ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
		if (!ret)
			q->wed_regs = q->wed->txfree_ring.reg_base;
		break;
	case MT76_WED_Q_RX:
		ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
						   reset);
		if (!ret)
			q->wed_regs = q->wed->rx_ring[ring].reg_base;
		break;
	case MT76_WED_RRO_Q_DATA:
		q->flags &= ~MT_QFLAG_WED;
		__mt76_dma_queue_reset(dev, q, false);
		mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
		q->head = q->ndesc - 1;
		q->queued = q->head;
		break;
	case MT76_WED_RRO_Q_MSDU_PG:
		q->flags &= ~MT_QFLAG_WED;
		__mt76_dma_queue_reset(dev, q, false);
		mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
		q->head = q->ndesc - 1;
		q->queued = q->head;
		break;
	case MT76_WED_RRO_Q_IND:
		q->flags &= ~MT_QFLAG_WED;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q, false);
		mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	q->flags = flags;

	return ret;
#else
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);

static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
					    : sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;
		int i;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		for (i = 0; i < q->ndesc; i++) {
			struct mt76_wed_rro_ind *cmd;

			cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
			cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
		}
	}

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_create_page_pool(dev, q);
	if (ret)
		return ret;

	ret = mt76_dma_wed_setup(dev, q, false);
	if (ret)
		return ret;

	if (mtk_wed_device_active(&dev->mmio.wed)) {
		if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
		    mt76_queue_is_wed_tx_free(q))
			return 0;
	}

	mt76_dma_queue_reset(dev, q);

	return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	do {
		spin_lock_bh(&q->lock);
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		spin_unlock_bh(&q->lock);

		if (!buf)
			break;

		if (!mt76_queue_is_wed_rro(q))
			mt76_put_page_pool_buf(buf, false);
	} while (1);

	spin_lock_bh(&q->lock);
	if (q->rx_head) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
	}

	spin_unlock_bh(&q->lock);
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	if (!q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	mt76_dma_rx_cleanup(dev, q);

	/* reset WED rx queues */
	mt76_dma_wed_setup(dev, q, true);

	if (mt76_queue_is_wed_tx_free(q))
		return;

	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    mt76_queue_is_wed_rro(q))
		return;

	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q, false);
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info, bool allow_direct)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		mt76_put_page_pool_buf(data, allow_direct);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}

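/*
 * NAPI RX processing loop: dequeue completed buffers, reassemble frames
 * that span several buffers via q->rx_head, build skbs from page-pool
 * memory and push them up to the driver. On the WED txfree ring,
 * completion appears to be tracked by comparing q->tail against the
 * hardware dma_idx rather than via the per-descriptor DMA_DONE bit,
 * hence the check_ddone path below.
 */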
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool allow_direct = !mt76_queue_is_wed_rx(q);
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    mt76_queue_is_wed_tx_free(q)) {
		dma_idx = Q_READ(q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info,
					  allow_direct);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = napi_build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);
		skb_mark_for_recycle(skb);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		mt76_put_page_pool_buf(data, allow_direct);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

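/*
 * Set up the dummy netdevs that back the NAPI contexts (one shared by
 * the RX queues, plus a separate one for TX), pre-fill all RX rings and
 * enable NAPI. RX processing runs in threaded NAPI mode.
 */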
static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;
	init_completion(&dev->mmio.wed_reset);
	init_completion(&dev->mmio.wed_reset_complete);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_wed_reset(struct mt76_dev *dev)
{
	struct mt76_mmio *mmio = &dev->mmio;

	if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
		return;

	complete(&mmio->wed_reset);

	if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
		dev_err(dev->dev, "wed reset complete timeout\n");
}
EXPORT_SYMBOL_GPL(mt76_dma_wed_reset);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    mt76_queue_is_wed_rro(q))
			continue;

		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, q);

		page_pool_destroy(q->page_pool);
	}

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);

	if (mtk_wed_device_active(&dev->mmio.wed_hif2))
		mtk_wed_device_detach(&dev->mmio.wed_hif2);

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);