xref: /linux/drivers/net/wireless/mediatek/mt76/dma.c (revision 8a5f956a9fb7d74fff681145082acfad5afa6bb8)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 
6 #include <linux/dma-mapping.h>
7 #include "mt76.h"
8 #include "dma.h"
9 
10 #if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
11 
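/* Queue register accessors: when a queue is managed by WED, its registers
 * are accessed through the WED device; otherwise plain MMIO is used.
 */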
12 #define Q_READ(_q, _field) ({						\
13 	u32 _offset = offsetof(struct mt76_queue_regs, _field);		\
14 	u32 _val;							\
15 	if ((_q)->flags & MT_QFLAG_WED)					\
16 		_val = mtk_wed_device_reg_read((_q)->wed,		\
17 					       ((_q)->wed_regs +	\
18 					        _offset));		\
19 	else								\
20 		_val = readl(&(_q)->regs->_field);			\
21 	_val;								\
22 })
23 
24 #define Q_WRITE(_q, _field, _val)	do {				\
25 	u32 _offset = offsetof(struct mt76_queue_regs, _field);		\
26 	if ((_q)->flags & MT_QFLAG_WED)					\
27 		mtk_wed_device_reg_write((_q)->wed,			\
28 					 ((_q)->wed_regs + _offset),	\
29 					 _val);				\
30 	else								\
31 		writel(_val, &(_q)->regs->_field);			\
32 } while (0)
33 
34 #else
35 
36 #define Q_READ(_q, _field)		readl(&(_q)->regs->_field)
37 #define Q_WRITE(_q, _field, _val)	writel(_val, &(_q)->regs->_field)
38 
39 #endif
40 
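/* Allocate a TXWI buffer with its mt76_txwi_cache bookkeeping placed right
 * behind it in the same allocation; only the TXWI area itself is DMA-mapped
 * towards the device.
 */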
41 static struct mt76_txwi_cache *
42 mt76_alloc_txwi(struct mt76_dev *dev)
43 {
44 	struct mt76_txwi_cache *t;
45 	dma_addr_t addr;
46 	u8 *txwi;
47 	int size;
48 
49 	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
50 	txwi = kzalloc(size, GFP_ATOMIC);
51 	if (!txwi)
52 		return NULL;
53 
54 	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
55 			      DMA_TO_DEVICE);
56 	if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
57 		kfree(txwi);
58 		return NULL;
59 	}
60 
61 	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
62 	t->dma_addr = addr;
63 
64 	return t;
65 }
66 
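/* RXWI entries reuse struct mt76_txwi_cache but carry no mapped TXWI; the
 * rx buffer pointer is attached later, when the entry backs an rx token.
 */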
67 static struct mt76_txwi_cache *
68 mt76_alloc_rxwi(struct mt76_dev *dev)
69 {
70 	struct mt76_txwi_cache *t;
71 
72 	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
73 	if (!t)
74 		return NULL;
75 
76 	t->ptr = NULL;
77 	return t;
78 }
79 
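/* Pop a recycled entry from the per-device cache lists; the mt76_get_*()
 * wrappers below fall back to a fresh allocation when the cache is empty.
 */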
80 static struct mt76_txwi_cache *
81 __mt76_get_txwi(struct mt76_dev *dev)
82 {
83 	struct mt76_txwi_cache *t = NULL;
84 
85 	spin_lock(&dev->lock);
86 	if (!list_empty(&dev->txwi_cache)) {
87 		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
88 				     list);
89 		list_del(&t->list);
90 	}
91 	spin_unlock(&dev->lock);
92 
93 	return t;
94 }
95 
96 static struct mt76_txwi_cache *
97 __mt76_get_rxwi(struct mt76_dev *dev)
98 {
99 	struct mt76_txwi_cache *t = NULL;
100 
101 	spin_lock_bh(&dev->wed_lock);
102 	if (!list_empty(&dev->rxwi_cache)) {
103 		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
104 				     list);
105 		list_del(&t->list);
106 	}
107 	spin_unlock_bh(&dev->wed_lock);
108 
109 	return t;
110 }
111 
112 static struct mt76_txwi_cache *
113 mt76_get_txwi(struct mt76_dev *dev)
114 {
115 	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
116 
117 	if (t)
118 		return t;
119 
120 	return mt76_alloc_txwi(dev);
121 }
122 
123 struct mt76_txwi_cache *
124 mt76_get_rxwi(struct mt76_dev *dev)
125 {
126 	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
127 
128 	if (t)
129 		return t;
130 
131 	return mt76_alloc_rxwi(dev);
132 }
133 EXPORT_SYMBOL_GPL(mt76_get_rxwi);
134 
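/* Return an entry to its cache list for reuse; entries are only unmapped
 * and freed by mt76_free_pending_txwi()/mt76_free_pending_rxwi().
 */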
135 void
136 mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
137 {
138 	if (!t)
139 		return;
140 
141 	spin_lock(&dev->lock);
142 	list_add(&t->list, &dev->txwi_cache);
143 	spin_unlock(&dev->lock);
144 }
145 EXPORT_SYMBOL_GPL(mt76_put_txwi);
146 
147 void
148 mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
149 {
150 	if (!t)
151 		return;
152 
153 	spin_lock_bh(&dev->wed_lock);
154 	list_add(&t->list, &dev->rxwi_cache);
155 	spin_unlock_bh(&dev->wed_lock);
156 }
157 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
158 
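/* Drain the caches on teardown: unmap and free every pending TXWI and
 * release any page-pool buffer still attached to an RXWI entry.
 */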
159 static void
160 mt76_free_pending_txwi(struct mt76_dev *dev)
161 {
162 	struct mt76_txwi_cache *t;
163 
164 	local_bh_disable();
165 	while ((t = __mt76_get_txwi(dev)) != NULL) {
166 		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
167 				 DMA_TO_DEVICE);
168 		kfree(mt76_get_txwi_ptr(dev, t));
169 	}
170 	local_bh_enable();
171 }
172 
173 void
174 mt76_free_pending_rxwi(struct mt76_dev *dev)
175 {
176 	struct mt76_txwi_cache *t;
177 
178 	local_bh_disable();
179 	while ((t = __mt76_get_rxwi(dev)) != NULL) {
180 		if (t->ptr)
181 			mt76_put_page_pool_buf(t->ptr, false);
182 		kfree(t);
183 	}
184 	local_bh_enable();
185 }
186 EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
187 
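/* For WED RRO rings, reset the software magic counter and pre-fill the
 * magic field of every descriptor with the last value of the sequence, so
 * that entries only match once the hardware has written a fresh magic count.
 */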
188 static void
189 mt76_dma_queue_magic_cnt_init(struct mt76_dev *dev, struct mt76_queue *q)
190 {
191 	if (!mt76_queue_is_wed_rro(q))
192 		return;
193 
194 	q->magic_cnt = 0;
195 	if (mt76_queue_is_wed_rro_ind(q)) {
196 		struct mt76_wed_rro_desc *rro_desc;
197 		u32 data1 = FIELD_PREP(RRO_IND_DATA1_MAGIC_CNT_MASK,
198 				       MT_DMA_WED_IND_CMD_CNT - 1);
199 		int i;
200 
201 		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
202 		for (i = 0; i < q->ndesc; i++) {
203 			struct mt76_wed_rro_ind *cmd;
204 
205 			cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
206 			cmd->data1 = cpu_to_le32(data1);
207 		}
208 	} else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
209 		struct mt76_rro_rxdmad_c *dmad = (void *)q->desc;
210 		u32 data3 = FIELD_PREP(RRO_RXDMAD_DATA3_MAGIC_CNT_MASK,
211 				       MT_DMA_MAGIC_CNT - 1);
212 		int i;
213 
214 		for (i = 0; i < q->ndesc; i++)
215 			dmad[i].data3 = cpu_to_le32(data3);
216 	}
217 }
218 
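/* Reprogram the descriptor base and ring size, then resynchronize the
 * software head/tail with the hardware DMA index.
 */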
219 static void
220 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
221 {
222 	Q_WRITE(q, desc_base, q->desc_dma);
223 	if (q->flags & MT_QFLAG_WED_RRO_EN)
224 		Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
225 	else
226 		Q_WRITE(q, ring_size, q->ndesc);
227 	q->head = Q_READ(q, dma_idx);
228 	q->tail = q->head;
229 }
230 
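/* Reset a queue: mark plain DMA descriptors as done (RRO ind/rxdmad_c
 * rings use a different descriptor layout), re-init the magic counters and
 * optionally zero the cpu/dma indices before resyncing with the hardware.
 */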
231 void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
232 			  bool reset_idx)
233 {
234 	if (!q || !q->ndesc)
235 		return;
236 
237 	if (!mt76_queue_is_wed_rro_ind(q) &&
238 	    !mt76_queue_is_wed_rro_rxdmad_c(q)) {
239 		int i;
240 
241 		/* clear descriptors */
242 		for (i = 0; i < q->ndesc; i++)
243 			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
244 	}
245 
246 	mt76_dma_queue_magic_cnt_init(dev, q);
247 	if (reset_idx) {
248 		if (mt76_queue_is_emi(q))
249 			*q->emi_cpu_idx = 0;
250 		else
251 			Q_WRITE(q, cpu_idx, 0);
252 		Q_WRITE(q, dma_idx, 0);
253 	}
254 	mt76_dma_sync_idx(dev, q);
255 }
256 
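/* Attach one rx buffer at q->head. For WED rx and RRO data rings the
 * buffer is registered as an rx token backed by an RXWI entry, so it can
 * be resolved again when the hardware hands it back; RRO ind/rxdmad_c
 * rings simply point the entry at the in-ring descriptor.
 */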
257 static int
258 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
259 		    struct mt76_queue_buf *buf, void *data)
260 {
261 	struct mt76_queue_entry *entry = &q->entry[q->head];
262 	struct mt76_txwi_cache *txwi = NULL;
263 	u32 buf1 = 0, ctrl, info = 0;
264 	struct mt76_desc *desc;
265 	int idx = q->head;
266 	int rx_token;
267 
268 	if (mt76_queue_is_wed_rro_ind(q)) {
269 		struct mt76_wed_rro_desc *rro_desc;
270 
271 		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
272 		data = &rro_desc[q->head];
273 		goto done;
274 	} else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
275 		data = &q->desc[q->head];
276 		goto done;
277 	}
278 
279 	desc = &q->desc[q->head];
280 	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
281 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
282 	buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
283 #endif
284 
285 	if (mt76_queue_is_wed_rx(q) || mt76_queue_is_wed_rro_data(q)) {
286 		txwi = mt76_get_rxwi(dev);
287 		if (!txwi)
288 			return -ENOMEM;
289 
290 		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
291 		if (rx_token < 0) {
292 			mt76_put_rxwi(dev, txwi);
293 			return -ENOMEM;
294 		}
295 
296 		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
297 		ctrl |= MT_DMA_CTL_TO_HOST;
298 
299 		txwi->qid = q - dev->q_rx;
300 	}
301 
302 	if (mt76_queue_is_wed_rro_msdu_pg(q) &&
303 	    dev->drv->rx_rro_add_msdu_page) {
304 		if (dev->drv->rx_rro_add_msdu_page(dev, q, buf->addr, data))
305 			return -ENOMEM;
306 	}
307 
308 	if (q->flags & MT_QFLAG_WED_RRO_EN) {
309 		info |= FIELD_PREP(MT_DMA_MAGIC_MASK, q->magic_cnt);
310 		if ((q->head + 1) == q->ndesc)
311 			q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
312 	}
313 
314 	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
315 	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
316 	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
317 	WRITE_ONCE(desc->info, cpu_to_le32(info));
318 
319 done:
320 	entry->dma_addr[0] = buf->addr;
321 	entry->dma_len[0] = buf->len;
322 	entry->txwi = txwi;
323 	entry->buf = data;
324 	entry->wcid = 0xffff;
325 	entry->skip_buf1 = true;
326 	q->head = (q->head + 1) % q->ndesc;
327 	q->queued++;
328 
329 	return idx;
330 }
331 
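/* Queue up to two scatter buffers per tx descriptor; the last descriptor
 * written records the skb, txwi and wcid consumed later by tx cleanup.
 */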
332 static int
333 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
334 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
335 		 struct sk_buff *skb, void *txwi)
336 {
337 	struct mt76_queue_entry *entry;
338 	struct mt76_desc *desc;
339 	int i, idx = -1;
340 	u32 ctrl, next;
341 
342 	if (txwi) {
343 		q->entry[q->head].txwi = DMA_DUMMY_DATA;
344 		q->entry[q->head].skip_buf0 = true;
345 	}
346 
347 	for (i = 0; i < nbufs; i += 2, buf += 2) {
348 		u32 buf0 = buf[0].addr, buf1 = 0;
349 
350 		idx = q->head;
351 		next = (q->head + 1) % q->ndesc;
352 
353 		desc = &q->desc[idx];
354 		entry = &q->entry[idx];
355 
356 		if (buf[0].skip_unmap)
357 			entry->skip_buf0 = true;
358 		entry->skip_buf1 = i == nbufs - 1;
359 
360 		entry->dma_addr[0] = buf[0].addr;
361 		entry->dma_len[0] = buf[0].len;
362 
363 		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
364 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
365 		info |= FIELD_PREP(MT_DMA_CTL_SDP0_H, buf[0].addr >> 32);
366 #endif
367 		if (i < nbufs - 1) {
368 			entry->dma_addr[1] = buf[1].addr;
369 			entry->dma_len[1] = buf[1].len;
370 			buf1 = buf[1].addr;
371 			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
372 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
373 			info |= FIELD_PREP(MT_DMA_CTL_SDP1_H,
374 					   buf[1].addr >> 32);
375 #endif
376 			if (buf[1].skip_unmap)
377 				entry->skip_buf1 = true;
378 		}
379 
380 		if (i == nbufs - 1)
381 			ctrl |= MT_DMA_CTL_LAST_SEC0;
382 		else if (i == nbufs - 2)
383 			ctrl |= MT_DMA_CTL_LAST_SEC1;
384 
385 		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
386 		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
387 		WRITE_ONCE(desc->info, cpu_to_le32(info));
388 		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
389 
390 		q->head = next;
391 		q->queued++;
392 	}
393 
394 	q->entry[idx].txwi = txwi;
395 	q->entry[idx].skb = skb;
396 	q->entry[idx].wcid = 0xffff;
397 
398 	return idx;
399 }
400 
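/* Unmap the buffers of a completed tx descriptor and hand a copy of its
 * entry back to the caller before clearing the slot.
 */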
401 static void
402 mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
403 			struct mt76_queue_entry *prev_e)
404 {
405 	struct mt76_queue_entry *e = &q->entry[idx];
406 
407 	if (!e->skip_buf0)
408 		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
409 				 DMA_TO_DEVICE);
410 
411 	if (!e->skip_buf1)
412 		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
413 				 DMA_TO_DEVICE);
414 
415 	if (e->txwi == DMA_DUMMY_DATA)
416 		e->txwi = NULL;
417 
418 	*prev_e = *e;
419 	memset(e, 0, sizeof(*e));
420 }
421 
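/* Make sure all descriptor updates are visible before publishing the new
 * CPU index, either through the EMI mirror in memory or the queue register.
 */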
422 static void
423 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
424 {
425 	wmb();
426 	if (mt76_queue_is_emi(q))
427 		*q->emi_cpu_idx = cpu_to_le16(q->head);
428 	else
429 		Q_WRITE(q, cpu_idx, q->head);
430 }
431 
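/* Reclaim completed tx entries up to the hardware DMA index (the whole
 * ring when flushing), recycle their TXWIs and wake tx waiters once the
 * queue is empty.
 */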
432 static void
433 mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
434 {
435 	struct mt76_queue_entry entry;
436 	int last;
437 
438 	if (!q || !q->ndesc)
439 		return;
440 
441 	spin_lock_bh(&q->cleanup_lock);
442 	if (flush)
443 		last = -1;
444 	else
445 		last = Q_READ(q, dma_idx);
446 
447 	while (q->queued > 0 && q->tail != last) {
448 		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
449 		mt76_queue_tx_complete(dev, q, &entry);
450 
451 		if (entry.txwi) {
452 			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
453 				mt76_put_txwi(dev, entry.txwi);
454 		}
455 
456 		if (!flush && q->tail == last)
457 			last = Q_READ(q, dma_idx);
458 	}
459 	spin_unlock_bh(&q->cleanup_lock);
460 
461 	if (flush) {
462 		spin_lock_bh(&q->lock);
463 		mt76_dma_sync_idx(dev, q);
464 		mt76_dma_kick_queue(dev, q);
465 		spin_unlock_bh(&q->lock);
466 	}
467 
468 	if (!q->queued)
469 		wake_up(&dev->tx_wait);
470 }
471 
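/* Resolve the rx token carried in an RRO rxdmad_c descriptor back to its
 * page-pool buffer and sync it for CPU access; buffers flagged as repeated
 * or old packets are dropped and reported as -EAGAIN.
 */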
472 static void *
473 mt76_dma_get_rxdmad_c_buf(struct mt76_dev *dev, struct mt76_queue *q,
474 			  int idx, int *len, bool *more)
475 {
476 	struct mt76_queue_entry *e = &q->entry[idx];
477 	struct mt76_rro_rxdmad_c *dmad = e->buf;
478 	u32 data1 = le32_to_cpu(dmad->data1);
479 	u32 data2 = le32_to_cpu(dmad->data2);
480 	struct mt76_txwi_cache *t;
481 	u16 rx_token_id;
482 	u8 ind_reason;
483 	void *buf;
484 
485 	rx_token_id = FIELD_GET(RRO_RXDMAD_DATA2_RX_TOKEN_ID_MASK, data2);
486 	t = mt76_rx_token_release(dev, rx_token_id);
487 	if (!t)
488 		return ERR_PTR(-EAGAIN);
489 
490 	q = &dev->q_rx[t->qid];
491 	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
492 				SKB_WITH_OVERHEAD(q->buf_size),
493 				page_pool_get_dma_dir(q->page_pool));
494 
495 	if (len)
496 		*len = FIELD_GET(RRO_RXDMAD_DATA1_SDL0_MASK, data1);
497 	if (more)
498 		*more = !FIELD_GET(RRO_RXDMAD_DATA1_LS_MASK, data1);
499 
500 	buf = t->ptr;
501 	ind_reason = FIELD_GET(RRO_RXDMAD_DATA2_IND_REASON_MASK, data2);
502 	if (ind_reason == MT_DMA_WED_IND_REASON_REPEAT ||
503 	    ind_reason == MT_DMA_WED_IND_REASON_OLDPKT) {
504 		mt76_put_page_pool_buf(buf, false);
505 		buf = ERR_PTR(-EAGAIN);
506 	}
507 	t->ptr = NULL;
508 	t->dma_addr = 0;
509 
510 	mt76_put_rxwi(dev, t);
511 
512 	return buf;
513 }
514 
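/* Fetch the buffer behind a completed rx descriptor, releasing the rx
 * token for WED rx rings and syncing the data for CPU access.
 */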
515 static void *
516 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
517 		 int *len, u32 *info, bool *more, bool *drop, bool flush)
518 {
519 	struct mt76_queue_entry *e = &q->entry[idx];
520 	struct mt76_desc *desc = &q->desc[idx];
521 	u32 ctrl, desc_info, buf1;
522 	void *buf = e->buf;
523 
524 	if (mt76_queue_is_wed_rro_rxdmad_c(q) && !flush)
525 		buf = mt76_dma_get_rxdmad_c_buf(dev, q, idx, len, more);
526 
527 	if (mt76_queue_is_wed_rro(q))
528 		goto done;
529 
530 	ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
531 	if (len) {
532 		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
533 		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
534 	}
535 
536 	desc_info = le32_to_cpu(desc->info);
537 	if (info)
538 		*info = desc_info;
539 
540 	buf1 = le32_to_cpu(desc->buf1);
541 	mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);
542 
543 	if (mt76_queue_is_wed_rx(q)) {
544 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
545 		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
546 
547 		if (!t)
548 			return NULL;
549 
550 		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
551 				SKB_WITH_OVERHEAD(q->buf_size),
552 				page_pool_get_dma_dir(q->page_pool));
553 
554 		buf = t->ptr;
555 		t->dma_addr = 0;
556 		t->ptr = NULL;
557 
558 		mt76_put_rxwi(dev, t);
559 		if (drop)
560 			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
561 	} else {
562 		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
563 				SKB_WITH_OVERHEAD(q->buf_size),
564 				page_pool_get_dma_dir(q->page_pool));
565 	}
566 
567 done:
568 	e->buf = NULL;
569 	return buf;
570 }
571 
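/* Advance q->tail by one completed entry. Completion is detected via the
 * DMA_DONE bit on normal rings and via the rolling magic counter on RRO
 * ind/rxdmad_c rings; RRO data and msdu_pg rings are dequeued without a
 * completion check.
 */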
572 static void *
573 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
574 		 int *len, u32 *info, bool *more, bool *drop)
575 {
576 	int idx = q->tail;
577 
578 	*more = false;
579 	if (!q->queued)
580 		return NULL;
581 
582 	if (mt76_queue_is_wed_rro_data(q) || mt76_queue_is_wed_rro_msdu_pg(q))
583 		goto done;
584 
585 	if (mt76_queue_is_wed_rro_ind(q)) {
586 		struct mt76_wed_rro_ind *cmd;
587 		u8 magic_cnt;
588 
589 		if (flush)
590 			goto done;
591 
592 		cmd = q->entry[idx].buf;
593 		magic_cnt = FIELD_GET(RRO_IND_DATA1_MAGIC_CNT_MASK,
594 				      le32_to_cpu(cmd->data1));
595 		if (magic_cnt != q->magic_cnt)
596 			return NULL;
597 
598 		if (q->tail == q->ndesc - 1)
599 			q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_WED_IND_CMD_CNT;
600 	} else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
601 		struct mt76_rro_rxdmad_c *dmad;
602 		u16 magic_cnt;
603 
604 		if (flush)
605 			goto done;
606 
607 		dmad = q->entry[idx].buf;
608 		magic_cnt = FIELD_GET(RRO_RXDMAD_DATA3_MAGIC_CNT_MASK,
609 				      le32_to_cpu(dmad->data3));
610 		if (magic_cnt != q->magic_cnt)
611 			return NULL;
612 
613 		if (q->tail == q->ndesc - 1)
614 			q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
615 	} else {
616 		if (flush)
617 			q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
618 		else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
619 			return NULL;
620 	}
621 done:
622 	q->tail = (q->tail + 1) % q->ndesc;
623 	q->queued--;
624 
625 	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
626 }
627 
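/* Map an skb as a single buffer (used e.g. for MCU messages), add it to
 * the queue and kick the ring; the skb is freed on any error path.
 */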
628 static int
629 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
630 			  struct sk_buff *skb, u32 tx_info)
631 {
632 	struct mt76_queue_buf buf = {};
633 	dma_addr_t addr;
634 
635 	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
636 		goto error;
637 
638 	if (q->queued + 1 >= q->ndesc - 1)
639 		goto error;
640 
641 	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
642 			      DMA_TO_DEVICE);
643 	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
644 		goto error;
645 
646 	buf.addr = addr;
647 	buf.len = skb->len;
648 
649 	spin_lock_bh(&q->lock);
650 	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
651 	mt76_dma_kick_queue(dev, q);
652 	spin_unlock_bh(&q->lock);
653 
654 	return 0;
655 
656 error:
657 	dev_kfree_skb(skb);
658 	return -ENOMEM;
659 }
660 
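/* Build the DMA buffer list for a frame: TXWI first, then the skb head and
 * its fragments. The driver fills the TXWI via tx_prepare_skb(); on
 * failure everything is unmapped and the skb is completed via
 * ieee80211_tx_status_ext().
 */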
661 static int
662 mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
663 		      enum mt76_txq_id qid, struct sk_buff *skb,
664 		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
665 {
666 	struct ieee80211_tx_status status = {
667 		.sta = sta,
668 	};
669 	struct mt76_tx_info tx_info = {
670 		.skb = skb,
671 	};
672 	struct mt76_dev *dev = phy->dev;
673 	struct ieee80211_hw *hw;
674 	int len, n = 0, ret = -ENOMEM;
675 	struct mt76_txwi_cache *t;
676 	struct sk_buff *iter;
677 	dma_addr_t addr;
678 	u8 *txwi;
679 
680 	if (test_bit(MT76_RESET, &phy->state))
681 		goto free_skb;
682 
683 	t = mt76_get_txwi(dev);
684 	if (!t)
685 		goto free_skb;
686 
687 	txwi = mt76_get_txwi_ptr(dev, t);
688 
689 	skb->prev = skb->next = NULL;
690 	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
691 		mt76_insert_hdr_pad(skb);
692 
693 	len = skb_headlen(skb);
694 	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
695 	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
696 		goto free;
697 
698 	tx_info.buf[n].addr = t->dma_addr;
699 	tx_info.buf[n++].len = dev->drv->txwi_size;
700 	tx_info.buf[n].addr = addr;
701 	tx_info.buf[n++].len = len;
702 
703 	skb_walk_frags(skb, iter) {
704 		if (n == ARRAY_SIZE(tx_info.buf))
705 			goto unmap;
706 
707 		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
708 				      DMA_TO_DEVICE);
709 		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
710 			goto unmap;
711 
712 		tx_info.buf[n].addr = addr;
713 		tx_info.buf[n++].len = iter->len;
714 	}
715 	tx_info.nbuf = n;
716 
717 	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
718 		ret = -ENOMEM;
719 		goto unmap;
720 	}
721 
722 	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
723 				DMA_TO_DEVICE);
724 	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
725 	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
726 				   DMA_TO_DEVICE);
727 	if (ret < 0)
728 		goto unmap;
729 
730 	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
731 				tx_info.info, tx_info.skb, t);
732 
733 unmap:
734 	for (n--; n > 0; n--)
735 		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
736 				 tx_info.buf[n].len, DMA_TO_DEVICE);
737 
738 free:
739 #ifdef CONFIG_NL80211_TESTMODE
740 	/* fix tx_done accounting on queue overflow */
741 	if (mt76_is_testmode_skb(dev, skb, &hw)) {
742 		struct mt76_phy *phy = hw->priv;
743 
744 		if (tx_info.skb == phy->test.tx_skb)
745 			phy->test.tx_done--;
746 	}
747 #endif
748 
749 	mt76_put_txwi(dev, t);
750 
751 free_skb:
752 	status.skb = tx_info.skb;
753 	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
754 	spin_lock_bh(&dev->rx_lock);
755 	ieee80211_tx_status_ext(hw, &status);
756 	spin_unlock_bh(&dev->rx_lock);
757 
758 	return ret;
759 }
760 
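/* Refill an rx ring with page-pool buffers until it is full; RRO
 * ind/rxdmad_c rings consume their in-ring descriptors directly, so no
 * buffer is allocated for them. The ring is kicked when buffers were
 * added, and unconditionally for WED rx rings.
 */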
761 static int
762 mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
763 		     bool allow_direct)
764 {
765 	int len = SKB_WITH_OVERHEAD(q->buf_size);
766 	int frames = 0;
767 
768 	if (!q->ndesc)
769 		return 0;
770 
771 	while (q->queued < q->ndesc - 1) {
772 		struct mt76_queue_buf qbuf = {};
773 		void *buf = NULL;
774 		int offset;
775 
776 		if (mt76_queue_is_wed_rro_ind(q) ||
777 		    mt76_queue_is_wed_rro_rxdmad_c(q))
778 			goto done;
779 
780 		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
781 		if (!buf)
782 			break;
783 
784 		qbuf.addr = page_pool_get_dma_addr(virt_to_head_page(buf)) +
785 			    offset + q->buf_offset;
786 done:
787 		qbuf.len = len - q->buf_offset;
788 		qbuf.skip_unmap = false;
789 		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
790 			mt76_put_page_pool_buf(buf, allow_direct);
791 			break;
792 		}
793 		frames++;
794 	}
795 
796 	if (frames || mt76_queue_is_wed_rx(q))
797 		mt76_dma_kick_queue(dev, q);
798 
799 	return frames;
800 }
801 
802 int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
803 		     bool allow_direct)
804 {
805 	int frames;
806 
807 	spin_lock_bh(&q->lock);
808 	frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
809 	spin_unlock_bh(&q->lock);
810 
811 	return frames;
812 }
813 
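/* Allocate the descriptor ring and per-entry bookkeeping, create the page
 * pool and perform WED setup. Rings fully handled by WED (tx-free, RRO
 * with rx offload) are not reset here; EMI rings are reset later by the
 * driver, see the comment below.
 */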
814 static int
815 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
816 		     int idx, int n_desc, int bufsize,
817 		     u32 ring_base)
818 {
819 	int ret, size;
820 
821 	spin_lock_init(&q->lock);
822 	spin_lock_init(&q->cleanup_lock);
823 
824 	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
825 	q->ndesc = n_desc;
826 	q->buf_size = bufsize;
827 	q->hw_idx = idx;
828 
829 	size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
830 					    : sizeof(struct mt76_desc);
831 	q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
832 				      &q->desc_dma, GFP_KERNEL);
833 	if (!q->desc)
834 		return -ENOMEM;
835 
836 	mt76_dma_queue_magic_cnt_init(dev, q);
837 	size = q->ndesc * sizeof(*q->entry);
838 	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
839 	if (!q->entry)
840 		return -ENOMEM;
841 
842 	ret = mt76_create_page_pool(dev, q);
843 	if (ret)
844 		return ret;
845 
846 	ret = mt76_wed_dma_setup(dev, q, false);
847 	if (ret)
848 		return ret;
849 
850 	if (mtk_wed_device_active(&dev->mmio.wed)) {
851 		if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
852 		    mt76_queue_is_wed_tx_free(q))
853 			return 0;
854 	}
855 
856 	/* The HW-specific driver is expected to reset brand-new EMI queues
857 	 * itself, since it has to set the CPU index pointer.
858 	 */
859 	mt76_dma_queue_reset(dev, q, !mt76_queue_is_emi(q));
860 
861 	return 0;
862 }
863 
864 static void
865 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
866 {
867 	void *buf;
868 	bool more;
869 
870 	if (!q->ndesc)
871 		return;
872 
873 	do {
874 		spin_lock_bh(&q->lock);
875 		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
876 		spin_unlock_bh(&q->lock);
877 
878 		if (!buf)
879 			break;
880 
881 		if (!mt76_queue_is_wed_rro(q))
882 			mt76_put_page_pool_buf(buf, false);
883 	} while (1);
884 
885 	spin_lock_bh(&q->lock);
886 	if (q->rx_head) {
887 		dev_kfree_skb(q->rx_head);
888 		q->rx_head = NULL;
889 	}
890 
891 	spin_unlock_bh(&q->lock);
892 }
893 
894 static void
895 mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
896 {
897 	struct mt76_queue *q = &dev->q_rx[qid];
898 
899 	if (!q->ndesc)
900 		return;
901 
902 	if (!mt76_queue_is_wed_rro_ind(q) &&
903 	    !mt76_queue_is_wed_rro_rxdmad_c(q)) {
904 		int i;
905 
906 		for (i = 0; i < q->ndesc; i++)
907 			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
908 	}
909 
910 	mt76_dma_rx_cleanup(dev, q);
911 
912 	/* reset WED rx queues */
913 	mt76_wed_dma_setup(dev, q, true);
914 
915 	if (mt76_queue_is_wed_tx_free(q))
916 		return;
917 
918 	if (mtk_wed_device_active(&dev->mmio.wed) &&
919 	    mt76_queue_is_wed_rro(q))
920 		return;
921 
922 	mt76_dma_sync_idx(dev, q);
923 	mt76_dma_rx_fill_buf(dev, q, false);
924 }
925 
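/* Append an rx fragment to the pending skb; once the last fragment
 * arrives, hand the complete frame to the driver, or drop it if the
 * fragment list overflowed.
 */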
926 static void
927 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
928 		  int len, bool more, u32 info, bool allow_direct)
929 {
930 	struct sk_buff *skb = q->rx_head;
931 	struct skb_shared_info *shinfo = skb_shinfo(skb);
932 	int nr_frags = shinfo->nr_frags;
933 
934 	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
935 		struct page *page = virt_to_head_page(data);
936 		int offset = data - page_address(page) + q->buf_offset;
937 
938 		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
939 	} else {
940 		mt76_put_page_pool_buf(data, allow_direct);
941 	}
942 
943 	if (more)
944 		return;
945 
946 	q->rx_head = NULL;
947 	if (nr_frags < ARRAY_SIZE(shinfo->frags))
948 		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
949 	else
950 		dev_kfree_skb(skb);
951 }
952 
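/* Rx poll loop: dequeue completed buffers, reassemble fragmented frames,
 * build skbs and pass them to the driver, then refill the ring. WED RRO
 * and tx-free rings additionally poll the hardware DMA index to detect
 * completion.
 */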
953 static int
954 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
955 {
956 	int len, data_len, done = 0, dma_idx;
957 	struct sk_buff *skb;
958 	unsigned char *data;
959 	bool check_ddone = false;
960 	bool allow_direct = !mt76_queue_is_wed_rx(q);
961 	bool more;
962 
963 	if ((q->flags & MT_QFLAG_WED_RRO_EN) ||
964 	    (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
965 	     mt76_queue_is_wed_tx_free(q))) {
966 		dma_idx = Q_READ(q, dma_idx);
967 		check_ddone = true;
968 	}
969 
970 	while (done < budget) {
971 		bool drop = false;
972 		u32 info;
973 
974 		if (check_ddone) {
975 			if (q->tail == dma_idx)
976 				dma_idx = Q_READ(q, dma_idx);
977 
978 			if (q->tail == dma_idx)
979 				break;
980 		}
981 
982 		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
983 					&drop);
984 		if (!data)
985 			break;
986 
987 		if (PTR_ERR(data) == -EAGAIN) {
988 			done++;
989 			continue;
990 		}
991 
992 		if (mt76_queue_is_wed_rro_ind(q) && dev->drv->rx_rro_ind_process)
993 			dev->drv->rx_rro_ind_process(dev, data);
994 
995 		if (mt76_queue_is_wed_rro(q) &&
996 		    !mt76_queue_is_wed_rro_rxdmad_c(q)) {
997 			done++;
998 			continue;
999 		}
1000 
1001 		if (drop)
1002 			goto free_frag;
1003 
1004 		if (q->rx_head)
1005 			data_len = q->buf_size;
1006 		else
1007 			data_len = SKB_WITH_OVERHEAD(q->buf_size);
1008 
1009 		if (data_len < len + q->buf_offset) {
1010 			dev_kfree_skb(q->rx_head);
1011 			q->rx_head = NULL;
1012 			goto free_frag;
1013 		}
1014 
1015 		if (q->rx_head) {
1016 			mt76_add_fragment(dev, q, data, len, more, info,
1017 					  allow_direct);
1018 			continue;
1019 		}
1020 
1021 		if (!more && dev->drv->rx_check &&
1022 		    !(dev->drv->rx_check(dev, data, len)))
1023 			goto free_frag;
1024 
1025 		skb = napi_build_skb(data, q->buf_size);
1026 		if (!skb)
1027 			goto free_frag;
1028 
1029 		skb_reserve(skb, q->buf_offset);
1030 		skb_mark_for_recycle(skb);
1031 
1032 		*(u32 *)skb->cb = info;
1033 
1034 		__skb_put(skb, len);
1035 		done++;
1036 
1037 		if (more) {
1038 			q->rx_head = skb;
1039 			continue;
1040 		}
1041 
1042 		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
1043 		continue;
1044 
1045 free_frag:
1046 		mt76_put_page_pool_buf(data, allow_direct);
1047 	}
1048 
1049 	mt76_dma_rx_fill(dev, q, true);
1050 	return done;
1051 }
1052 
1053 int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
1054 {
1055 	struct mt76_dev *dev;
1056 	int qid, done = 0, cur;
1057 
1058 	dev = mt76_priv(napi->dev);
1059 	qid = napi - dev->napi;
1060 
1061 	rcu_read_lock();
1062 
1063 	do {
1064 		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
1065 		mt76_rx_poll_complete(dev, qid, napi);
1066 		done += cur;
1067 	} while (cur && done < budget);
1068 
1069 	rcu_read_unlock();
1070 
1071 	if (done < budget && napi_complete(napi))
1072 		dev->drv->rx_poll_complete(dev, qid);
1073 
1074 	return done;
1075 }
1076 EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);
1077 
1078 static void
1079 mt76_dma_rx_queue_init(struct mt76_dev *dev, enum mt76_rxq_id qid,
1080 		       int (*poll)(struct napi_struct *napi, int budget))
1081 {
1082 	netif_napi_add(dev->napi_dev, &dev->napi[qid], poll);
1083 	mt76_dma_rx_fill_buf(dev, &dev->q_rx[qid], false);
1084 	napi_enable(&dev->napi[qid]);
1085 }
1086 
1087 static int
1088 mt76_dma_init(struct mt76_dev *dev,
1089 	      int (*poll)(struct napi_struct *napi, int budget))
1090 {
1091 	struct mt76_dev **priv;
1092 	int i;
1093 
1094 	dev->napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
1095 	if (!dev->napi_dev)
1096 		return -ENOMEM;
1097 
1098 	/* The napi_dev private data points to the mt76_dev parent, so
1099 	 * mt76_dev can be retrieved from a given napi_dev (see mt76_priv()).
1100 	 */
1101 	priv = netdev_priv(dev->napi_dev);
1102 	*priv = dev;
1103 
1104 	dev->tx_napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
1105 	if (!dev->tx_napi_dev) {
1106 		free_netdev(dev->napi_dev);
1107 		return -ENOMEM;
1108 	}
1109 	priv = netdev_priv(dev->tx_napi_dev);
1110 	*priv = dev;
1111 
1112 	snprintf(dev->napi_dev->name, sizeof(dev->napi_dev->name), "%s",
1113 		 wiphy_name(dev->hw->wiphy));
1114 	dev->napi_dev->threaded = 1;
1115 	init_completion(&dev->mmio.wed_reset);
1116 	init_completion(&dev->mmio.wed_reset_complete);
1117 
1118 	mt76_for_each_q_rx(dev, i) {
1119 		if (mt76_queue_is_wed_rro(&dev->q_rx[i]))
1120 			continue;
1121 
1122 		mt76_dma_rx_queue_init(dev, i, poll);
1123 	}
1124 
1125 	return 0;
1126 }
1127 
1128 static const struct mt76_queue_ops mt76_dma_ops = {
1129 	.init = mt76_dma_init,
1130 	.alloc = mt76_dma_alloc_queue,
1131 	.reset_q = mt76_dma_queue_reset,
1132 	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
1133 	.tx_queue_skb = mt76_dma_tx_queue_skb,
1134 	.tx_cleanup = mt76_dma_tx_cleanup,
1135 	.rx_queue_init = mt76_dma_rx_queue_init,
1136 	.rx_cleanup = mt76_dma_rx_cleanup,
1137 	.rx_reset = mt76_dma_rx_reset,
1138 	.kick = mt76_dma_kick_queue,
1139 };
1140 
1141 void mt76_dma_attach(struct mt76_dev *dev)
1142 {
1143 	dev->queue_ops = &mt76_dma_ops;
1144 }
1145 EXPORT_SYMBOL_GPL(mt76_dma_attach);
1146 
1147 void mt76_dma_cleanup(struct mt76_dev *dev)
1148 {
1149 	int i;
1150 
1151 	mt76_worker_disable(&dev->tx_worker);
1152 	napi_disable(&dev->tx_napi);
1153 	netif_napi_del(&dev->tx_napi);
1154 
1155 	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
1156 		struct mt76_phy *phy = dev->phys[i];
1157 		int j;
1158 
1159 		if (!phy)
1160 			continue;
1161 
1162 		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
1163 			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
1164 	}
1165 
1166 	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
1167 		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);
1168 
1169 	mt76_for_each_q_rx(dev, i) {
1170 		struct mt76_queue *q = &dev->q_rx[i];
1171 
1172 		if (mtk_wed_device_active(&dev->mmio.wed) &&
1173 		    mt76_queue_is_wed_rro(q))
1174 			continue;
1175 
1176 		netif_napi_del(&dev->napi[i]);
1177 		mt76_dma_rx_cleanup(dev, q);
1178 
1179 		page_pool_destroy(q->page_pool);
1180 	}
1181 
1182 	if (mtk_wed_device_active(&dev->mmio.wed))
1183 		mtk_wed_device_detach(&dev->mmio.wed);
1184 
1185 	if (mtk_wed_device_active(&dev->mmio.wed_hif2))
1186 		mtk_wed_device_detach(&dev->mmio.wed_hif2);
1187 
1188 	mt76_free_pending_txwi(dev);
1189 	mt76_free_pending_rxwi(dev);
1190 	free_netdev(dev->napi_dev);
1191 	free_netdev(dev->tx_napi_dev);
1192 }
1193 EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
1194