xref: /linux/drivers/net/wireless/mediatek/mt76/mt7996/dma.c (revision d30c1683aaecb93d2ab95685dc4300a33d3cea7a)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (C) 2022 MediaTek Inc.
4  */
5 
6 #include "mt7996.h"
7 #include "../dma.h"
8 #include "mac.h"
9 
10 int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
11 			  int ring_base, struct mtk_wed_device *wed)
12 {
13 	struct mt7996_dev *dev = phy->dev;
14 	u32 flags = 0;
15 
16 	if (mtk_wed_device_active(wed)) {
17 		ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
18 		idx -= MT_TXQ_ID(0);
19 
20 		if (wed == &dev->mt76.mmio.wed_hif2)
21 			flags = MT_WED_Q_TX(0);
22 		else
23 			flags = MT_WED_Q_TX(idx);
24 	}
25 
26 	if (mt76_npu_device_active(&dev->mt76))
27 		flags = MT_NPU_Q_TX(phy->mt76->band_idx);
28 
29 	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
30 					  ring_base, wed, flags);
31 }
32 
33 static int mt7996_poll_tx(struct napi_struct *napi, int budget)
34 {
35 	struct mt7996_dev *dev;
36 
37 	dev = container_of(napi, struct mt7996_dev, mt76.tx_napi);
38 
39 	mt76_connac_tx_cleanup(&dev->mt76);
40 	if (napi_complete_done(napi, 0))
41 		mt7996_irq_enable(dev, MT_INT_TX_DONE_MCU);
42 
43 	return 0;
44 }
45 
/* Build the per-chip queue layout tables: dev->q_id maps each logical mt76
 * queue to its hardware ring id, dev->q_int_mask holds the interrupt bit
 * for that ring, and dev->q_wfdma_mask records which queues live on WFDMA0.
 * The layout differs between MT7996/MT7992/MT7990, with/without a second
 * PCIe interface (hif2), with/without the WA MCU, and with HW RRO enabled.
 */
static void mt7996_dma_config(struct mt7996_dev *dev)
{
/* NOTE: "int" is (ab)used as a macro parameter name here; it is only ever
 * substituted textually, never used as the type keyword inside the body.
 */
#define Q_CONFIG(q, wfdma, int, id) do {		\
	if (wfdma)					\
		dev->q_wfdma_mask |= (1 << (q));	\
	dev->q_int_mask[(q)] = int;			\
	dev->q_id[(q)] = id;				\
} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	/* rx queue */
	RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
	/* for mt7990, RX ring 1 is for SDO instead */
	RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);
	RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
	/* the WA tx-free ring for band0 only exists on chips with a WA MCU */
	if (mt7996_has_wa(dev))
		RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN,
			   MT7996_RXQ_MCU_WA_MAIN);

	/* chip-specific second/third band Rx rings */
	switch (mt76_chip(&dev->mt76)) {
	case MT7992_DEVICE_ID:
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
		break;
	case MT7990_DEVICE_ID:
		/* mt7990 has no WA: tx-free comes straight from the MAC */
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
			   MT_INT_RX_TXFREE_BAND0_MT7990, MT7990_RXQ_TXFREE0);
		if (dev->hif2)
			RXQ_CONFIG(MT_RXQ_TXFREE_BAND1, WFDMA0,
				   MT_INT_RX_TXFREE_BAND1_MT7990, MT7990_RXQ_TXFREE1);
		break;
	case MT7996_DEVICE_ID:
	default:
		/* mt7996 band2 */
		RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
		RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
		break;
	}

	/* hardware RRO (receive reordering offload) rings */
	if (mt7996_has_hwrro(dev)) {
		/* band0 */
		RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
			   MT7996_RXQ_RRO_BAND0);
		/* MSDU-page rings are only used in RRO v3 mode */
		if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
			RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0,
				   MT_INT_RX_DONE_MSDU_PG_BAND0,
				   MT7996_RXQ_MSDU_PG_BAND0);
		if (is_mt7996(&dev->mt76)) {
			RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
				   MT_INT_RX_TXFREE_MAIN, MT7996_RXQ_TXFREE0);
			/* band1 */
			RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0,
				   MT_INT_RX_DONE_MSDU_PG_BAND1,
				   MT7996_RXQ_MSDU_PG_BAND1);
			/* band2 */
			RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0,
				   MT_INT_RX_DONE_RRO_BAND2,
				   MT7996_RXQ_RRO_BAND2);
			RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0,
				   MT_INT_RX_DONE_MSDU_PG_BAND2,
				   MT7996_RXQ_MSDU_PG_BAND2);
			RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0,
				   MT_INT_RX_TXFREE_TRI, MT7996_RXQ_TXFREE2);
		} else {
			RXQ_CONFIG(MT_RXQ_RRO_BAND1, WFDMA0,
				   MT_INT_RX_DONE_RRO_BAND1,
				   MT7996_RXQ_RRO_BAND1);
		}

		/* v3 uses the indication-command ring, later modes the
		 * rxdmad_c ring
		 */
		if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
			RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0,
				   MT_INT_RX_DONE_RRO_IND,
				   MT7996_RXQ_RRO_IND);
		else
			RXQ_CONFIG(MT_RXQ_RRO_RXDMAD_C, WFDMA0,
				   MT_INT_RX_DONE_RRO_RXDMAD_C,
				   MT7996_RXQ_RRO_RXDMAD_C);
	}

	/* data tx queue */
	if (is_mt7996(&dev->mt76)) {
		TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
		if (dev->hif2) {
			/* default bn1:ring19 bn2:ring21 */
			TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
				   MT7996_TXQ_BAND1);
			TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2,
				   MT7996_TXQ_BAND2);
		} else {
			/* single pcie bn0/1:ring18 bn2:ring19 */
			TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND1,
				   MT7996_TXQ_BAND1);
		}
	} else {
		if (dev->hif2) {
			/*  bn0:ring18 bn1:ring21 */
			TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
				   MT7996_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND2,
				   MT7996_TXQ_BAND2);
		} else {
			/* single pcie bn0:ring18 bn1:ring19 */
			TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
				   MT7996_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
				   MT7996_TXQ_BAND1);
		}
	}

	/* mcu tx queue */
	MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
	MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
	if (mt7996_has_wa(dev))
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA,
			    MT7996_TXQ_MCU_WA);
}
166 
/* Pack one prefetch control word: the current SRAM base goes in the upper
 * 16 bits, the ring's prefetch depth in the lower 16.  The base cursor is
 * then advanced by depth * 16 so consecutive calls hand out consecutive,
 * non-overlapping SRAM slots.
 */
static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
{
	u32 val = (u32)*base << 16 | depth;

	/* each unit of depth covers 16 bytes of prefetch SRAM */
	*base += depth << 4;

	return val;
}
175 
/* Program the SRAM prefetch slot (base/depth pair) of every Tx/Rx ring on
 * the WFDMA instance at register offset @ofs (0 for the primary PCIe, the
 * hif1 offset for the secondary one).  PREFETCH() hands out consecutive
 * slots from a single cursor, so the order of the writes below also fixes
 * the SRAM layout.
 */
static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
	u16 base = 0;
	u8 queue, val;

#define PREFETCH(_depth)	(__mt7996_dma_prefetch_base(&base, (_depth)))
	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	/* Tx Command Rings */
	/* mt7996 uses shallower command-ring prefetch than the other chips */
	val = is_mt7996(&dev->mt76) ? 2 : 4;
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(val));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(val));
	if (mt7996_has_wa(dev))
		mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(val));

	/* Tx Data Rings */
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x8));
	if (!is_mt7996(&dev->mt76) || dev->hif2)
		mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
	if (is_mt7996(&dev->mt76))
		mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));

	/* Rx Event Rings */
	mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(val));
	mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(val));

	/* Rx TxFreeDone From WA Rings */
	if (mt7996_has_wa(dev)) {
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(val));
		/* the extra WA ring serves band2 on mt7996, band1 otherwise */
		queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
		mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(val));
	}

	/* Rx TxFreeDone From MAC Rings */
	val = is_mt7996(&dev->mt76) ? 4 : 8;
	if ((is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev)) ||
	    is_mt7990(&dev->mt76))
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, PREFETCH(val));
	if (is_mt7990(&dev->mt76) && dev->hif2)
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND1) + ofs, PREFETCH(val));
	else if (is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev))
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, PREFETCH(val));

	/* Rx Data Rings */
	mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
	queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2 : MT_RXQ_BAND1;
	mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));

	/* Rx RRO Rings */
	if (mt7996_has_hwrro(dev)) {
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_RRO_BAND0) + ofs, PREFETCH(0x10));
		queue = is_mt7996(&dev->mt76) ? MT_RXQ_RRO_BAND2 : MT_RXQ_RRO_BAND1;
		mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));

		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs, PREFETCH(val));
		if (is_mt7996(&dev->mt76)) {
			mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
				PREFETCH(val));
			mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
				PREFETCH(val));
		}
	}
#undef PREFETCH

	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
}
241 
242 void mt7996_dma_prefetch(struct mt7996_dev *dev)
243 {
244 	__mt7996_dma_prefetch(dev, 0);
245 	if (dev->hif2)
246 		__mt7996_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
247 }
248 
249 static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
250 {
251 	u32 hif1_ofs = 0;
252 
253 	if (dev->hif2)
254 		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
255 
256 	if (reset) {
257 		mt76_clear(dev, MT_WFDMA0_RST,
258 			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
259 			   MT_WFDMA0_RST_LOGIC_RST);
260 
261 		mt76_set(dev, MT_WFDMA0_RST,
262 			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
263 			 MT_WFDMA0_RST_LOGIC_RST);
264 
265 		if (dev->hif2) {
266 			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
267 				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
268 				   MT_WFDMA0_RST_LOGIC_RST);
269 
270 			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
271 				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
272 				 MT_WFDMA0_RST_LOGIC_RST);
273 		}
274 	}
275 
276 	/* disable */
277 	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
278 		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
279 		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
280 		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
281 		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
282 		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
283 
284 	if (dev->hif2) {
285 		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
286 			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
287 			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
288 			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
289 			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
290 			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
291 	}
292 }
293 
/* Enable the WFDMA engines (unless this is part of a reset, where the
 * engines are restarted elsewhere), build the interrupt mask for all rings
 * that exist on this chip configuration, optionally (re)start WED, and
 * finally program the irq mask.
 */
void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* enable WFDMA Tx/Rx */
	if (!reset) {
		/* with WED Rx offload active, the host does not enable its
		 * own Rx DMA on the primary instance
		 */
		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
		else
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);

		if (dev->hif2)
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_MCU_CMD | MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;

	if (mt7996_band_valid(dev, MT_BAND0))
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND1)) {
		irq_mask |= MT_INT_BAND1_RX_DONE;
		/* mt7992 dual-pcie routes band1 tx-free through hif2 */
		if (is_mt7992(&dev->mt76) && dev->hif2)
			irq_mask |= MT_INT_RX_TXFREE_BAND1_EXT;
	}

	if (mt7996_band_valid(dev, MT_BAND2))
		irq_mask |= MT_INT_BAND2_RX_DONE | MT_INT_TX_RX_DONE_EXT;

	if (mtk_wed_device_active(wed) && wed_reset) {
		u32 wed_irq_mask = irq_mask;

		/* WED additionally handles the band0/1 data Tx completions */
		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start(wed, wed_irq_mask);
	}

	/* no WA MCU (or NPU offload): the WA tx-free rings never fire */
	if (!mt7996_has_wa(dev) || mt76_npu_device_active(&dev->mt76))
		irq_mask &= ~(MT_INT_RX(MT_RXQ_MAIN_WA) |
			      MT_INT_RX(MT_RXQ_BAND1_WA));
	/* during reset only the MCU command interrupt stays armed */
	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;

	mt7996_irq_enable(dev, irq_mask);
	mt7996_irq_disable(dev, 0);
}
358 
/* Bring the WFDMA engines up: reset descriptor pointers, disable delay
 * interrupts, program prefetch SRAM, wait for the DMA to go idle, apply
 * the GLO_CFG_EXT / host-config / AXI tuning (including PCIe link-speed
 * dependent outstanding-request limits on hif2), set the Rx pause
 * thresholds, fix up the pcie0/pcie1 ring3 interrupt redirection, and
 * finally start DMA via mt7996_dma_start().
 */
static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (dev->hif2)
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);

	/* configure delay interrupt off */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 + hif1_ofs, 0);
	}

	/* configure perfetch settings */
	mt7996_dma_prefetch(dev);

	/* hif wait WFDMA idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (dev->hif2)
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

	/* NOTE(review): poll result is not checked; a timeout here is
	 * silently ignored, matching the historical behavior
	 */
	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	/* GLO_CFG_EXT0 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0,
		 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
		 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

	/* GLO_CFG_EXT1 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1,
		 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

	/* WFDMA rx threshold */
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH, 0xc000c);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH, 0x20);

	if (dev->hif2) {
		/* GLO_CFG_EXT0 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
			 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

		/* GLO_CFG_EXT1 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

		mt76_set(dev, MT_WFDMA_HOST_CONFIG,
			 MT_WFDMA_HOST_CONFIG_PDMA_BAND);

		/* route exactly one band to PCIe1: band2 on mt7996,
		 * band1 on the other chips
		 */
		mt76_clear(dev, MT_WFDMA_HOST_CONFIG,
			   MT_WFDMA_HOST_CONFIG_BAND0_PCIE1 |
			   MT_WFDMA_HOST_CONFIG_BAND1_PCIE1 |
			   MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);

		if (is_mt7996(&dev->mt76))
			mt76_set(dev, MT_WFDMA_HOST_CONFIG,
				 MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
		else
			mt76_set(dev, MT_WFDMA_HOST_CONFIG,
				 MT_WFDMA_HOST_CONFIG_BAND1_PCIE1);

		/* AXI read outstanding number */
		mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
			 MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);

		/* limit DMA outstanding requests on slow/narrow PCIe links
		 * (< Gen2, or Gen2 x1 → 1; < Gen3, or Gen3 x1 → 2)
		 */
		if (dev->hif2->speed < PCIE_SPEED_5_0GT ||
		    (dev->hif2->speed == PCIE_SPEED_5_0GT &&
		     dev->hif2->width < PCIE_LNK_X2)) {
			mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
				 WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
				 FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
					    0x1));
			mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
				 MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
				 FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
					    0x1));
		} else if (dev->hif2->speed < PCIE_SPEED_8_0GT ||
			   (dev->hif2->speed == PCIE_SPEED_8_0GT &&
			    dev->hif2->width < PCIE_LNK_X2)) {
			mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
				 WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
				 FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
					    0x2));
			mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
				 MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
				 FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
					    0x2));
		}

		/* WFDMA rx threshold */
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH + hif1_ofs, 0x20);
	}

	if (dev->hif2) {
		/* fix hardware limitation, pcie1's rx ring3 is not available
		 * so, redirect pcie0 rx ring3 interrupt to pcie1
		 */
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt7996_has_hwrro(dev)) {
			u32 intr = is_mt7996(&dev->mt76) ?
				   MT_WFDMA0_RX_INT_SEL_RING6 :
				   MT_WFDMA0_RX_INT_SEL_RING9 |
				   MT_WFDMA0_RX_INT_SEL_RING5;

			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
				 intr);
		} else {
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
				 MT_WFDMA0_RX_INT_SEL_RING3);
		}
	}

	mt7996_dma_start(dev, reset, true);
}
496 
/* Allocate and start the hardware RRO (receive reordering offload) rings.
 *
 * In RRO v3.1 mode only the rxdmad_c ring is needed; in v3 mode the
 * indication-command ring plus the per-band MSDU-page rings are allocated.
 * With WED active the rings are handed to WED (mtk_wed_device_start_hw_rro),
 * otherwise they are polled by the host via mt76_dma_rx_poll and the RRO
 * Rx-done interrupts are enabled (unless an NPU owns them).
 *
 * Returns 0 on success or a negative errno from queue allocation.
 */
int mt7996_dma_rro_init(struct mt7996_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 irq_mask;
	int ret;

	if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
		/* rxdmad_c */
		mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags = MT_WED_RRO_Q_RXDMAD_C;
		if (mtk_wed_device_active(&mdev->mmio.wed))
			mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].wed = &mdev->mmio.wed;
		else if (!mt76_npu_device_active(&dev->mt76))
			/* host-driven ring uses the EMI cpu-index mirror */
			mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags |= MT_QFLAG_EMI_EN;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C],
				       MT_RXQ_ID(MT_RXQ_RRO_RXDMAD_C),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RRO_AP_RING_BASE);
		if (ret)
			return ret;

		if (!mtk_wed_device_active(&mdev->mmio.wed)) {
			/* We need to set cpu idx pointer before resetting the
			 * EMI queues.
			 */
			mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].emi_cpu_idx =
				&dev->wed_rro.emi_rings_cpu.ptr->ring[0].idx;
			mt76_queue_reset(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C],
					 true);
		}
		goto start_hw_rro;
	}

	/* ind cmd */
	mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
	if (mtk_wed_device_active(&mdev->mmio.wed) &&
	    mtk_wed_get_rx_capa(&mdev->mmio.wed))
		mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
			       MT_RXQ_ID(MT_RXQ_RRO_IND),
			       MT7996_RX_RING_SIZE,
			       0, MT_RXQ_RRO_IND_RING_BASE);
	if (ret)
		return ret;

	/* rx msdu page queue for band0 */
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
		MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
	if (mtk_wed_device_active(&mdev->mmio.wed) &&
	    mtk_wed_get_rx_capa(&mdev->mmio.wed))
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
			       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
			       MT7996_RX_RING_SIZE,
			       MT7996_RX_MSDU_PAGE_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
	if (ret)
		return ret;

	/* per-band MSDU page rings only exist on mt7996 (band1/band2) */
	if (mt7996_band_valid(dev, MT_BAND1) && is_mt7996(&dev->mt76)) {
		/* rx msdu page queue for band1 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
			MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed))
			mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
		if (ret)
			return ret;
	}

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx msdu page queue for band2 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
			MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed))
			mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
		if (ret)
			return ret;
	}

start_hw_rro:
	if (mtk_wed_device_active(&mdev->mmio.wed)) {
		irq_mask = mdev->mmio.irqmask |
			   MT_INT_TX_DONE_BAND2;

		mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
		mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
		mt7996_irq_enable(dev, irq_mask);
	} else {
		/* host-polled operation: register NAPI pollers per ring */
		if (is_mt7996(&dev->mt76)) {
			mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND1,
					   mt76_dma_rx_poll);
			mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND2,
					   mt76_dma_rx_poll);
			mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND2,
					   mt76_dma_rx_poll);
		} else {
			mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND1,
					   mt76_dma_rx_poll);
		}

		mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND0, mt76_dma_rx_poll);
		if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
			mt76_queue_rx_init(dev, MT_RXQ_RRO_RXDMAD_C,
					   mt76_dma_rx_poll);
		} else {
			mt76_queue_rx_init(dev, MT_RXQ_RRO_IND,
					   mt76_dma_rx_poll);
			mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND0,
					   mt76_dma_rx_poll);
		}

		if (!mt76_npu_device_active(&dev->mt76))
			mt7996_irq_enable(dev, MT_INT_RRO_RX_DONE);
	}

	return 0;
}
626 
/* Top-level DMA initialization: build the queue layout, allocate every Tx,
 * MCU and Rx ring required by the detected chip / hif2 / WED / RRO
 * combination, register the NAPI handlers, and enable the DMA engines.
 *
 * Returns 0 on success or a negative errno from the first failing
 * allocation; partially-allocated queues are left for the caller to tear
 * down via mt7996_dma_cleanup().
 */
int mt7996_dma_init(struct mt7996_dev *dev)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
	u32 rx_base;
	u32 hif1_ofs = 0;
	int ret;

	mt7996_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7996_dma_disable(dev, true);

	/* init tx queue */
	ret = mt7996_init_tx_queues(&dev->phy,
				    MT_TXQ_ID(dev->mphy.band_idx),
				    MT7996_TX_RING_SIZE,
				    MT_TXQ_RING_BASE(0),
				    wed);
	if (ret)
		return ret;

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	if (mt7996_has_wa(dev)) {
		ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
					  MT_MCUQ_ID(MT_MCUQ_WA),
					  MT7996_TX_MCU_RING_SIZE,
					  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
		if (ret)
			return ret;
	}

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7996_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7996_RX_MCU_RING_SIZE,
			       MT7996_RX_MCU_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	/* event from WA, or SDO event for mt7990 */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT_RXQ_ID(MT_RXQ_MCU_WA),
			       MT7996_RX_MCU_RING_SIZE_WA,
			       MT7996_RX_MCU_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
	if (ret)
		return ret;

	/* rx data queue for band0 and mt7996 band1 */
	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
		dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
		dev->mt76.q_rx[MT_RXQ_MAIN].wed = wed;
	}

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT_RXQ_ID(MT_RXQ_MAIN),
			       MT7996_RX_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
	if (ret)
		return ret;

	/* tx free notify event from WA for band0 */
	if (mtk_wed_device_active(wed) &&
	    ((is_mt7996(&dev->mt76) && !mt7996_has_hwrro(dev)) ||
	     (is_mt7992(&dev->mt76)))) {
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
	}

	if (mt7996_has_wa(dev)) {
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
				       MT_RXQ_ID(MT_RXQ_MAIN_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
		if (ret)
			return ret;
	} else {
		/* no WA MCU: tx-free notifications come from the MAC */
		if (mtk_wed_device_active(wed)) {
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
		}
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
		if (ret)
			return ret;
	}

	if (!mt7996_has_wa(dev) && dev->hif2) {
		if (mtk_wed_device_active(wed)) {
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].wed = wed;
		}
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND1),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND1));
		if (ret)
			return ret;
	}

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx data queue for mt7996 band2 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
				       MT_RXQ_ID(MT_RXQ_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7996 band2
		 * use pcie0's rx ring3, but, redirect pcie0 rx ring3 interrupt to pcie1
		 */
		if (mtk_wed_device_active(wed_hif2) && !mt7996_has_hwrro(dev)) {
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
				       MT_RXQ_ID(MT_RXQ_BAND2_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
		if (ret)
			return ret;
	} else if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx data queue for mt7992 band1 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
			dev->mt76.q_rx[MT_RXQ_BAND1].flags = MT_WED_Q_RX(1);
			dev->mt76.q_rx[MT_RXQ_BAND1].wed = wed;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
				       MT_RXQ_ID(MT_RXQ_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7992 band1 */
		if (mt7996_has_wa(dev)) {
			rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
			if (mtk_wed_device_active(wed_hif2)) {
				dev->mt76.q_rx[MT_RXQ_BAND1_WA].flags =
					MT_WED_Q_TXFREE;
				dev->mt76.q_rx[MT_RXQ_BAND1_WA].wed = wed_hif2;
			}

			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
					       MT_RXQ_ID(MT_RXQ_BAND1_WA),
					       MT7996_RX_MCU_RING_SIZE,
					       MT_RX_BUF_SIZE,
					       rx_base);
			if (ret)
				return ret;
		}
	}

	if (mt7996_has_hwrro(dev)) {
		/* rx rro data queue for band0 */
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
			MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
			dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
				       MT_RXQ_ID(MT_RXQ_RRO_BAND0),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
		if (ret)
			return ret;

		if (is_mt7992(&dev->mt76)) {
			dev->mt76.q_rx[MT_RXQ_RRO_BAND1].flags =
				MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
			if (mtk_wed_device_active(wed) &&
			    mtk_wed_get_rx_capa(wed))
				dev->mt76.q_rx[MT_RXQ_RRO_BAND1].wed = wed;
			ret = mt76_queue_alloc(dev,
					       &dev->mt76.q_rx[MT_RXQ_RRO_BAND1],
					       MT_RXQ_ID(MT_RXQ_RRO_BAND1),
					       MT7996_RX_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND1) + hif1_ofs);
			if (ret)
				return ret;
		} else {
			if (mtk_wed_device_active(wed)) {
				/* tx free notify event from WA for band0 */
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
			}

			ret = mt76_queue_alloc(dev,
					       &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
					       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
					       MT7996_RX_MCU_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
			if (ret)
				return ret;
		}

		if (mt7996_band_valid(dev, MT_BAND2)) {
			/* rx rro data queue for band2 */
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
				MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
			if (mtk_wed_device_active(wed) &&
			    mtk_wed_get_rx_capa(wed))
				dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
					       MT_RXQ_ID(MT_RXQ_RRO_BAND2),
					       MT7996_RX_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
			if (ret)
				return ret;

			/* tx free notify event from MAC for band2 */
			if (mtk_wed_device_active(wed_hif2)) {
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE;
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].wed = wed_hif2;
			}
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
					       MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
					       MT7996_RX_MCU_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
			if (ret)
				return ret;
		}
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	ret = mt7996_npu_rx_queues_init(dev);
	if (ret)
		return ret;

	netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7996_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7996_dma_enable(dev, false);

	return 0;
}
907 
/* Full DMA reset path: stop the engines, drain and reset every Tx/MCU/Rx
 * queue on all (up to three) bands, reset WED/NPU state, then re-enable
 * DMA.  @force additionally resets the whole WiFi subsystem and relaxes
 * the WED-RRO queue handling accordingly.  Queues owned by WED (RRO /
 * tx-free) are deliberately skipped during reset so WED state stays
 * consistent.
 */
void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
{
	struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
	struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	/* stop Tx/Rx DMA before touching the rings */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (dev->hif2)
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	/* let in-flight DMA transactions settle */
	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (phy2)
			mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
		if (phy3)
			mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	/* host-managed RRO page map must be dropped before re-init */
	if (mt7996_has_hwrro(dev) &&
	    !mtk_wed_device_active(&dev->mt76.mmio.wed))
		mt7996_rro_msdu_page_map_free(dev);

	/* reset wfsys */
	if (force)
		mt7996_wfsys_reset(dev);

	if (dev->hif2 && mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);

	mt76_npu_disable_irqs(&dev->mt76);
	mt7996_dma_disable(dev, force);
	mt76_wed_dma_reset(&dev->mt76);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_dma_reset_tx_queue(&dev->mt76, dev->mphy.q_tx[i]);
		if (phy2)
			mt76_dma_reset_tx_queue(&dev->mt76, phy2->q_tx[i]);
		if (phy3)
			mt76_dma_reset_tx_queue(&dev->mt76, phy3->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		struct mt76_queue *q = &dev->mt76.q_rx[i];

		/* WED-owned RRO / tx-free rings keep their state unless a
		 * forced reset requires re-priming the RRO data rings
		 */
		if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
			if (mt76_queue_is_wed_rro(q) ||
			    mt76_queue_is_wed_tx_free(q)) {
				if (force && mt76_queue_is_wed_rro_data(q))
					mt76_queue_reset(dev, q, false);
				continue;
			}
		}
		mt76_queue_reset(dev, q, true);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) && force &&
		    (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
		     mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i])))
			continue;

		mt76_queue_rx_reset(dev, i);
	}

	mt7996_dma_enable(dev, !force);
}
999 
/* Tear down all DMA state: quiesce the engines (with a full reset pulse)
 * and then free every queue via the common mt76 cleanup helper.
 */
void mt7996_dma_cleanup(struct mt7996_dev *dev)
{
	mt7996_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}
1006