// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include "mt7996.h"
#include "../dma.h"
#include "mac.h"

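/* Set up a data TX ring for @phy. When a WED device is active, the ring
 * base address and index are rebased against the first TX ring, and the
 * queue is tagged with WED TX flags (band0 flags for the hif2 WED
 * instance) before the common connac helper allocates it.
 */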
int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
			  int ring_base, struct mtk_wed_device *wed)
{
	struct mt7996_dev *dev = phy->dev;
	u32 flags = 0;

	if (mtk_wed_device_active(wed)) {
		ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
		idx -= MT_TXQ_ID(0);

		if (wed == &dev->mt76.mmio.wed_hif2)
			flags = MT_WED_Q_TX(0);
		else
			flags = MT_WED_Q_TX(idx);
	}

	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
					  ring_base, wed, flags);
}

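/* NAPI TX poll handler: reap completed MCU TX descriptors and re-enable
 * the MCU TX-done interrupt once polling is done.
 */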
static int mt7996_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7996_dev *dev;

	dev = container_of(napi, struct mt7996_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7996_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}

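/* Record the per-chip queue layout: for each TX/RX/MCU queue used by this
 * chip variant, store the owning WFDMA engine, the interrupt mask and the
 * hardware ring ID in the device lookup tables.
 */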
static void mt7996_dma_config(struct mt7996_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do {		\
	if (wfdma)					\
		dev->q_wfdma_mask |= (1 << (q));	\
	dev->q_int_mask[(q)] = int;			\
	dev->q_id[(q)] = id;				\
} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	/* rx queue */
	RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
	/* for mt7990, RX ring 1 is for SDO instead */
	RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);
	RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
	if (mt7996_has_wa(dev))
		RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN,
			   MT7996_RXQ_MCU_WA_MAIN);

	switch (mt76_chip(&dev->mt76)) {
	case MT7992_DEVICE_ID:
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
		break;
	case MT7990_DEVICE_ID:
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
			   MT_INT_RX_TXFREE_BAND0_MT7990, MT7990_RXQ_TXFREE0);
		if (dev->hif2)
			RXQ_CONFIG(MT_RXQ_TXFREE_BAND1, WFDMA0,
				   MT_INT_RX_TXFREE_BAND1_MT7990, MT7990_RXQ_TXFREE1);
		break;
	case MT7996_DEVICE_ID:
	default:
		/* mt7996 band2 */
		RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
		RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
		break;
	}

	if (mt7996_has_hwrro(dev)) {
		/* band0 */
		RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
			   MT7996_RXQ_RRO_BAND0);
		if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
			RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0,
				   MT_INT_RX_DONE_MSDU_PG_BAND0,
				   MT7996_RXQ_MSDU_PG_BAND0);
		if (is_mt7996(&dev->mt76)) {
			RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
				   MT_INT_RX_TXFREE_MAIN, MT7996_RXQ_TXFREE0);
			/* band1 */
			RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0,
				   MT_INT_RX_DONE_MSDU_PG_BAND1,
				   MT7996_RXQ_MSDU_PG_BAND1);
			/* band2 */
			RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0,
				   MT_INT_RX_DONE_RRO_BAND2,
				   MT7996_RXQ_RRO_BAND2);
			RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0,
				   MT_INT_RX_DONE_MSDU_PG_BAND2,
				   MT7996_RXQ_MSDU_PG_BAND2);
			RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0,
				   MT_INT_RX_TXFREE_TRI, MT7996_RXQ_TXFREE2);
		} else {
			RXQ_CONFIG(MT_RXQ_RRO_BAND1, WFDMA0,
				   MT_INT_RX_DONE_RRO_BAND1,
				   MT7996_RXQ_RRO_BAND1);
		}

		if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
			RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0,
				   MT_INT_RX_DONE_RRO_IND,
				   MT7996_RXQ_RRO_IND);
		else
			RXQ_CONFIG(MT_RXQ_RRO_RXDMAD_C, WFDMA0,
				   MT_INT_RX_DONE_RRO_RXDMAD_C,
				   MT7996_RXQ_RRO_RXDMAD_C);
	}

	/* data tx queue */
	if (is_mt7996(&dev->mt76)) {
		TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
		if (dev->hif2) {
			/* default bn1:ring19 bn2:ring21 */
			TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
				   MT7996_TXQ_BAND1);
			TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2,
				   MT7996_TXQ_BAND2);
		} else {
			/* single pcie bn0/1:ring18 bn2:ring19 */
			TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND1,
				   MT7996_TXQ_BAND1);
		}
	} else {
		if (dev->hif2) {
			/* bn0:ring18 bn1:ring21 */
			TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
				   MT7996_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND2,
				   MT7996_TXQ_BAND2);
		} else {
			/* single pcie bn0:ring18 bn1:ring19 */
			TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
				   MT7996_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
				   MT7996_TXQ_BAND1);
		}
	}

	/* mcu tx queue */
	MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
	MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
	if (mt7996_has_wa(dev))
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA,
			    MT7996_TXQ_MCU_WA);
}

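/* Pack the current SRAM base and ring depth into one prefetch control
 * word and advance the running base by depth * 16 for the next ring.
 */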
static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
{
	u32 ret = *base << 16 | depth;

	*base = *base + (depth << 4);

	return ret;
}

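/* Program the SRAM prefetch window of every TX/RX ring on the WFDMA
 * instance at register offset @ofs, carving consecutive slices out of a
 * running base address.
 */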
static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
	u16 base = 0;
	u8 queue, val;

#define PREFETCH(_depth)	(__mt7996_dma_prefetch_base(&base, (_depth)))
	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	/* Tx Command Rings */
	val = is_mt7996(&dev->mt76) ? 2 : 4;
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(val));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(val));
	if (mt7996_has_wa(dev))
		mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(val));

	/* Tx Data Rings */
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x8));
	if (!is_mt7996(&dev->mt76) || dev->hif2)
		mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
	if (is_mt7996(&dev->mt76))
		mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));

	/* Rx Event Rings */
	mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(val));
	mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(val));

	/* Rx TxFreeDone From WA Rings */
	if (mt7996_has_wa(dev)) {
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(val));
		queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
		mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(val));
	}

	/* Rx TxFreeDone From MAC Rings */
	val = is_mt7996(&dev->mt76) ? 4 : 8;
	if ((is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev)) ||
	    is_mt7990(&dev->mt76))
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, PREFETCH(val));
	if (is_mt7990(&dev->mt76) && dev->hif2)
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND1) + ofs, PREFETCH(val));
	else if (is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev))
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, PREFETCH(val));

	/* Rx Data Rings */
	mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
	queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2 : MT_RXQ_BAND1;
	mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));

	/* Rx RRO Rings */
	if (mt7996_has_hwrro(dev)) {
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_RRO_BAND0) + ofs, PREFETCH(0x10));
		queue = is_mt7996(&dev->mt76) ? MT_RXQ_RRO_BAND2 : MT_RXQ_RRO_BAND1;
		mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));

		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs, PREFETCH(val));
		if (is_mt7996(&dev->mt76)) {
			mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
				PREFETCH(val));
			mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
				PREFETCH(val));
		}
	}
#undef PREFETCH

	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
}

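/* Apply prefetch programming to the primary WFDMA and, when a second PCIe
 * interface is present, mirror it at the PCIE1 register offset.
 */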
void mt7996_dma_prefetch(struct mt7996_dev *dev)
{
	__mt7996_dma_prefetch(dev, 0);
	if (dev->hif2)
		__mt7996_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}

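/* Quiesce WFDMA on both host interfaces: optionally pulse the DMASHDL and
 * logic reset bits, then clear the TX/RX DMA enable bits in GLO_CFG.
 */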
static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	if (reset) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);
		}
	}

	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
	}
}

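/* Enable WFDMA TX/RX and build the interrupt mask for the bands this
 * device exposes. During a reset the enable step is skipped and only the
 * MCU command interrupt is left on; when WED owns the rings it is
 * (re)started with its own mask first.
 */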
void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* enable WFDMA Tx/Rx */
	if (!reset) {
		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
		else
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);

		if (dev->hif2)
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_MCU_CMD | MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;

	if (mt7996_band_valid(dev, MT_BAND0))
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND1)) {
		irq_mask |= MT_INT_BAND1_RX_DONE;
		if (is_mt7992(&dev->mt76) && dev->hif2)
			irq_mask |= MT_INT_RX_TXFREE_BAND1_EXT;
	}

	if (mt7996_band_valid(dev, MT_BAND2))
		irq_mask |= MT_INT_BAND2_RX_DONE | MT_INT_TX_RX_DONE_EXT;

	if (mtk_wed_device_active(wed) && wed_reset) {
		u32 wed_irq_mask = irq_mask;

		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start(wed, wed_irq_mask);
	}

	if (!mt7996_has_wa(dev))
		irq_mask &= ~(MT_INT_RX(MT_RXQ_MAIN_WA) |
			      MT_INT_RX(MT_RXQ_BAND1_WA));
	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;

	mt7996_irq_enable(dev, irq_mask);
	mt7996_irq_disable(dev, 0);
}

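/* Full WFDMA bring-up: reset DMA index pointers, disable delay
 * interrupts, program prefetch, wait for the engine to go idle and apply
 * chip- and PCIe-link-specific tuning before starting DMA.
 */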
static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (dev->hif2)
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);

	/* configure delay interrupt off */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 + hif1_ofs, 0);
	}

	/* configure prefetch settings */
	mt7996_dma_prefetch(dev);

	/* hif wait WFDMA idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (dev->hif2)
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	/* GLO_CFG_EXT0 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0,
		 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
		 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

	/* GLO_CFG_EXT1 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1,
		 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

	/* WFDMA rx threshold */
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH, 0xc000c);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH, 0x20);

	if (dev->hif2) {
		/* GLO_CFG_EXT0 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
			 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

		/* GLO_CFG_EXT1 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

		mt76_set(dev, MT_WFDMA_HOST_CONFIG,
			 MT_WFDMA_HOST_CONFIG_PDMA_BAND);

		mt76_clear(dev, MT_WFDMA_HOST_CONFIG,
			   MT_WFDMA_HOST_CONFIG_BAND0_PCIE1 |
			   MT_WFDMA_HOST_CONFIG_BAND1_PCIE1 |
			   MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);

		if (is_mt7996(&dev->mt76))
			mt76_set(dev, MT_WFDMA_HOST_CONFIG,
				 MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
		else
			mt76_set(dev, MT_WFDMA_HOST_CONFIG,
				 MT_WFDMA_HOST_CONFIG_BAND1_PCIE1);

		/* AXI read outstanding number */
		mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
			 MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);

		if (dev->hif2->speed < PCIE_SPEED_5_0GT ||
		    (dev->hif2->speed == PCIE_SPEED_5_0GT &&
		     dev->hif2->width < PCIE_LNK_X2)) {
			mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
				 WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
				 FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
					    0x1));
			mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
				 MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
				 FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
					    0x1));
		} else if (dev->hif2->speed < PCIE_SPEED_8_0GT ||
			   (dev->hif2->speed == PCIE_SPEED_8_0GT &&
			    dev->hif2->width < PCIE_LNK_X2)) {
			mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
				 WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
				 FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
					    0x2));
			mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
				 MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
				 FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
					    0x2));
		}

		/* WFDMA rx threshold */
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH + hif1_ofs, 0x20);
	}

	if (dev->hif2) {
		/* fix hardware limitation: pcie1's rx ring3 is not available,
		 * so redirect pcie0's rx ring3 interrupt to pcie1
		 */
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt7996_has_hwrro(dev)) {
			u32 intr = is_mt7996(&dev->mt76) ?
				   MT_WFDMA0_RX_INT_SEL_RING6 :
				   MT_WFDMA0_RX_INT_SEL_RING9 |
				   MT_WFDMA0_RX_INT_SEL_RING5;

			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
				 intr);
		} else {
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
				 MT_WFDMA0_RX_INT_SEL_RING3);
		}
	}

	mt7996_dma_start(dev, reset, true);
}

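/* Allocate and start the hardware RRO rings. In HWRRO v3.1 mode only the
 * rxdmad_c ring is required; otherwise the indication-command ring and
 * the per-band MSDU page rings are set up. With WED active the rings are
 * handed over to WED, otherwise they are polled via the mt76 DMA NAPI
 * path.
 */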
int mt7996_dma_rro_init(struct mt7996_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 irq_mask;
	int ret;

	if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
		/* rxdmad_c */
		mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags = MT_WED_RRO_Q_RXDMAD_C;
		if (mtk_wed_device_active(&mdev->mmio.wed))
			mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].wed = &mdev->mmio.wed;
		else
			mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags |= MT_QFLAG_EMI_EN;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C],
				       MT_RXQ_ID(MT_RXQ_RRO_RXDMAD_C),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RRO_AP_RING_BASE);
		if (ret)
			return ret;

		/* We need to set cpu idx pointer before resetting the EMI
		 * queues.
		 */
		mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].emi_cpu_idx =
			&dev->wed_rro.emi_rings_cpu.ptr->ring[0].idx;
		mt76_queue_reset(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C], true);
		goto start_hw_rro;
	}

	/* ind cmd */
	mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
	if (mtk_wed_device_active(&mdev->mmio.wed) &&
	    mtk_wed_get_rx_capa(&mdev->mmio.wed))
		mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
			       MT_RXQ_ID(MT_RXQ_RRO_IND),
			       MT7996_RX_RING_SIZE,
			       0, MT_RXQ_RRO_IND_RING_BASE);
	if (ret)
		return ret;

	/* rx msdu page queue for band0 */
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
		MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
	if (mtk_wed_device_active(&mdev->mmio.wed) &&
	    mtk_wed_get_rx_capa(&mdev->mmio.wed))
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
			       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
			       MT7996_RX_RING_SIZE,
			       MT7996_RX_MSDU_PAGE_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
	if (ret)
		return ret;

	if (mt7996_band_valid(dev, MT_BAND1) && is_mt7996(&dev->mt76)) {
		/* rx msdu page queue for band1 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
			MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed))
			mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
		if (ret)
			return ret;
	}

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx msdu page queue for band2 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
			MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed))
			mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
		if (ret)
			return ret;
	}

start_hw_rro:
	if (mtk_wed_device_active(&mdev->mmio.wed)) {
		irq_mask = mdev->mmio.irqmask | MT_INT_TX_DONE_BAND2;

		mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
		mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
		mt7996_irq_enable(dev, irq_mask);
	} else {
		if (is_mt7996(&dev->mt76)) {
			mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND1,
					   mt76_dma_rx_poll);
			mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND2,
					   mt76_dma_rx_poll);
			mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND2,
					   mt76_dma_rx_poll);
		} else {
			mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND1,
					   mt76_dma_rx_poll);
		}

		mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND0, mt76_dma_rx_poll);
		if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
			mt76_queue_rx_init(dev, MT_RXQ_RRO_RXDMAD_C,
					   mt76_dma_rx_poll);
		} else {
			mt76_queue_rx_init(dev, MT_RXQ_RRO_IND,
					   mt76_dma_rx_poll);
			mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND0,
					   mt76_dma_rx_poll);
		}
		mt7996_irq_enable(dev, MT_INT_RRO_RX_DONE);
	}

	return 0;
}

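/* Top-level DMA init: derive the queue layout, allocate all TX, MCU and
 * RX rings for the detected chip/band/WED combination, register the NAPI
 * handlers and enable the WFDMA engines.
 */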
int mt7996_dma_init(struct mt7996_dev *dev)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
	u32 rx_base;
	u32 hif1_ofs = 0;
	int ret;

	mt7996_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7996_dma_disable(dev, true);

	/* init tx queue */
	ret = mt7996_init_tx_queues(&dev->phy,
				    MT_TXQ_ID(dev->mphy.band_idx),
				    MT7996_TX_RING_SIZE,
				    MT_TXQ_RING_BASE(0),
				    wed);
	if (ret)
		return ret;

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	if (mt7996_has_wa(dev)) {
		ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
					  MT_MCUQ_ID(MT_MCUQ_WA),
					  MT7996_TX_MCU_RING_SIZE,
					  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
		if (ret)
			return ret;
	}

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7996_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7996_RX_MCU_RING_SIZE,
			       MT7996_RX_MCU_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	/* event from WA, or SDO event for mt7990 */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT_RXQ_ID(MT_RXQ_MCU_WA),
			       MT7996_RX_MCU_RING_SIZE_WA,
			       MT7996_RX_MCU_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
	if (ret)
		return ret;

	/* rx data queue for band0 and mt7996 band1 */
	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
		dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
		dev->mt76.q_rx[MT_RXQ_MAIN].wed = wed;
	}

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT_RXQ_ID(MT_RXQ_MAIN),
			       MT7996_RX_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
	if (ret)
		return ret;

	/* tx free notify event from WA for band0 */
	if (mtk_wed_device_active(wed) &&
	    ((is_mt7996(&dev->mt76) && !mt7996_has_hwrro(dev)) ||
	     is_mt7992(&dev->mt76))) {
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
	}

	if (mt7996_has_wa(dev)) {
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
				       MT_RXQ_ID(MT_RXQ_MAIN_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
		if (ret)
			return ret;
	} else {
		if (mtk_wed_device_active(wed)) {
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
		}
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
		if (ret)
			return ret;
	}

	if (!mt7996_has_wa(dev) && dev->hif2) {
		if (mtk_wed_device_active(wed)) {
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].wed = wed;
		}
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND1),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND1));
		if (ret)
			return ret;
	}

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx data queue for mt7996 band2 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
				       MT_RXQ_ID(MT_RXQ_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7996 band2;
		 * use pcie0's rx ring3, but redirect its interrupt to pcie1
		 */
		if (mtk_wed_device_active(wed_hif2) && !mt7996_has_hwrro(dev)) {
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
				       MT_RXQ_ID(MT_RXQ_BAND2_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
		if (ret)
			return ret;
	} else if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx data queue for mt7992 band1 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
			dev->mt76.q_rx[MT_RXQ_BAND1].flags = MT_WED_Q_RX(1);
			dev->mt76.q_rx[MT_RXQ_BAND1].wed = wed;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
				       MT_RXQ_ID(MT_RXQ_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7992 band1 */
		if (mt7996_has_wa(dev)) {
			rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
			if (mtk_wed_device_active(wed_hif2)) {
				dev->mt76.q_rx[MT_RXQ_BAND1_WA].flags =
					MT_WED_Q_TXFREE;
				dev->mt76.q_rx[MT_RXQ_BAND1_WA].wed = wed_hif2;
			}

			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
					       MT_RXQ_ID(MT_RXQ_BAND1_WA),
					       MT7996_RX_MCU_RING_SIZE,
					       MT_RX_BUF_SIZE,
					       rx_base);
			if (ret)
				return ret;
		}
	}

	if (mt7996_has_hwrro(dev)) {
		/* rx rro data queue for band0 */
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
			MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
			dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
				       MT_RXQ_ID(MT_RXQ_RRO_BAND0),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
		if (ret)
			return ret;

		if (is_mt7992(&dev->mt76)) {
			dev->mt76.q_rx[MT_RXQ_RRO_BAND1].flags =
				MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
			if (mtk_wed_device_active(wed) &&
			    mtk_wed_get_rx_capa(wed))
				dev->mt76.q_rx[MT_RXQ_RRO_BAND1].wed = wed;
			ret = mt76_queue_alloc(dev,
					       &dev->mt76.q_rx[MT_RXQ_RRO_BAND1],
					       MT_RXQ_ID(MT_RXQ_RRO_BAND1),
					       MT7996_RX_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND1) + hif1_ofs);
			if (ret)
				return ret;
		} else {
			if (mtk_wed_device_active(wed)) {
				/* tx free notify event from WA for band0 */
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
			}

			ret = mt76_queue_alloc(dev,
					       &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
					       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
					       MT7996_RX_MCU_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
			if (ret)
				return ret;
		}

		if (mt7996_band_valid(dev, MT_BAND2)) {
			/* rx rro data queue for band2 */
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
				MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
			if (mtk_wed_device_active(wed) &&
			    mtk_wed_get_rx_capa(wed))
				dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
					       MT_RXQ_ID(MT_RXQ_RRO_BAND2),
					       MT7996_RX_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
			if (ret)
				return ret;

			/* tx free notify event from MAC for band2 */
			if (mtk_wed_device_active(wed_hif2)) {
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE;
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].wed = wed_hif2;
			}
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
					       MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
					       MT7996_RX_MCU_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
			if (ret)
				return ret;
		}
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7996_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7996_dma_enable(dev, false);

	return 0;
}

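/* Tear down and rebuild DMA state after an error or watchdog reset: stop
 * both WFDMA instances, drain all TX/MCU/RX queues, optionally reset
 * wfsys and the WED devices, then reset every hardware ring. WED-owned
 * RRO and TX-free rings are left to WED, except that RRO data rings are
 * reset on a forced reset.
 */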
void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
{
	struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
	struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (dev->hif2)
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (phy2)
			mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
		if (phy3)
			mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	if (mt7996_has_hwrro(dev) &&
	    !mtk_wed_device_active(&dev->mt76.mmio.wed))
		mt7996_rro_msdu_page_map_free(dev);

	/* reset wfsys */
	if (force)
		mt7996_wfsys_reset(dev);

	if (dev->hif2 && mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);

	mt7996_dma_disable(dev, force);
	mt76_wed_dma_reset(&dev->mt76);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_dma_reset_tx_queue(&dev->mt76, dev->mphy.q_tx[i]);
		if (phy2)
			mt76_dma_reset_tx_queue(&dev->mt76, phy2->q_tx[i]);
		if (phy3)
			mt76_dma_reset_tx_queue(&dev->mt76, phy3->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		struct mt76_queue *q = &dev->mt76.q_rx[i];

		if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
			if (mt76_queue_is_wed_rro(q) ||
			    mt76_queue_is_wed_tx_free(q)) {
				if (force && mt76_queue_is_wed_rro_data(q))
					mt76_queue_reset(dev, q, false);
				continue;
			}
		}
		mt76_queue_reset(dev, q, true);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) && force &&
		    (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
		     mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i])))
			continue;

		mt76_queue_rx_reset(dev, i);
	}

	mt7996_dma_enable(dev, !force);
}

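/* Driver teardown: stop the WFDMA engines with a full logic reset and
 * free all DMA descriptors and buffers.
 */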
void mt7996_dma_cleanup(struct mt7996_dev *dev)
{
	mt7996_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}