// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#if defined(__FreeBSD__)
#include <linux/delay.h>
#endif

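/* Set up the data TX queues for one phy. When WED is active, the ring index
 * and base address are rebased relative to TX ring 0 and the queue is tagged
 * with the matching WED TX queue flag (band2 always maps to WED TX queue 0).
 */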
int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
			  int ring_base, struct mtk_wed_device *wed)
{
	struct mt7996_dev *dev = phy->dev;
	u32 flags = 0;

	if (mtk_wed_device_active(wed)) {
		ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
		idx -= MT_TXQ_ID(0);

		if (phy->mt76->band_idx == MT_BAND2)
			flags = MT_WED_Q_TX(0);
		else
			flags = MT_WED_Q_TX(idx);
	}

	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
					  ring_base, wed, flags);
}

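/* NAPI poll handler for MCU TX completions: reclaim completed frames and
 * re-enable the MCU TX-done interrupt once polling has finished.
 */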
static int mt7996_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7996_dev *dev;

	dev = container_of(napi, struct mt7996_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7996_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}

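/* Map the logical mt76 queue IDs to the hardware ring numbers and interrupt
 * bits used by this chip variant. The layout differs between mt7996, mt7992
 * and mt7990, and again when hardware RRO offload is enabled.
 */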
static void mt7996_dma_config(struct mt7996_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do {		\
		if (wfdma)				\
			dev->q_wfdma_mask |= (1 << (q));\
		dev->q_int_mask[(q)] = int;		\
		dev->q_id[(q)] = id;			\
	} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	/* rx queue */
	RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
	/* for mt7990, RX ring 1 is for SDO instead */
	RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);
	RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
	if (mt7996_has_wa(dev))
		RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN,
			   MT7996_RXQ_MCU_WA_MAIN);

	switch (mt76_chip(&dev->mt76)) {
	case MT7992_DEVICE_ID:
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
		break;
	case MT7990_DEVICE_ID:
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
			   MT_INT_RX_TXFREE_BAND0_MT7990, MT7990_RXQ_TXFREE0);
		if (dev->hif2)
			RXQ_CONFIG(MT_RXQ_TXFREE_BAND1, WFDMA0,
				   MT_INT_RX_TXFREE_BAND1_MT7990, MT7990_RXQ_TXFREE1);
		break;
	case MT7996_DEVICE_ID:
	default:
		/* mt7996 band2 */
		RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
		RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
		break;
	}

	if (dev->has_rro) {
		/* band0 */
		RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
			   MT7996_RXQ_RRO_BAND0);
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
			   MT7996_RXQ_MSDU_PG_BAND0);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
			   MT7996_RXQ_TXFREE0);
		/* band1 */
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
			   MT7996_RXQ_MSDU_PG_BAND1);
		/* band2 */
		RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
			   MT7996_RXQ_RRO_BAND2);
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
			   MT7996_RXQ_MSDU_PG_BAND2);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
			   MT7996_RXQ_TXFREE2);

		RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
			   MT7996_RXQ_RRO_IND);
	}

	/* data tx queue */
	TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
	if (is_mt7996(&dev->mt76)) {
		TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
		TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
	} else {
		TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
	}

	/* mcu tx queue */
	MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
	MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
	if (mt7996_has_wa(dev))
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA,
			    MT7996_TXQ_MCU_WA);
}

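/* Build one prefetch control word: the current SRAM base goes in the high
 * 16 bits and the ring depth in the low bits, then the base is advanced by
 * depth * 16 entries for the next ring.
 */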
static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
{
	u32 ret = *base << 16 | depth;

	*base = *base + (depth << 4);

	return ret;
}

static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
	u16 base = 0;
	u8 queue, val;

#define PREFETCH(_depth)	(__mt7996_dma_prefetch_base(&base, (_depth)))
	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	/* Tx Command Rings */
	val = is_mt7996(&dev->mt76) ? 2 : 4;
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(val));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(val));
	if (mt7996_has_wa(dev))
		mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(val));

	/* Tx Data Rings */
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x8));
	if (!is_mt7996(&dev->mt76) || dev->hif2)
		mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
	if (is_mt7996(&dev->mt76))
		mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));

	/* Rx Event Rings */
	mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(val));
	mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(val));

	/* Rx TxFreeDone From WA Rings */
	if (mt7996_has_wa(dev)) {
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(val));
		queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
		mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(val));
	}

	/* Rx TxFreeDone From MAC Rings */
	val = is_mt7996(&dev->mt76) ? 4 : 8;
	if (is_mt7990(&dev->mt76) || (is_mt7996(&dev->mt76) && dev->has_rro))
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, PREFETCH(val));
	if (is_mt7990(&dev->mt76) && dev->hif2)
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND1) + ofs, PREFETCH(val));
	else if (is_mt7996(&dev->mt76) && dev->has_rro)
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, PREFETCH(val));

	/* Rx Data Rings */
	mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));
	queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2 : MT_RXQ_BAND1;
	mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));

	/* Rx RRO Rings */
	if (dev->has_rro) {
		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_RRO_BAND0) + ofs, PREFETCH(0x10));
		queue = is_mt7996(&dev->mt76) ? MT_RXQ_RRO_BAND2 : MT_RXQ_RRO_BAND1;
		mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));

		mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs, PREFETCH(val));
		if (is_mt7996(&dev->mt76)) {
			mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
				PREFETCH(val));
			mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
				PREFETCH(val));
		}
	}
#undef PREFETCH

	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
}

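/* Program the ring prefetch configuration for the primary WFDMA and, when a
 * second PCIe interface is present, mirror it at the PCIE1 register offset.
 */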
void mt7996_dma_prefetch(struct mt7996_dev *dev)
{
	__mt7996_dma_prefetch(dev, 0);
	if (dev->hif2)
		__mt7996_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}

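/* Stop WFDMA on both interfaces; when @reset is set, also pulse the DMA
 * scheduler/logic reset bits before clearing the GLO_CFG enable bits.
 */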
static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	if (reset) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);
		}
	}

	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
	}
}

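/* Enable WFDMA TX/RX (skipped while a reset is in flight) and program the
 * interrupt mask. When WED is active and @wed_reset is set, the WED device
 * is (re)started with the matching interrupt mask; during a reset only the
 * MCU command interrupt is left enabled.
 */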
void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* enable WFDMA Tx/Rx */
	if (!reset) {
		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
		else
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);

		if (dev->hif2)
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_MCU_CMD | MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;

	if (mt7996_band_valid(dev, MT_BAND0))
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND1))
		irq_mask |= MT_INT_BAND1_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND2))
		irq_mask |= MT_INT_BAND2_RX_DONE;

	if (mtk_wed_device_active(wed) && wed_reset) {
		u32 wed_irq_mask = irq_mask;

		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start(wed, wed_irq_mask);
	}

	if (!mt7996_has_wa(dev))
		irq_mask &= ~(MT_INT_RX(MT_RXQ_MAIN_WA) |
			      MT_INT_RX(MT_RXQ_BAND1_WA));
	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;

	mt7996_irq_enable(dev, irq_mask);
	mt7996_irq_disable(dev, 0);
}

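/* Full WFDMA bring-up: reset the DMA index pointers, disable delayed
 * interrupts, program ring prefetch, wait for the HIF to go idle, apply the
 * GLO_CFG extension and RX-threshold settings (mirrored on the second PCIe
 * interface when present) and finally start DMA via mt7996_dma_start().
 */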
static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (dev->hif2)
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);

	/* configure delay interrupt off */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 + hif1_ofs, 0);
	}

	/* configure prefetch settings */
	mt7996_dma_prefetch(dev);

	/* hif wait WFDMA idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (dev->hif2)
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	/* GLO_CFG_EXT0 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0,
		 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
		 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

	/* GLO_CFG_EXT1 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1,
		 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

	/* WFDMA rx threshold */
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH, 0xc000c);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH, 0x20);

	if (dev->hif2) {
		/* GLO_CFG_EXT0 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
			 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

		/* GLO_CFG_EXT1 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

		mt76_set(dev, MT_WFDMA_HOST_CONFIG,
			 MT_WFDMA_HOST_CONFIG_PDMA_BAND |
			 MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);

		/* AXI read outstanding number */
		mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
			 MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);

		/* WFDMA rx threshold */
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH + hif1_ofs, 0x20);
	}

	if (dev->hif2) {
		/* fix hardware limitation: pcie1's rx ring3 is not available,
		 * so redirect pcie0 rx ring3 interrupt to pcie1
		 */
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    dev->has_rro)
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
				 MT_WFDMA0_RX_INT_SEL_RING6);
		else
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
				 MT_WFDMA0_RX_INT_SEL_RING3);
	}

	mt7996_dma_start(dev, reset, true);
}

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
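/* Allocate the WED hardware-RRO queues (indication command ring plus the
 * per-band MSDU page rings) and hand the combined interrupt mask over to
 * WED before enabling hardware RRO.
 */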
int mt7996_dma_rro_init(struct mt7996_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 irq_mask;
	int ret;

	/* ind cmd */
	mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
	mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
			       MT_RXQ_ID(MT_RXQ_RRO_IND),
			       MT7996_RX_RING_SIZE,
			       0, MT_RXQ_RRO_IND_RING_BASE);
	if (ret)
		return ret;

	/* rx msdu page queue for band0 */
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
		MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
			       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
			       MT7996_RX_RING_SIZE,
			       MT7996_RX_MSDU_PAGE_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
	if (ret)
		return ret;

	if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx msdu page queue for band1 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
			MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
		if (ret)
			return ret;
	}

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx msdu page queue for band2 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
			MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
		if (ret)
			return ret;
	}

	irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
		   MT_INT_TX_DONE_BAND2;
	mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
	mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
	mt7996_irq_enable(dev, irq_mask);

	return 0;
}
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

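/* Allocate and wire up all TX/MCU/RX DMA rings for the detected chip and
 * interface layout (single or dual PCIe, WED, hardware RRO), then register
 * the NAPI handlers and enable WFDMA.
 */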
int mt7996_dma_init(struct mt7996_dev *dev)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
	u32 rx_base;
	u32 hif1_ofs = 0;
	int ret;

	mt7996_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7996_dma_disable(dev, true);

	/* init tx queue */
	ret = mt7996_init_tx_queues(&dev->phy,
				    MT_TXQ_ID(dev->mphy.band_idx),
				    MT7996_TX_RING_SIZE,
				    MT_TXQ_RING_BASE(0),
				    wed);
	if (ret)
		return ret;

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	if (mt7996_has_wa(dev)) {
		ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
					  MT_MCUQ_ID(MT_MCUQ_WA),
					  MT7996_TX_MCU_RING_SIZE,
					  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
		if (ret)
			return ret;
	}

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7996_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7996_RX_MCU_RING_SIZE,
			       MT7996_RX_MCU_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	/* event from WA, or SDO event for mt7990 */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT_RXQ_ID(MT_RXQ_MCU_WA),
			       MT7996_RX_MCU_RING_SIZE_WA,
			       MT7996_RX_MCU_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
	if (ret)
		return ret;

	/* rx data queue for band0 and mt7996 band1 */
	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
		dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
		dev->mt76.q_rx[MT_RXQ_MAIN].wed = wed;
	}

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT_RXQ_ID(MT_RXQ_MAIN),
			       MT7996_RX_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
	if (ret)
		return ret;

	/* tx free notify event from WA for band0 */
	if (mtk_wed_device_active(wed) && !dev->has_rro) {
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
	}

	if (mt7996_has_wa(dev)) {
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
				       MT_RXQ_ID(MT_RXQ_MAIN_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
		if (ret)
			return ret;
	} else {
		if (mtk_wed_device_active(wed)) {
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
		}
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
		if (ret)
			return ret;
	}

	if (!mt7996_has_wa(dev) && dev->hif2) {
		if (mtk_wed_device_active(wed)) {
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].wed = wed;
		}
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND1),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND1));
		if (ret)
			return ret;
	}

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx data queue for mt7996 band2 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
				       MT_RXQ_ID(MT_RXQ_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7996 band2:
		 * use pcie0's rx ring3, but redirect the pcie0 rx ring3
		 * interrupt to pcie1
		 */
		if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
				       MT_RXQ_ID(MT_RXQ_BAND2_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
		if (ret)
			return ret;
	} else if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx data queue for mt7992 band1 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
				       MT_RXQ_ID(MT_RXQ_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7992 band1 */
		if (mt7996_has_wa(dev)) {
			rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
					       MT_RXQ_ID(MT_RXQ_BAND1_WA),
					       MT7996_RX_MCU_RING_SIZE,
					       MT_RX_BUF_SIZE,
					       rx_base);
			if (ret)
				return ret;
		}
	}

	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
	    dev->has_rro) {
		/* rx rro data queue for band0 */
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
			MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
				       MT_RXQ_ID(MT_RXQ_RRO_BAND0),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
		if (ret)
			return ret;

		/* tx free notify event from WA for band0 */
		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
		if (ret)
			return ret;

		if (mt7996_band_valid(dev, MT_BAND2)) {
			/* rx rro data queue for band2 */
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
				MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
					       MT_RXQ_ID(MT_RXQ_RRO_BAND2),
					       MT7996_RX_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
			if (ret)
				return ret;

			/* tx free notify event from MAC for band2 */
			if (mtk_wed_device_active(wed_hif2)) {
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE;
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].wed = wed_hif2;
			}
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
					       MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
					       MT7996_RX_MCU_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
			if (ret)
				return ret;
		}
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7996_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7996_dma_enable(dev, false);

	return 0;
}

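/* Quiesce and restart DMA after an error or firmware-triggered recovery:
 * stop WFDMA, drain every TX/MCU/RX queue, optionally reset the WiFi
 * subsystem (@force), reset WED and the hardware rings, then re-enable DMA.
 */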
void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
{
	struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
	struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (dev->hif2)
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (phy2)
			mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
		if (phy3)
			mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	/* reset wfsys */
	if (force)
		mt7996_wfsys_reset(dev);

	if (dev->hif2 && mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);

	mt7996_dma_disable(dev, force);
	mt76_wed_dma_reset(&dev->mt76);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_dma_reset_tx_queue(&dev->mt76, dev->mphy.q_tx[i]);
		if (phy2)
			mt76_dma_reset_tx_queue(&dev->mt76, phy2->q_tx[i]);
		if (phy3)
			mt76_dma_reset_tx_queue(&dev->mt76, phy3->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed))
			if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
			    mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
				continue;

		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt7996_dma_enable(dev, !force);
}

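/* Tear down DMA on driver unload: stop and reset WFDMA, then free all
 * allocated rings and buffers.
 */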
void mt7996_dma_cleanup(struct mt7996_dev *dev)
{
	mt7996_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}