// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include "mt7996.h"
#include "../dma.h"
#include "mac.h"

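/* Allocate the data TX ring for @phy. When a WED device is active, the ring
 * base and queue index are rebased relative to TX queue 0 and the queue is
 * tagged with the matching MT_WED_Q_TX() flag (band2 always maps to TX0).
 */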
int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
			  int ring_base, struct mtk_wed_device *wed)
{
	struct mt7996_dev *dev = phy->dev;
	u32 flags = 0;

	if (mtk_wed_device_active(wed)) {
		ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
		idx -= MT_TXQ_ID(0);

		if (phy->mt76->band_idx == MT_BAND2)
			flags = MT_WED_Q_TX(0);
		else
			flags = MT_WED_Q_TX(idx);
	}

	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
					  ring_base, wed, flags);
}

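/* NAPI poll handler for TX completions: clean up completed MCU TX
 * descriptors and re-enable the MCU TX done interrupt once polling
 * is complete.
 */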
static int mt7996_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7996_dev *dev;

	dev = container_of(napi, struct mt7996_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7996_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}

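/* Map logical MCU/TX/RX queues to their WFDMA instance, interrupt bit and
 * hardware ring ID. The layout differs between mt7996 (three bands) and
 * mt7992 (two bands), and extra rings are added when hardware RRO is in use.
 */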
static void mt7996_dma_config(struct mt7996_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do {		\
		if (wfdma)				\
			dev->q_wfdma_mask |= (1 << (q));\
		dev->q_int_mask[(q)] = int;		\
		dev->q_id[(q)] = id;			\
	} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	/* rx queue */
	RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
	RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);

	/* mt7996: band0 and band1, mt7992: band0 */
	RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
	RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, MT7996_RXQ_MCU_WA_MAIN);

	if (is_mt7996(&dev->mt76)) {
		/* mt7996 band2 */
		RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
		RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
	} else {
		/* mt7992 band1 */
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
	}

	if (dev->has_rro) {
		/* band0 */
		RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
			   MT7996_RXQ_RRO_BAND0);
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
			   MT7996_RXQ_MSDU_PG_BAND0);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
			   MT7996_RXQ_TXFREE0);
		/* band1 */
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
			   MT7996_RXQ_MSDU_PG_BAND1);
		/* band2 */
		RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
			   MT7996_RXQ_RRO_BAND2);
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
			   MT7996_RXQ_MSDU_PG_BAND2);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
			   MT7996_RXQ_TXFREE2);

		RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
			   MT7996_RXQ_RRO_IND);
	}

	/* data tx queue */
	TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
	if (is_mt7996(&dev->mt76)) {
		TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
		TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
	} else {
		TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
	}

	/* mcu tx queue */
	MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
	MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA, MT7996_TXQ_MCU_WA);
	MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
}

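/* Pack a prefetch descriptor: the current base goes in the upper 16 bits and
 * the ring depth in the lower 16 bits, then advance the base by depth * 16
 * entries for the next ring.
 */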
static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
{
	u32 ret = *base << 16 | depth;

	*base = *base + (depth << 4);

	return ret;
}

static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
	u16 base = 0;
	u8 queue;

#define PREFETCH(_depth)	(__mt7996_dma_prefetch_base(&base, (_depth)))
	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x8));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x2));

	queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x2));

	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));

	queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2 : MT_RXQ_BAND1;
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x10));

	if (dev->has_rro) {
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs,
			PREFETCH(0x10));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs,
			PREFETCH(0x10));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs,
			PREFETCH(0x4));
	}
#undef PREFETCH

	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
}

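/* Program the per-ring prefetch settings for the primary WFDMA and, when a
 * second PCIe HIF is present, mirror the same layout at the PCIE1 register
 * offset.
 */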
void mt7996_dma_prefetch(struct mt7996_dev *dev)
{
	__mt7996_dma_prefetch(dev, 0);
	if (dev->hif2)
		__mt7996_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}

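/* Stop WFDMA: optionally toggle the DMASHDL/logic reset bits, then clear the
 * TX/RX DMA enable and descriptor-info bits on both PCIe instances.
 */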
static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	if (reset) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);
		}
	}

	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
	}
}

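/* Re-enable WFDMA and the per-band RX done interrupts. When WED is active
 * with RX offload, the host RX DMA enable bit is left clear. After a WED
 * reset, the WED device is restarted with the band0/band1 TX done bits added
 * to its IRQ mask. During a reset cycle only MCU command interrupts are kept
 * enabled.
 */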
void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* enable WFDMA Tx/Rx */
	if (!reset) {
		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
		else
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);

		if (dev->hif2)
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_MCU_CMD | MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;

	if (mt7996_band_valid(dev, MT_BAND0))
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND1))
		irq_mask |= MT_INT_BAND1_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND2))
		irq_mask |= MT_INT_BAND2_RX_DONE;

	if (mtk_wed_device_active(wed) && wed_reset) {
		u32 wed_irq_mask = irq_mask;

		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start(wed, wed_irq_mask);
	}

	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;

	mt7996_irq_enable(dev, irq_mask);
	mt7996_irq_disable(dev, 0);
}

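/* Bring WFDMA back up: reset the DMA indices, disable delay interrupts,
 * program the prefetch settings, wait for the HIF to go idle, then apply the
 * GLO_CFG_EXT and RX-threshold tuning (mirrored on the second PCIe HIF)
 * before starting DMA.
 */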
static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (dev->hif2)
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);

	/* disable delay interrupts */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 + hif1_ofs, 0);
	}

	/* configure prefetch settings */
	mt7996_dma_prefetch(dev);

	/* wait for WFDMA to be idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (dev->hif2)
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	/* GLO_CFG_EXT0 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0,
		 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
		 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

	/* GLO_CFG_EXT1 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1,
		 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

	/* WFDMA rx threshold */
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH, 0xc000c);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH, 0x20);

	if (dev->hif2) {
		/* GLO_CFG_EXT0 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
			 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

		/* GLO_CFG_EXT1 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

		mt76_set(dev, MT_WFDMA_HOST_CONFIG,
			 MT_WFDMA_HOST_CONFIG_PDMA_BAND |
			 MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);

		/* AXI read outstanding number */
		mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
			 MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);

		/* WFDMA rx threshold */
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH + hif1_ofs, 0x20);
	}

	if (dev->hif2) {
		/* fix hardware limitation: pcie1's rx ring3 is not available,
		 * so redirect pcie0 rx ring3 interrupt to pcie1
		 */
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    dev->has_rro)
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
				 MT_WFDMA0_RX_INT_SEL_RING6);
		else
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
				 MT_WFDMA0_RX_INT_SEL_RING3);
	}

	mt7996_dma_start(dev, reset, true);
}

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
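/* Set up the WED hardware RRO (RX reorder offload) rings: the indication
 * command ring plus one MSDU page ring per valid band, then hand the IRQ mask
 * to WED and start hardware RRO.
 */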
int mt7996_dma_rro_init(struct mt7996_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 irq_mask;
	int ret;

	/* ind cmd */
	mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
	mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
			       MT_RXQ_ID(MT_RXQ_RRO_IND),
			       MT7996_RX_RING_SIZE,
			       0, MT_RXQ_RRO_IND_RING_BASE);
	if (ret)
		return ret;

	/* rx msdu page queue for band0 */
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
		MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
			       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
			       MT7996_RX_RING_SIZE,
			       MT7996_RX_MSDU_PAGE_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
	if (ret)
		return ret;

	if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx msdu page queue for band1 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
			MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
		if (ret)
			return ret;
	}

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx msdu page queue for band2 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
			MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
		if (ret)
			return ret;
	}

	irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
		   MT_INT_TX_DONE_BAND2;
	mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
	mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
	mt7996_irq_enable(dev, irq_mask);

	return 0;
}
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

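/* One-time DMA bring-up: configure the queue mapping, attach the mt76 DMA
 * layer, then allocate every TX, MCU and RX ring (including the WED/RRO rings
 * when available) before enabling WFDMA and the TX NAPI context.
 */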
int mt7996_dma_init(struct mt7996_dev *dev)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
	u32 rx_base;
	u32 hif1_ofs = 0;
	int ret;

	mt7996_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7996_dma_disable(dev, true);

	/* init tx queue */
	ret = mt7996_init_tx_queues(&dev->phy,
				    MT_TXQ_ID(dev->mphy.band_idx),
				    MT7996_TX_RING_SIZE,
				    MT_TXQ_RING_BASE(0),
				    wed);
	if (ret)
		return ret;

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
				  MT_MCUQ_ID(MT_MCUQ_WA),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7996_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7996_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	/* event from WA */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT_RXQ_ID(MT_RXQ_MCU_WA),
			       MT7996_RX_MCU_RING_SIZE_WA,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
	if (ret)
		return ret;

	/* rx data queue for band0 and mt7996 band1 */
	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
		dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
		dev->mt76.q_rx[MT_RXQ_MAIN].wed = wed;
	}

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT_RXQ_ID(MT_RXQ_MAIN),
			       MT7996_RX_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
	if (ret)
		return ret;

	/* tx free notify event from WA for band0 */
	if (mtk_wed_device_active(wed) && !dev->has_rro) {
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
	}

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
			       MT_RXQ_ID(MT_RXQ_MAIN_WA),
			       MT7996_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
	if (ret)
		return ret;

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx data queue for mt7996 band2 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
				       MT_RXQ_ID(MT_RXQ_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7996 band2;
		 * use pcie0's rx ring3, but redirect its interrupt to pcie1
		 */
		if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
				       MT_RXQ_ID(MT_RXQ_BAND2_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
		if (ret)
			return ret;
	} else if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx data queue for mt7992 band1 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
				       MT_RXQ_ID(MT_RXQ_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7992 band1 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
				       MT_RXQ_ID(MT_RXQ_BAND1_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;
	}

	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
	    dev->has_rro) {
		/* rx rro data queue for band0 */
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
			MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
				       MT_RXQ_ID(MT_RXQ_RRO_BAND0),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
		if (ret)
			return ret;

		/* tx free notify event from WA for band0 */
		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
		if (ret)
			return ret;

		if (mt7996_band_valid(dev, MT_BAND2)) {
			/* rx rro data queue for band2 */
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
				MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
					       MT_RXQ_ID(MT_RXQ_RRO_BAND2),
					       MT7996_RX_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
			if (ret)
				return ret;

			/* tx free notify event from MAC for band2 */
			if (mtk_wed_device_active(wed_hif2)) {
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE;
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].wed = wed_hif2;
			}
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
					       MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
					       MT7996_RX_MCU_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
			if (ret)
				return ret;
		}
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7996_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7996_dma_enable(dev, false);

	return 0;
}

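/* Recover from a DMA error or full chip reset: stop TX/RX DMA, drain all TX,
 * MCU and RX queues, optionally reset the WiFi subsystem, reset WED and every
 * hardware ring (WED RRO and TXFREE rings are skipped while WED is active),
 * then re-enable DMA.
 */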
void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
{
	struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
	struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (dev->hif2)
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (phy2)
			mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
		if (phy3)
			mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	/* reset wfsys */
	if (force)
		mt7996_wfsys_reset(dev);

	if (dev->hif2 && mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);

	mt7996_dma_disable(dev, force);
	mt76_wed_dma_reset(&dev->mt76);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_dma_reset_tx_queue(&dev->mt76, dev->mphy.q_tx[i]);
		if (phy2)
			mt76_dma_reset_tx_queue(&dev->mt76, phy2->q_tx[i]);
		if (phy3)
			mt76_dma_reset_tx_queue(&dev->mt76, phy3->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed))
			if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
			    mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
				continue;

		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt7996_dma_enable(dev, !force);
}

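/* Teardown path: stop and reset WFDMA, then free all mt76 DMA rings. */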
void mt7996_dma_cleanup(struct mt7996_dev *dev)
{
	mt7996_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}