// SPDX-License-Identifier: ISC
/* Copyright (C) 2023 MediaTek Inc. */

#include <linux/module.h>
#include <linux/firmware.h>
#if defined(__FreeBSD__)
#include <linux/delay.h>
#endif

#include "mt792x.h"
#include "dma.h"
#include "trace.h"

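/* Top-half interrupt handler: mask all host interrupts and defer the
 * actual status handling to the irq tasklet. Interrupts that fire
 * while the device is being removed or before initialization has
 * completed are reported as IRQ_NONE.
 */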
irqreturn_t mt792x_irq_handler(int irq, void *dev_instance)
{
	struct mt792x_dev *dev = dev_instance;

	if (test_bit(MT76_REMOVED, &dev->mt76.phy.state))
		return IRQ_NONE;
	mt76_wr(dev, dev->irq_map->host_irq_enable, 0);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
		return IRQ_NONE;

	tasklet_schedule(&dev->mt76.irq_tasklet);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt792x_irq_handler);

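/* Bottom half of the interrupt handler: read and ack the pending
 * interrupt status, keep the sources that are about to be serviced
 * by NAPI masked (their poll routines re-enable them on completion),
 * restore the remaining interrupt mask and schedule the matching
 * TX/RX NAPI contexts.
 */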
void mt792x_irq_tasklet(unsigned long data)
{
	struct mt792x_dev *dev = (struct mt792x_dev *)data;
	const struct mt792x_irq_map *irq_map = dev->irq_map;
	u32 intr, mask = 0;

	mt76_wr(dev, irq_map->host_irq_enable, 0);

	intr = mt76_rr(dev, MT_WFDMA0_HOST_INT_STA);
	intr &= dev->mt76.mmio.irqmask;
	mt76_wr(dev, MT_WFDMA0_HOST_INT_STA, intr);

	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);

	mask |= intr & (irq_map->rx.data_complete_mask |
			irq_map->rx.wm_complete_mask |
			irq_map->rx.wm2_complete_mask);
	if (intr & dev->irq_map->tx.mcu_complete_mask)
		mask |= dev->irq_map->tx.mcu_complete_mask;

	if (intr & MT_INT_MCU_CMD) {
		u32 intr_sw;

		intr_sw = mt76_rr(dev, MT_MCU_CMD);
		/* ack MCU2HOST_SW_INT_STA */
		mt76_wr(dev, MT_MCU_CMD, intr_sw);
		if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) {
			mask |= irq_map->rx.data_complete_mask;
			intr |= irq_map->rx.data_complete_mask;
		}
	}

	mt76_set_irq_mask(&dev->mt76, irq_map->host_irq_enable, mask, 0);

	if (intr & dev->irq_map->tx.all_complete_mask)
		napi_schedule(&dev->mt76.tx_napi);

	if (intr & irq_map->rx.wm_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);

	if (intr & irq_map->rx.wm2_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);

	if (intr & irq_map->rx.data_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
}
EXPORT_SYMBOL_GPL(mt792x_irq_tasklet);

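/* NAPI poll completion hook: re-enable the interrupt source that
 * corresponds to the RX queue which has just been fully polled.
 */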
void mt792x_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	const struct mt792x_irq_map *irq_map = dev->irq_map;

	if (q == MT_RXQ_MAIN)
		mt76_connac_irq_enable(mdev, irq_map->rx.data_complete_mask);
	else if (q == MT_RXQ_MCU_WA)
		mt76_connac_irq_enable(mdev, irq_map->rx.wm2_complete_mask);
	else
		mt76_connac_irq_enable(mdev, irq_map->rx.wm_complete_mask);
}
EXPORT_SYMBOL_GPL(mt792x_rx_poll_complete);

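/* Each ring's *_EXT_CTRL register packs the prefetch FIFO base offset
 * into the upper 16 bits and the prefetch depth into the lower bits.
 */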
#define PREFETCH(base, depth) ((base) << 16 | (depth))
static void mt792x_dma_prefetch(struct mt792x_dev *dev)
{
	if (is_mt7925(&dev->mt76)) {
		/* rx ring */
		mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0000, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL, PREFETCH(0x0040, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x0080, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x00c0, 0x4));
		/* tx ring */
		mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x0100, 0x10));
		mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x0200, 0x10));
		mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x0300, 0x10));
		mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x0400, 0x10));
		mt76_wr(dev, MT_WFDMA0_TX_RING15_EXT_CTRL, PREFETCH(0x0500, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x0540, 0x4));
	} else {
		/* rx ring */
		mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4));
		mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4));
		/* tx ring */
		mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4));
		mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
	}
}

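/* Program the prefetch layout, reset the DMA ring indices, configure
 * the global WFDMA engine and turn TX/RX DMA back on with all ring
 * interrupts enabled.
 */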
int mt792x_dma_enable(struct mt792x_dev *dev)
{
	/* configure prefetch settings */
	mt792x_dma_prefetch(dev);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (is_mt7925(&dev->mt76))
		mt76_wr(dev, MT_WFDMA0_RST_DRX_PTR, ~0);

	/* configure delay interrupt */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
		 MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
		 MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
		 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		 FIELD_PREP(MT_WFDMA0_GLO_CFG_DMA_SIZE, 3) |
		 MT_WFDMA0_GLO_CFG_FIFO_DIS_CHECK |
		 MT_WFDMA0_GLO_CFG_RX_WB_DDONE |
		 MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (is_mt7925(&dev->mt76)) {
		mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));
		mt76_set(dev, MT_WFDMA0_INT_RX_PRI, 0x0F00);
		mt76_set(dev, MT_WFDMA0_INT_TX_PRI, 0x7F00);
	}
	mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);

	/* enable interrupts for TX/RX rings */
	mt76_connac_irq_enable(&dev->mt76,
			       dev->irq_map->tx.all_complete_mask |
			       dev->irq_map->rx.data_complete_mask |
			       dev->irq_map->rx.wm2_complete_mask |
			       dev->irq_map->rx.wm_complete_mask |
			       MT_INT_MCU_CMD);
	mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_enable);

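/* Disable DMA, reset every TX/MCU/RX hardware queue and bring the
 * engine back up. With @force set, the DMASHDL and WFDMA logic are
 * reset as well on the way down.
 */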
static int
mt792x_dma_reset(struct mt792x_dev *dev, bool force)
{
	int i, err;

	err = mt792x_dma_disable(dev, force);
	if (err)
		return err;

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_reset(dev, dev->mphy.q_tx[i]);

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	return mt792x_dma_enable(dev);
}

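/* Full WPDMA recovery: flush all pending TX/RX queue entries,
 * optionally reset the WiFi subsystem first (@force), then reset and
 * re-enable DMA and reset the RX queues.
 */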
int mt792x_wpdma_reset(struct mt792x_dev *dev, bool force)
{
	int i, err;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	if (force) {
		err = mt792x_wfsys_reset(dev);
		if (err)
			return err;
	}
	err = mt792x_dma_reset(dev, force);
	if (err)
		return err;

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wpdma_reset);

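/* Reinitialize WPDMA on wakeup if the hardware indicates its state
 * was lost (e.g. after a low-power sleep), with interrupts kept
 * disabled across the reset.
 */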
int mt792x_wpdma_reinit_cond(struct mt792x_dev *dev)
{
	struct mt76_connac_pm *pm = &dev->pm;
	int err;

	/* check if the wpdma must be reinitialized */
	if (mt792x_dma_need_reinit(dev)) {
		/* disable interrupts */
		mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

		err = mt792x_wpdma_reset(dev, false);
		if (err) {
			dev_err(dev->mt76.dev, "wpdma reset failed\n");
			return err;
		}

		/* enable interrupts */
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		pm->stats.lp_wake++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wpdma_reinit_cond);

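/* Stop the WFDMA0 engine and wait up to 100ms for in-flight TX/RX DMA
 * to go idle; with @force set, also pulse the DMASHDL and WFDMA logic
 * reset lines.
 */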
int mt792x_dma_disable(struct mt792x_dev *dev, bool force)
{
	/* disable WFDMA0 */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (!mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
				 MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1))
		return -ETIMEDOUT;

	/* disable dmashdl */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
		   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
	mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);

	if (force) {
		/* reset */
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_disable);

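/* Shutdown path: disable WFDMA0, wait for it to drain, pulse the DMA
 * logic reset and free the mt76 DMA queues.
 */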
void mt792x_dma_cleanup(struct mt792x_dev *dev)
{
	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
			    MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
			    MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1);

	/* reset */
	mt76_clear(dev, MT_WFDMA0_RST,
		   MT_WFDMA0_RST_DMASHDL_ALL_RST |
		   MT_WFDMA0_RST_LOGIC_RST);

	mt76_set(dev, MT_WFDMA0_RST,
		 MT_WFDMA0_RST_DMASHDL_ALL_RST |
		 MT_WFDMA0_RST_LOGIC_RST);

	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt792x_dma_cleanup);

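/* TX NAPI poll: take a runtime-pm reference before touching the
 * hardware. If the device is asleep, complete NAPI and defer to the
 * PM wake worker; otherwise clean up completed TX entries and
 * re-enable the TX completion interrupts.
 */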
int mt792x_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt792x_dev *dev;

	dev = container_of(napi, struct mt792x_dev, mt76.tx_napi);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete(napi))
		mt76_connac_irq_enable(&dev->mt76,
				       dev->irq_map->tx.all_complete_mask);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_poll_tx);

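/* RX NAPI poll: same runtime-pm handling as the TX path, with the
 * actual ring processing done by the generic mt76 DMA RX poll.
 */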
int mt792x_poll_rx(struct napi_struct *napi, int budget)
{
	struct mt792x_dev *dev;
	int done;

	dev = mt76_priv(napi->dev);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}
	done = mt76_dma_rx_poll(napi, budget);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return done;
}
EXPORT_SYMBOL_GPL(mt792x_poll_rx);

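/* Toggle the WiFi subsystem software reset and wait up to 500ms for
 * the init-done flag to assert. The reset register lives at a
 * different address on MT7921 than on the newer chips.
 */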
int mt792x_wfsys_reset(struct mt792x_dev *dev)
{
	u32 addr = is_mt7921(&dev->mt76) ? 0x18000140 : 0x7c000140;

	mt76_clear(dev, addr, WFSYS_SW_RST_B);
	msleep(50);
	mt76_set(dev, addr, WFSYS_SW_RST_B);

	if (!__mt76_poll_msec(&dev->mt76, addr, WFSYS_SW_INIT_DONE,
			      WFSYS_SW_INIT_DONE, 500))
		return -ETIMEDOUT;

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wfsys_reset);