// SPDX-License-Identifier: ISC
/* Copyright (C) 2023 MediaTek Inc. */

#include <linux/module.h>
#include <linux/firmware.h>
#if defined(__FreeBSD__)
#include <linux/delay.h>
#endif

#include "mt792x.h"
#include "dma.h"
#include "trace.h"

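/* Hard interrupt handler: mask all host interrupts and defer the actual
 * event processing to the irq tasklet.
 */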
irqreturn_t mt792x_irq_handler(int irq, void *dev_instance)
{
	struct mt792x_dev *dev = dev_instance;

	mt76_wr(dev, dev->irq_map->host_irq_enable, 0);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
		return IRQ_NONE;

	tasklet_schedule(&dev->mt76.irq_tasklet);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt792x_irq_handler);

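/* Bottom half: ack the pending interrupt sources, leave the sources that
 * NAPI will service masked and kick the corresponding NAPI contexts.
 */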
void mt792x_irq_tasklet(unsigned long data)
{
	struct mt792x_dev *dev = (struct mt792x_dev *)data;
	const struct mt792x_irq_map *irq_map = dev->irq_map;
	u32 intr, mask = 0;

	mt76_wr(dev, irq_map->host_irq_enable, 0);

	intr = mt76_rr(dev, MT_WFDMA0_HOST_INT_STA);
	intr &= dev->mt76.mmio.irqmask;
	mt76_wr(dev, MT_WFDMA0_HOST_INT_STA, intr);

	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);

	mask |= intr & (irq_map->rx.data_complete_mask |
			irq_map->rx.wm_complete_mask |
			irq_map->rx.wm2_complete_mask);
	if (intr & dev->irq_map->tx.mcu_complete_mask)
		mask |= dev->irq_map->tx.mcu_complete_mask;

	if (intr & MT_INT_MCU_CMD) {
		u32 intr_sw;

		intr_sw = mt76_rr(dev, MT_MCU_CMD);
		/* ack MCU2HOST_SW_INT_STA */
		mt76_wr(dev, MT_MCU_CMD, intr_sw);
		if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) {
			mask |= irq_map->rx.data_complete_mask;
			intr |= irq_map->rx.data_complete_mask;
		}
	}

	mt76_set_irq_mask(&dev->mt76, irq_map->host_irq_enable, mask, 0);

	if (intr & dev->irq_map->tx.all_complete_mask)
		napi_schedule(&dev->mt76.tx_napi);

	if (intr & irq_map->rx.wm_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);

	if (intr & irq_map->rx.wm2_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);

	if (intr & irq_map->rx.data_complete_mask)
		napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
}
EXPORT_SYMBOL_GPL(mt792x_irq_tasklet);

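/* Called once a NAPI RX poll is done: re-enable the interrupt source that
 * matches the polled queue.
 */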
void mt792x_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	const struct mt792x_irq_map *irq_map = dev->irq_map;

	if (q == MT_RXQ_MAIN)
		mt76_connac_irq_enable(mdev, irq_map->rx.data_complete_mask);
	else if (q == MT_RXQ_MCU_WA)
		mt76_connac_irq_enable(mdev, irq_map->rx.wm2_complete_mask);
	else
		mt76_connac_irq_enable(mdev, irq_map->rx.wm_complete_mask);
}
EXPORT_SYMBOL_GPL(mt792x_rx_poll_complete);

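/* Per-ring prefetch setting in the EXT_CTRL registers: base offset in the
 * high 16 bits, prefetch depth in the low bits.
 */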
#define PREFETCH(base, depth)	((base) << 16 | (depth))
static void mt792x_dma_prefetch(struct mt792x_dev *dev)
{
	mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4));

	mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
}

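/* Program prefetch, reset the DMA index pointers, configure the global
 * WFDMA0 options, start TX/RX DMA and unmask the ring interrupts.
 */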
int mt792x_dma_enable(struct mt792x_dev *dev)
{
	/* configure prefetch settings */
	mt792x_dma_prefetch(dev);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);

	/* configure delay interrupt */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
		 MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
		 MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
		 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		 MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);

	/* enable interrupts for TX/RX rings */
	mt76_connac_irq_enable(&dev->mt76,
			       dev->irq_map->tx.all_complete_mask |
			       dev->irq_map->rx.data_complete_mask |
			       dev->irq_map->rx.wm2_complete_mask |
			       dev->irq_map->rx.wm_complete_mask |
			       MT_INT_MCU_CMD);
	mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_enable);

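/* Full DMA restart: stop the engine, reset every TX/MCU/RX hw queue, flush
 * pending TX status, then start DMA again.
 */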
static int
mt792x_dma_reset(struct mt792x_dev *dev, bool force)
{
	int i, err;

	err = mt792x_dma_disable(dev, force);
	if (err)
		return err;

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_reset(dev, dev->mphy.q_tx[i]);

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	return mt792x_dma_enable(dev);
}

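/* Reset the whole WPDMA block: drain all queues first, optionally reset
 * the WF subsystem, then restart DMA and reset the RX rings.
 */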
int mt792x_wpdma_reset(struct mt792x_dev *dev, bool force)
{
	int i, err;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	if (force) {
		err = mt792x_wfsys_reset(dev);
		if (err)
			return err;
	}
	err = mt792x_dma_reset(dev, force);
	if (err)
		return err;

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wpdma_reset);

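/* Reinitialize WPDMA when the hardware flags it as needed, which typically
 * happens on a wakeup from a low-power state (hence the lp_wake counter).
 */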
int mt792x_wpdma_reinit_cond(struct mt792x_dev *dev)
{
	struct mt76_connac_pm *pm = &dev->pm;
	int err;

	/* check if the wpdma must be reinitialized */
	if (mt792x_dma_need_reinit(dev)) {
		/* disable interrupts */
		mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

		err = mt792x_wpdma_reset(dev, false);
		if (err) {
			dev_err(dev->mt76.dev, "wpdma reset failed\n");
			return err;
		}

		/* enable interrupts */
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		pm->stats.lp_wake++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wpdma_reinit_cond);

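/* Stop TX/RX DMA and wait for the engine to go idle; with @force also
 * pulse the DMASHDL/logic reset bits.
 */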
int mt792x_dma_disable(struct mt792x_dev *dev, bool force)
{
	/* disable WFDMA0 */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (!mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
				 MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1))
		return -ETIMEDOUT;

	/* disable dmashdl */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
		   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
	mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);

	if (force) {
		/* reset */
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_disable);

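/* Teardown path: stop DMA, wait (best effort) for it to idle, pulse the
 * reset bits and release all DMA descriptors and buffers.
 */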
void mt792x_dma_cleanup(struct mt792x_dev *dev)
{
	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
			    MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
			    MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1);

	/* reset */
	mt76_clear(dev, MT_WFDMA0_RST,
		   MT_WFDMA0_RST_DMASHDL_ALL_RST |
		   MT_WFDMA0_RST_LOGIC_RST);

	mt76_set(dev, MT_WFDMA0_RST,
		 MT_WFDMA0_RST_DMASHDL_ALL_RST |
		 MT_WFDMA0_RST_LOGIC_RST);

	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt792x_dma_cleanup);

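/* NAPI TX poll: bail out and schedule a wakeup if the device is runtime
 * suspended, otherwise reap completed TX frames and re-arm the TX interrupts.
 */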
int mt792x_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt792x_dev *dev;

	dev = container_of(napi, struct mt792x_dev, mt76.tx_napi);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete(napi))
		mt76_connac_irq_enable(&dev->mt76,
				       dev->irq_map->tx.all_complete_mask);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_poll_tx);

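/* NAPI RX poll with the same runtime-PM guard as the TX path. */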
int mt792x_poll_rx(struct napi_struct *napi, int budget)
{
	struct mt792x_dev *dev;
	int done;

	dev = container_of(napi->dev, struct mt792x_dev, mt76.napi_dev);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}
	done = mt76_dma_rx_poll(napi, budget);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return done;
}
EXPORT_SYMBOL_GPL(mt792x_poll_rx);

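/* Toggle the WFSYS software reset bit (the register address differs between
 * MT7921 and the other chips) and wait up to 500ms for init done.
 */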
int mt792x_wfsys_reset(struct mt792x_dev *dev)
{
	u32 addr = is_mt7921(&dev->mt76) ? 0x18000140 : 0x7c000140;

	mt76_clear(dev, addr, WFSYS_SW_RST_B);
	msleep(50);
	mt76_set(dev, addr, WFSYS_SW_RST_B);

	if (!__mt76_poll_msec(&dev->mt76, addr, WFSYS_SW_INIT_DONE,
			      WFSYS_SW_INIT_DONE, 500))
		return -ETIMEDOUT;

	return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wfsys_reset);