// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#if defined(__FreeBSD__)
#include <linux/delay.h>
#endif

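/* NAPI poll handler for MCU TX completions: reap finished frames and,
 * once polling is complete, re-arm the MCU TX-done interrupt.
 */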
static int mt7996_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7996_dev *dev;

	dev = container_of(napi, struct mt7996_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7996_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}

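/* Map the logical mt76 queue indices to WFDMA hardware queue IDs and the
 * interrupt bits that signal them; every queue on this chip sits on WFDMA0.
 */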
static void mt7996_dma_config(struct mt7996_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do {		\
	if (wfdma)					\
		dev->q_wfdma_mask |= (1 << (q));	\
	dev->q_int_mask[(q)] = int;			\
	dev->q_id[(q)] = id;				\
} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	/* rx queue */
	RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
	RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);

	/* band0/band1 */
	RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
	RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, MT7996_RXQ_MCU_WA_MAIN);

	/* band2 */
	RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
	RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);

	/* data tx queue */
	TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
	TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
	TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);

	/* mcu tx queue */
	MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
	MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA, MT7996_TXQ_MCU_WA);
	MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
}

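/* Program the per-queue prefetch windows in WFDMA SRAM. PREFETCH() packs
 * the SRAM base offset into the upper 16 bits and the depth into the lower
 * 16; note that each base below advances by the previous depth * 0x10, so
 * the windows tile the SRAM without overlapping.
 */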
static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
#define PREFETCH(_base, _depth)	((_base) << 16 | (_depth))
	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x2));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x20, 0x2));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x40, 0x4));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x80, 0x4));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0xc0, 0x2));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0xe0, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x120, 0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x140, 0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x160, 0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2_WA) + ofs, PREFETCH(0x180, 0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x1a0, 0x10));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2) + ofs, PREFETCH(0x2a0, 0x10));

	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
}

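/* Apply the prefetch layout to the primary interface and, when a second
 * PCIe interface (hif2) is present, mirror it at the PCIe1 register offset.
 */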
void mt7996_dma_prefetch(struct mt7996_dev *dev)
{
	__mt7996_dma_prefetch(dev, 0);
	if (dev->hif2)
		__mt7996_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}

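/* Quiesce WFDMA: on reset, toggle the DMASHDL/logic reset bits, then
 * clear the TX/RX DMA enable and info-omit bits on each host interface.
 */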
static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	if (reset) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);
		}
	}

	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
	}
}

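/* Kick WFDMA and unmask interrupts. On the reset path the TX/RX enable
 * bits are left untouched and only the MCU command interrupt is re-armed;
 * otherwise the RX-done sources are unmasked per supported band.
 */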
void mt7996_dma_start(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* enable WFDMA Tx/Rx */
	if (!reset) {
		mt76_set(dev, MT_WFDMA0_GLO_CFG,
			 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

		if (dev->hif2)
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_MCU_CMD;
	if (reset)
		goto done;

	irq_mask = MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;

	if (!dev->mphy.band_idx)
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (dev->dbdc_support)
		irq_mask |= MT_INT_BAND1_RX_DONE;

	if (dev->tbtc_support)
		irq_mask |= MT_INT_BAND2_RX_DONE;

done:
	mt7996_irq_enable(dev, irq_mask);
	mt7996_irq_disable(dev, 0);
}

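/* Full WFDMA bring-up: reset the descriptor pointers, turn delayed
 * interrupts off, program prefetch, wait for the HIF to go idle, set the
 * global config extensions, then start DMA.
 */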
static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (dev->hif2)
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);

	/* configure delay interrupt off */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 + hif1_ofs, 0);
	}

	/* configure prefetch settings */
	mt7996_dma_prefetch(dev);

	/* hif wait for WFDMA to go idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (dev->hif2)
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	/* GLO_CFG_EXT0 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0,
		 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
		 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

	/* GLO_CFG_EXT1 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1,
		 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

	if (dev->hif2) {
		/* GLO_CFG_EXT0 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
			 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

		/* GLO_CFG_EXT1 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

		mt76_set(dev, MT_WFDMA_HOST_CONFIG,
			 MT_WFDMA_HOST_CONFIG_PDMA_BAND);
	}

	if (dev->hif2) {
		/* fix hardware limitation: PCIe1's RX ring3 is not available,
		 * so redirect the PCIe0 RX ring3 interrupt to PCIe1
		 */
		mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
			 MT_WFDMA0_RX_INT_SEL_RING3);

		/* TODO: redirect rx ring6 interrupt to pcie0 for wed function */
	}

	mt7996_dma_start(dev, reset);
}

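/* One-time DMA setup at probe: configure the queue mapping, allocate the
 * TX, MCU and RX rings, hook up the TX NAPI handler and enable WFDMA.
 */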
int mt7996_dma_init(struct mt7996_dev *dev)
{
	u32 hif1_ofs = 0;
	int ret;

	mt7996_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7996_dma_disable(dev, true);

	/* init tx queue */
	ret = mt76_connac_init_tx_queues(dev->phy.mt76,
					 MT_TXQ_ID(dev->mphy.band_idx),
					 MT7996_TX_RING_SIZE,
					 MT_TXQ_RING_BASE(0), 0);
	if (ret)
		return ret;

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
				  MT_MCUQ_ID(MT_MCUQ_WA),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7996_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7996_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	/* event from WA */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT_RXQ_ID(MT_RXQ_MCU_WA),
			       MT7996_RX_MCU_RING_SIZE_WA,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
	if (ret)
		return ret;

	/* rx data queue for band0 and band1 */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT_RXQ_ID(MT_RXQ_MAIN),
			       MT7996_RX_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
	if (ret)
		return ret;

	/* tx free notify event from WA for band0 */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
			       MT_RXQ_ID(MT_RXQ_MAIN_WA),
			       MT7996_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
	if (ret)
		return ret;

	if (dev->tbtc_support || dev->mphy.band_idx == MT_BAND2) {
		/* rx data queue for band2 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
				       MT_RXQ_ID(MT_RXQ_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs);
		if (ret)
			return ret;

		/* tx free notify event from WA for band2; this uses pcie0's
		 * rx ring3, with the pcie0 rx ring3 interrupt redirected to
		 * pcie1
		 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
				       MT_RXQ_ID(MT_RXQ_BAND2_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
		if (ret)
			return ret;
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7996_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7996_dma_enable(dev, false);

	return 0;
}

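/* Restart DMA for recovery: stop WFDMA, drain and reset every TX/MCU/RX
 * queue, optionally reset the whole WiFi subsystem (force), then bring
 * DMA back up.
 */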
void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
{
	struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
	struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (dev->hif2)
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (phy2)
			mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
		if (phy3)
			mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	/* reset wfsys */
	if (force)
		mt7996_wfsys_reset(dev);

	mt7996_dma_disable(dev, force);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_reset(dev, dev->mphy.q_tx[i]);
		if (phy2)
			mt76_queue_reset(dev, phy2->q_tx[i]);
		if (phy3)
			mt76_queue_reset(dev, phy3->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt7996_dma_enable(dev, !force);
}

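/* Driver teardown: stop WFDMA and release all queue resources. */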
void mt7996_dma_cleanup(struct mt7996_dev *dev)
{
	mt7996_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}
443