// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#if defined(__FreeBSD__)
#include <linux/delay.h>
#endif

int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
			  int ring_base, struct mtk_wed_device *wed)
{
	struct mt7996_dev *dev = phy->dev;
	u32 flags = 0;

	if (mtk_wed_device_active(wed)) {
		ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
		idx -= MT_TXQ_ID(0);

		if (phy->mt76->band_idx == MT_BAND2)
			flags = MT_WED_Q_TX(0);
		else
			flags = MT_WED_Q_TX(idx);
	}

	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
					  ring_base, wed, flags);
}

static int mt7996_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7996_dev *dev;

	dev = container_of(napi, struct mt7996_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7996_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}

static void mt7996_dma_config(struct mt7996_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do {		\
		if (wfdma)				\
			dev->q_wfdma_mask |= (1 << (q));\
		dev->q_int_mask[(q)] = int;		\
		dev->q_id[(q)] = id;			\
	} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	/* rx queue */
	RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
	RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);

	/* mt7996: band0 and band1, mt7992: band0 */
	RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
	RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, MT7996_RXQ_MCU_WA_MAIN);

	if (is_mt7996(&dev->mt76)) {
		/* mt7996 band2 */
		RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
		RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
	} else {
		/* mt7992 band1 */
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
	}

	if (dev->has_rro) {
		/* band0 */
		RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
			   MT7996_RXQ_RRO_BAND0);
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
			   MT7996_RXQ_MSDU_PG_BAND0);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
			   MT7996_RXQ_TXFREE0);
		/* band1 */
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
			   MT7996_RXQ_MSDU_PG_BAND1);
		/* band2 */
		RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
			   MT7996_RXQ_RRO_BAND2);
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
			   MT7996_RXQ_MSDU_PG_BAND2);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
			   MT7996_RXQ_TXFREE2);

		RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
			   MT7996_RXQ_RRO_IND);
	}

	/* data tx queue */
	TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
	if (is_mt7996(&dev->mt76)) {
		TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
		TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
	} else {
		TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
	}

	/* mcu tx queue */
	MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
	MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA, MT7996_TXQ_MCU_WA);
	MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
}

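/*
 * Note: __mt7996_dma_prefetch_base() below packs the running SRAM base offset
 * into the upper 16 bits of the returned register value and the ring depth
 * into the low bits, then advances the base by (depth << 4) so that
 * consecutive rings are assigned non-overlapping prefetch windows.
 */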
static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
{
	u32 ret = *base << 16 | depth;

	*base = *base + (depth << 4);

	return ret;
}

static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
	u16 base = 0;
	u8 queue;

#define PREFETCH(_depth)	(__mt7996_dma_prefetch_base(&base, (_depth)))
	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x8));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x2));

	queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x2));

	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));

	queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2 : MT_RXQ_BAND1;
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x10));

	if (dev->has_rro) {
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs,
			PREFETCH(0x10));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs,
			PREFETCH(0x10));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs,
			PREFETCH(0x4));
	}
#undef PREFETCH

	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
}

void mt7996_dma_prefetch(struct mt7996_dev *dev)
{
	__mt7996_dma_prefetch(dev, 0);
	if (dev->hif2)
		__mt7996_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}

static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	if (reset) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);
		}
	}

	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
	}
}

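/*
 * Bring WFDMA back up. Outside of a reset this re-enables the TX/RX DMA
 * engines on both host interfaces, then programs the interrupt mask; when a
 * WED device is active and a WED (re)start was requested, the mask is
 * extended with the band0/band1 TX-done bits and handed to
 * mtk_wed_device_start() as well.
 */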
void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* enable WFDMA Tx/Rx */
	if (!reset) {
		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
		else
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);

		if (dev->hif2)
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_MCU_CMD | MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;

	if (mt7996_band_valid(dev, MT_BAND0))
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND1))
		irq_mask |= MT_INT_BAND1_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND2))
		irq_mask |= MT_INT_BAND2_RX_DONE;

	if (mtk_wed_device_active(wed) && wed_reset) {
		u32 wed_irq_mask = irq_mask;

		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start(wed, wed_irq_mask);
	}

	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;

	mt7996_irq_enable(dev, irq_mask);
	mt7996_irq_disable(dev, 0);
}

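/*
 * One-time WFDMA setup: reset the DMA index pointers, turn delayed interrupts
 * off, program the SRAM prefetch windows and RX pause thresholds, and (on
 * dual-PCIe devices) set MT_WFDMA_HOST_CONFIG_BAND2_PCIE1, which presumably
 * steers band2 traffic to the second host interface, before starting DMA.
 */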
static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (dev->hif2)
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);

	/* configure delay interrupt off */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 + hif1_ofs, 0);
	}

	/* configure prefetch settings */
	mt7996_dma_prefetch(dev);

	/* hif wait WFDMA idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (dev->hif2)
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	/* GLO_CFG_EXT0 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0,
		 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
		 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

	/* GLO_CFG_EXT1 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1,
		 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

	/* WFDMA rx threshold */
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH, 0xc000c);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH, 0x20);

	if (dev->hif2) {
		/* GLO_CFG_EXT0 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
			 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

		/* GLO_CFG_EXT1 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

		mt76_set(dev, MT_WFDMA_HOST_CONFIG,
			 MT_WFDMA_HOST_CONFIG_PDMA_BAND |
			 MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);

		/* AXI read outstanding number */
		mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
			 MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);

		/* WFDMA rx threshold */
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH + hif1_ofs, 0x20);
	}

	if (dev->hif2) {
		/* fix hardware limitation: pcie1's rx ring3 is not available,
		 * so redirect pcie0 rx ring3 interrupt to pcie1
		 */
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    dev->has_rro)
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
				 MT_WFDMA0_RX_INT_SEL_RING6);
		else
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
				 MT_WFDMA0_RX_INT_SEL_RING3);
	}

	mt7996_dma_start(dev, reset, true);
}

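/*
 * With CONFIG_NET_MEDIATEK_SOC_WED, the hardware RX reordering (RRO) path
 * adds WED-owned queues: an indication-command ring plus one MSDU page ring
 * per enabled band. They are allocated here and hardware RRO is then started
 * in WED with the extended interrupt mask.
 */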
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int mt7996_dma_rro_init(struct mt7996_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 irq_mask;
	int ret;

	/* ind cmd */
	mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
	mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
			       MT_RXQ_ID(MT_RXQ_RRO_IND),
			       MT7996_RX_RING_SIZE,
			       0, MT_RXQ_RRO_IND_RING_BASE);
	if (ret)
		return ret;

	/* rx msdu page queue for band0 */
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
		MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
			       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
			       MT7996_RX_RING_SIZE,
			       MT7996_RX_MSDU_PAGE_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
	if (ret)
		return ret;

	if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx msdu page queue for band1 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
			MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
		if (ret)
			return ret;
	}

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx msdu page queue for band2 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
			MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
		if (ret)
			return ret;
	}

	irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
		   MT_INT_TX_DONE_BAND2;
	mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
	mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
	mt7996_irq_enable(dev, irq_mask);

	return 0;
}
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

int mt7996_dma_init(struct mt7996_dev *dev)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
	u32 rx_base;
	u32 hif1_ofs = 0;
	int ret;

	mt7996_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7996_dma_disable(dev, true);

	/* init tx queue */
	ret = mt7996_init_tx_queues(&dev->phy,
				    MT_TXQ_ID(dev->mphy.band_idx),
				    MT7996_TX_RING_SIZE,
				    MT_TXQ_RING_BASE(0),
				    wed);
	if (ret)
		return ret;

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
				  MT_MCUQ_ID(MT_MCUQ_WA),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7996_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7996_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	/* event from WA */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT_RXQ_ID(MT_RXQ_MCU_WA),
			       MT7996_RX_MCU_RING_SIZE_WA,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
	if (ret)
		return ret;

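	/*
	 * Note: for the data-path rings below, the WED queue flags and the
	 * ->wed backpointer are only populated while a WED device is active,
	 * so the same mt76_queue_alloc() calls set up either plain WFDMA
	 * rings or WED-offloaded rings.
	 */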
	/* rx data queue for band0 and mt7996 band1 */
	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
		dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
		dev->mt76.q_rx[MT_RXQ_MAIN].wed = wed;
	}

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT_RXQ_ID(MT_RXQ_MAIN),
			       MT7996_RX_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
	if (ret)
		return ret;

	/* tx free notify event from WA for band0 */
	if (mtk_wed_device_active(wed) && !dev->has_rro) {
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
	}

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
			       MT_RXQ_ID(MT_RXQ_MAIN_WA),
			       MT7996_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
	if (ret)
		return ret;

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx data queue for mt7996 band2 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
				       MT_RXQ_ID(MT_RXQ_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7996 band2:
		 * use pcie0's rx ring3, but redirect pcie0 rx ring3 interrupt to pcie1
		 */
		if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
				       MT_RXQ_ID(MT_RXQ_BAND2_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
		if (ret)
			return ret;
	} else if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx data queue for mt7992 band1 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
				       MT_RXQ_ID(MT_RXQ_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7992 band1 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
				       MT_RXQ_ID(MT_RXQ_BAND1_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;
	}

	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
	    dev->has_rro) {
		/* rx rro data queue for band0 */
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
			MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
				       MT_RXQ_ID(MT_RXQ_RRO_BAND0),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
		if (ret)
			return ret;

		/* tx free notify event from WA for band0 */
		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
		if (ret)
			return ret;

		if (mt7996_band_valid(dev, MT_BAND2)) {
			/* rx rro data queue for band2 */
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
				MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
					       MT_RXQ_ID(MT_RXQ_RRO_BAND2),
					       MT7996_RX_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
			if (ret)
				return ret;

			/* tx free notify event from MAC for band2 */
			if (mtk_wed_device_active(wed_hif2)) {
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE;
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].wed = wed_hif2;
			}
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
					       MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
					       MT7996_RX_MCU_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
			if (ret)
				return ret;
		}
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7996_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7996_dma_enable(dev, false);

	return 0;
}

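/*
 * Full DMA recovery sequence: stop both WFDMA engines, drain and free every
 * TX/MCU/RX queue, optionally reset the whole WFSYS block (force), reset the
 * WED DMA state, reinitialise the hardware rings and finally re-enable DMA
 * via mt7996_dma_enable().
 */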
void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
{
	struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
	struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (dev->hif2)
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (phy2)
			mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
		if (phy3)
			mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	/* reset wfsys */
	if (force)
		mt7996_wfsys_reset(dev);

	if (dev->hif2 && mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);

	mt7996_dma_disable(dev, force);
	mt76_wed_dma_reset(&dev->mt76);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_dma_reset_tx_queue(&dev->mt76, dev->mphy.q_tx[i]);
		if (phy2)
			mt76_dma_reset_tx_queue(&dev->mt76, phy2->q_tx[i]);
		if (phy3)
			mt76_dma_reset_tx_queue(&dev->mt76, phy3->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed))
			if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
			    mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
				continue;

		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt7996_dma_enable(dev, !force);
}

void mt7996_dma_cleanup(struct mt7996_dev *dev)
{
	mt7996_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}