// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include "mt7996.h"
#include "../dma.h"
#include "mac.h"

int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
			  int ring_base, struct mtk_wed_device *wed)
{
	struct mt7996_dev *dev = phy->dev;
	u32 flags = 0;

	if (mtk_wed_device_active(wed)) {
		ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
		idx -= MT_TXQ_ID(0);

		if (phy->mt76->band_idx == MT_BAND2)
			flags = MT_WED_Q_TX(0);
		else
			flags = MT_WED_Q_TX(idx);
	}

	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc,
					  ring_base, wed, flags);
}

static int mt7996_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7996_dev *dev;

	dev = container_of(napi, struct mt7996_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7996_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}

static void mt7996_dma_config(struct mt7996_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do {		\
	if (wfdma)					\
		dev->q_wfdma_mask |= (1 << (q));	\
	dev->q_int_mask[(q)] = int;			\
	dev->q_id[(q)] = id;				\
} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	/* rx queue */
	RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
	RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);

	/* mt7996: band0 and band1, mt7992: band0 */
	RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
	RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, MT7996_RXQ_MCU_WA_MAIN);

	if (is_mt7996(&dev->mt76)) {
		/* mt7996 band2 */
		RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
		RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);
	} else {
		/* mt7992 band1 */
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1, MT7996_RXQ_BAND1);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT, MT7996_RXQ_MCU_WA_EXT);
	}

	if (dev->has_rro) {
		/* band0 */
		RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
			   MT7996_RXQ_RRO_BAND0);
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
			   MT7996_RXQ_MSDU_PG_BAND0);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
			   MT7996_RXQ_TXFREE0);
		/* band1 */
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
			   MT7996_RXQ_MSDU_PG_BAND1);
		/* band2 */
		RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
			   MT7996_RXQ_RRO_BAND2);
		RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
			   MT7996_RXQ_MSDU_PG_BAND2);
		RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
			   MT7996_RXQ_TXFREE2);

		RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
			   MT7996_RXQ_RRO_IND);
	}

	/* data tx queue */
	TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
	if (is_mt7996(&dev->mt76)) {
		TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
		TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
	} else {
		TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
	}

	/* mcu tx queue */
	MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
	MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA, MT7996_TXQ_MCU_WA);
	MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
}

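/*
 * Helper for __mt7996_dma_prefetch() below: the returned value packs the
 * running SRAM base offset into the upper 16 bits and the ring's prefetch
 * depth into the lower bits, and the base then advances by depth << 4 so
 * the next ring gets a disjoint prefetch window.  For example, the third
 * PREFETCH() call (depth 0x8, with base 0x40 at that point) yields
 * 0x00400008 and moves the base on to 0xc0.
 */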
static u32 __mt7996_dma_prefetch_base(u16 *base, u8 depth)
{
	u32 ret = *base << 16 | depth;

	*base = *base + (depth << 4);

	return ret;
}

static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
	u16 base = 0;
	u8 queue;

#define PREFETCH(_depth)	(__mt7996_dma_prefetch_base(&base, (_depth)))
	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x8));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x8));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0x8));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x2));

	queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2_WA : MT_RXQ_BAND1_WA;
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x2));

	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x10));

	queue = is_mt7996(&dev->mt76) ? MT_RXQ_BAND2 : MT_RXQ_BAND1;
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(queue) + ofs, PREFETCH(0x10));

	if (dev->has_rro) {
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND0) + ofs,
			PREFETCH(0x10));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_RRO_BAND2) + ofs,
			PREFETCH(0x10));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND0) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND1) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MSDU_PAGE_BAND2) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND0) + ofs,
			PREFETCH(0x4));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_TXFREE_BAND2) + ofs,
			PREFETCH(0x4));
	}
#undef PREFETCH

	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
}

void mt7996_dma_prefetch(struct mt7996_dev *dev)
{
	__mt7996_dma_prefetch(dev, 0);
	if (dev->hif2)
		__mt7996_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}

static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	if (reset) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);
		}
	}

	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
	}
}

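/*
 * Enable the WFDMA engines and arm the host interrupt mask.  As the code
 * below suggests: when an active WED device is being (re)started, the band
 * TX-done bits are handed to the WED interrupt mask as well; during a full
 * DMA reset only MT_INT_MCU_CMD is re-enabled here, with the remaining
 * bits presumably restored later by the reset path.
 */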
void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* enable WFDMA Tx/Rx */
	if (!reset) {
		if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
		else
			mt76_set(dev, MT_WFDMA0_GLO_CFG,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

		if (dev->hif2)
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
				 MT_WFDMA0_GLO_CFG_EXT_EN);
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_MCU_CMD | MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;

	if (mt7996_band_valid(dev, MT_BAND0))
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND1))
		irq_mask |= MT_INT_BAND1_RX_DONE;

	if (mt7996_band_valid(dev, MT_BAND2))
		irq_mask |= MT_INT_BAND2_RX_DONE;

	if (mtk_wed_device_active(wed) && wed_reset) {
		u32 wed_irq_mask = irq_mask;

		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start(wed, wed_irq_mask);
	}

	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;

	mt7996_irq_enable(dev, irq_mask);
	mt7996_irq_disable(dev, 0);
}

static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (dev->hif2)
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);

	/* configure delay interrupt off */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 + hif1_ofs, 0);
	}

	/* configure prefetch settings */
	mt7996_dma_prefetch(dev);

	/* hif wait WFDMA idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (dev->hif2)
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	/* GLO_CFG_EXT0 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0,
		 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
		 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

	/* GLO_CFG_EXT1 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1,
		 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

	/* WFDMA rx threshold */
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH, 0xc000c);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH, 0x10008);
	mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH, 0x20);

	if (dev->hif2) {
		/* GLO_CFG_EXT0 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
			 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

		/* GLO_CFG_EXT1 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

		mt76_set(dev, MT_WFDMA_HOST_CONFIG,
			 MT_WFDMA_HOST_CONFIG_PDMA_BAND |
			 MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);

		/* AXI read outstanding number */
		mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
			 MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);

		/* WFDMA rx threshold */
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_89_TH + hif1_ofs, 0x10008);
		mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_RRO_TH + hif1_ofs, 0x20);
	}

	if (dev->hif2) {
		/* hardware limitation: pcie1's rx ring3 is not available,
		 * so redirect pcie0's rx ring3 interrupt to pcie1
		 */
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    dev->has_rro)
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
				 MT_WFDMA0_RX_INT_SEL_RING6);
		else
			mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
				 MT_WFDMA0_RX_INT_SEL_RING3);
	}

	mt7996_dma_start(dev, reset, true);
}

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
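/*
 * Set up the WED hardware RRO (receive reorder offload) rings: the
 * indication-command ring and the per-band MSDU page rings get their
 * WED-RRO queue flags and ->wed pointer before allocation, and the
 * combined interrupt mask is then handed to mtk_wed_device_start_hw_rro().
 */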
int mt7996_dma_rro_init(struct mt7996_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 irq_mask;
	int ret;

	/* ind cmd */
	mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
	mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
			       MT_RXQ_ID(MT_RXQ_RRO_IND),
			       MT7996_RX_RING_SIZE,
			       0, MT_RXQ_RRO_IND_RING_BASE);
	if (ret)
		return ret;

	/* rx msdu page queue for band0 */
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
		MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
	mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
	ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
			       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
			       MT7996_RX_RING_SIZE,
			       MT7996_RX_MSDU_PAGE_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND0));
	if (ret)
		return ret;

	if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx msdu page queue for band1 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
			MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND1));
		if (ret)
			return ret;
	}

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx msdu page queue for band2 */
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
			MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
		mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
		ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
				       MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_MSDU_PAGE_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MSDU_PAGE_BAND2));
		if (ret)
			return ret;
	}

	irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
		   MT_INT_TX_DONE_BAND2;
	mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
	mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
	mt7996_irq_enable(dev, irq_mask);

	return 0;
}
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

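/*
 * Bring-up order, as implemented below: map queue ids and interrupt bits
 * (mt7996_dma_config), attach the generic mt76 DMA ops, reset and disable
 * the WFDMA engines, allocate the band TX ring, the MCU/FWDL command rings
 * and the RX event/data rings, register NAPI, then re-enable DMA via
 * mt7996_dma_enable().
 */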
int mt7996_dma_init(struct mt7996_dev *dev)
{
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
	u32 rx_base;
	u32 hif1_ofs = 0;
	int ret;

	mt7996_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7996_dma_disable(dev, true);

	/* init tx queue */
	ret = mt7996_init_tx_queues(&dev->phy,
				    MT_TXQ_ID(dev->mphy.band_idx),
				    MT7996_TX_RING_SIZE,
				    MT_TXQ_RING_BASE(0),
				    wed);
	if (ret)
		return ret;

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
				  MT_MCUQ_ID(MT_MCUQ_WA),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7996_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7996_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	/* event from WA */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT_RXQ_ID(MT_RXQ_MCU_WA),
			       MT7996_RX_MCU_RING_SIZE_WA,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
	if (ret)
		return ret;

	/* rx data queue for band0 and mt7996 band1 */
	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
		dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
		dev->mt76.q_rx[MT_RXQ_MAIN].wed = wed;
	}

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT_RXQ_ID(MT_RXQ_MAIN),
			       MT7996_RX_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
	if (ret)
		return ret;

	/* tx free notify event from WA for band0 */
	if (mtk_wed_device_active(wed) && !dev->has_rro) {
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
	}

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
			       MT_RXQ_ID(MT_RXQ_MAIN_WA),
			       MT7996_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
	if (ret)
		return ret;

	if (mt7996_band_valid(dev, MT_BAND2)) {
		/* rx data queue for mt7996 band2 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
				       MT_RXQ_ID(MT_RXQ_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7996 band2:
		 * use pcie0's rx ring3, but redirect its interrupt to pcie1
		 */
		if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
			dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
				       MT_RXQ_ID(MT_RXQ_BAND2_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
		if (ret)
			return ret;
	} else if (mt7996_band_valid(dev, MT_BAND1)) {
		/* rx data queue for mt7992 band1 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
				       MT_RXQ_ID(MT_RXQ_BAND1),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;

		/* tx free notify event from WA for mt7992 band1 */
		rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
				       MT_RXQ_ID(MT_RXQ_BAND1_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       rx_base);
		if (ret)
			return ret;
	}

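	/* With WED RX offload and RRO enabled, the data rings above are
	 * supplemented by per-band RRO data rings and dedicated TXFREE rings.
	 */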
	if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
	    dev->has_rro) {
		/* rx rro data queue for band0 */
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
			MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
		dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
				       MT_RXQ_ID(MT_RXQ_RRO_BAND0),
				       MT7996_RX_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
		if (ret)
			return ret;

		/* tx free notify event from WA for band0 */
		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
		dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
				       MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
				       MT7996_RX_MCU_RING_SIZE,
				       MT7996_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
		if (ret)
			return ret;

		if (mt7996_band_valid(dev, MT_BAND2)) {
			/* rx rro data queue for band2 */
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
				MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
			dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
					       MT_RXQ_ID(MT_RXQ_RRO_BAND2),
					       MT7996_RX_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
			if (ret)
				return ret;

			/* tx free notify event from MAC for band2 */
			if (mtk_wed_device_active(wed_hif2)) {
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE;
				dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].wed = wed_hif2;
			}
			ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
					       MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
					       MT7996_RX_MCU_RING_SIZE,
					       MT7996_RX_BUF_SIZE,
					       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
			if (ret)
				return ret;
		}
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7996_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7996_dma_enable(dev, false);

	return 0;
}

void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
{
	struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
	struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (dev->hif2)
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (phy2)
			mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
		if (phy3)
			mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	/* reset wfsys */
	if (force)
		mt7996_wfsys_reset(dev);

	if (dev->hif2 && mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);

	mt7996_dma_disable(dev, force);
	mt76_dma_wed_reset(&dev->mt76);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_dma_reset_tx_queue(&dev->mt76, dev->mphy.q_tx[i]);
		if (phy2)
			mt76_dma_reset_tx_queue(&dev->mt76, phy2->q_tx[i]);
		if (phy3)
			mt76_dma_reset_tx_queue(&dev->mt76, phy3->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

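	/* WED-owned RRO and TXFREE rings are left alone below; the WED device
	 * itself was already reset above via mtk_wed_device_dma_reset().
	 */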
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed))
			if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
			    mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
				continue;

		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt7996_dma_enable(dev, !force);
}

void mt7996_dma_cleanup(struct mt7996_dev *dev)
{
	mt7996_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}