// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#if defined(__FreeBSD__)
#include <linux/delay.h>
#endif

static int mt7996_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7996_dev *dev;

	dev = container_of(napi, struct mt7996_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7996_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}

static void mt7996_dma_config(struct mt7996_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do {		\
		if (wfdma)				\
			dev->q_wfdma_mask |= (1 << (q));\
		dev->q_int_mask[(q)] = int;		\
		dev->q_id[(q)] = id;			\
	} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	/* rx queue */
	RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7996_RXQ_MCU_WM);
	RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7996_RXQ_MCU_WA);

	/* band0/band1 */
	RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7996_RXQ_BAND0);
	RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN, MT7996_RXQ_MCU_WA_MAIN);

	/* band2 */
	RXQ_CONFIG(MT_RXQ_BAND2, WFDMA0, MT_INT_RX_DONE_BAND2, MT7996_RXQ_BAND2);
	RXQ_CONFIG(MT_RXQ_BAND2_WA, WFDMA0, MT_INT_RX_DONE_WA_TRI, MT7996_RXQ_MCU_WA_TRI);

	/* data tx queue */
	TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
	TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
	TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);

	/* mcu tx queue */
	MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7996_TXQ_MCU_WM);
	MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA, MT7996_TXQ_MCU_WA);
	MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7996_TXQ_FWDL);
}

static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
{
#define PREFETCH(_base, _depth)	((_base) << 16 | (_depth))
	/* prefetch SRAM wrapping boundary for tx/rx ring. */
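	/* Each PREFETCH() value packs an SRAM base address (bits 31:16) and
	 * a prefetch depth (bits 15:0). The bases below advance by exactly
	 * depth * 0x10, which suggests the depth unit is 16 SRAM entries;
	 * this is inferred from the values rather than documented.
	 */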
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x2));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x20, 0x2));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x40, 0x4));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0x80, 0x4));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0xc0, 0x2));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(2) + ofs, PREFETCH(0xe0, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x120, 0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x140, 0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x160, 0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2_WA) + ofs, PREFETCH(0x180, 0x2));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x1a0, 0x10));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND2) + ofs, PREFETCH(0x2a0, 0x10));

	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + ofs, WF_WFDMA0_GLO_CFG_EXT1_CALC_MODE);
}

void mt7996_dma_prefetch(struct mt7996_dev *dev)
{
	__mt7996_dma_prefetch(dev, 0);
	if (dev->hif2)
		__mt7996_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}

static void mt7996_dma_disable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	if (reset) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);
		}
	}

	/* disable WFDMA Tx/Rx */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
	}
}

void mt7996_dma_start(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* enable WFDMA Tx/Rx */
	if (!reset) {
		mt76_set(dev, MT_WFDMA0_GLO_CFG,
			 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

		if (dev->hif2)
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_MCU_CMD;
	if (reset)
		goto done;

	irq_mask = MT_INT_RX_DONE_MCU | MT_INT_TX_DONE_MCU;

	if (!dev->mphy.band_idx)
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (dev->dbdc_support)
		irq_mask |= MT_INT_BAND1_RX_DONE;

	if (dev->tbtc_support)
		irq_mask |= MT_INT_BAND2_RX_DONE;

done:
	mt7996_irq_enable(dev, irq_mask);
	mt7996_irq_disable(dev, 0);
}
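/* A note on the hif2 pattern used throughout this file: when a second
 * PCIe interface is present, the WFDMA0 register block is mirrored at
 * MT_WFDMA0_PCIE1, so the distance between the two copies is
 *
 *	hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
 *
 * and every configuration write is repeated at reg + hif1_ofs.
 */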
static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
{
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (dev->hif2)
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);

	/* configure delay interrupt off */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 + hif1_ofs, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 + hif1_ofs, 0);
	}

	/* configure prefetch settings */
	mt7996_dma_prefetch(dev);

	/* hif wait WFDMA idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (dev->hif2)
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	/* GLO_CFG_EXT0 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0,
		 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
		 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

	/* GLO_CFG_EXT1 */
	mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1,
		 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

	if (dev->hif2) {
		/* GLO_CFG_EXT0 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD |
			 WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE);

		/* GLO_CFG_EXT1 */
		mt76_set(dev, WF_WFDMA0_GLO_CFG_EXT1 + hif1_ofs,
			 WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);

		mt76_set(dev, MT_WFDMA_HOST_CONFIG,
			 MT_WFDMA_HOST_CONFIG_PDMA_BAND);
	}

	if (dev->hif2) {
		/* fix hardware limitation: PCIe1's rx ring3 is not available,
		 * so redirect PCIe0's rx ring3 interrupt to PCIe1
		 */
		mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
			 MT_WFDMA0_RX_INT_SEL_RING3);

		/* TODO: redirect rx ring6 interrupt to pcie0 for wed function */
	}

	mt7996_dma_start(dev, reset);
}

int mt7996_dma_init(struct mt7996_dev *dev)
{
	u32 hif1_ofs = 0;
	int ret;

	mt7996_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7996_dma_disable(dev, true);

	/* init tx queue */
	ret = mt76_connac_init_tx_queues(dev->phy.mt76,
					 MT_TXQ_ID(dev->mphy.band_idx),
					 MT7996_TX_RING_SIZE,
					 MT_TXQ_RING_BASE(0), 0);
	if (ret)
		return ret;

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
				  MT_MCUQ_ID(MT_MCUQ_WA),
				  MT7996_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7996_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7996_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;
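	/* Two MCU cores sit behind these rings: WM, which handles the
	 * normal wireless management commands and events, and WA, which
	 * (per the comments here and below) additionally delivers
	 * tx-free-done notifications; each core gets its own command
	 * and event rings.
	 */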
	/* event from WA */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT_RXQ_ID(MT_RXQ_MCU_WA),
			       MT7996_RX_MCU_RING_SIZE_WA,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
	if (ret)
		return ret;

	/* rx data queue for band0 and band1 */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT_RXQ_ID(MT_RXQ_MAIN),
			       MT7996_RX_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
	if (ret)
		return ret;

	/* tx free notify event from WA for band0 */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
			       MT_RXQ_ID(MT_RXQ_MAIN_WA),
			       MT7996_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
	if (ret)
		return ret;

	if (dev->tbtc_support || dev->mphy.band_idx == MT_BAND2) {
		/* rx data queue for band2 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
				       MT_RXQ_ID(MT_RXQ_BAND2),
				       MT7996_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs);
		if (ret)
			return ret;

		/* tx free notify event from WA for band2: uses PCIe0's rx
		 * ring3, with its interrupt redirected to PCIe1
		 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
				       MT_RXQ_ID(MT_RXQ_BAND2_WA),
				       MT7996_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
		if (ret)
			return ret;
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7996_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7996_dma_enable(dev, false);

	return 0;
}

void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
{
	struct mt76_phy *phy2 = dev->mt76.phys[MT_BAND1];
	struct mt76_phy *phy3 = dev->mt76.phys[MT_BAND2];
	u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
	int i;

	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	if (dev->hif2)
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (phy2)
			mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
		if (phy3)
			mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	/* reset wfsys */
	if (force)
		mt7996_wfsys_reset(dev);

	mt7996_dma_disable(dev, force);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt76_queue_reset(dev, dev->mphy.q_tx[i]);
		if (phy2)
			mt76_queue_reset(dev, phy2->q_tx[i]);
		if (phy3)
			mt76_queue_reset(dev, phy3->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt7996_dma_enable(dev, !force);
}

void mt7996_dma_cleanup(struct mt7996_dev *dev)
{
	mt7996_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}
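/* Call flow, as suggested by the functions above: mt7996_dma_init() runs
 * once at device bring-up; mt7996_dma_reset() is the recovery path, where
 * force additionally resets the WF subsystem before the rings are
 * re-enabled; and mt7996_dma_cleanup() stops WFDMA and releases the mt76
 * DMA rings at teardown.
 */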