// SPDX-License-Identifier: ISC
/* Copyright (C) 2023 MediaTek Inc. */

#include <linux/module.h>
#include <linux/firmware.h>

#include "mt792x.h"
#include "dma.h"
#include "trace.h"

irqreturn_t mt792x_irq_handler(int irq, void *dev_instance)
{
        struct mt792x_dev *dev = dev_instance;

        if (test_bit(MT76_REMOVED, &dev->mt76.phy.state))
                return IRQ_NONE;

        /* mask all host interrupts; the tasklet re-arms the ones it needs */
        mt76_wr(dev, dev->irq_map->host_irq_enable, 0);

        if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
                return IRQ_NONE;

        tasklet_schedule(&dev->mt76.irq_tasklet);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt792x_irq_handler);

void mt792x_irq_tasklet(unsigned long data)
{
        struct mt792x_dev *dev = (struct mt792x_dev *)data;
        const struct mt792x_irq_map *irq_map = dev->irq_map;
        u32 intr, mask = 0;

        mt76_wr(dev, irq_map->host_irq_enable, 0);

        intr = mt76_rr(dev, MT_WFDMA0_HOST_INT_STA);
        intr &= dev->mt76.mmio.irqmask;
        /* ack the interrupt sources we are about to dispatch */
        mt76_wr(dev, MT_WFDMA0_HOST_INT_STA, intr);

        trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);

        mask |= intr & (irq_map->rx.data_complete_mask |
                        irq_map->rx.wm_complete_mask |
                        irq_map->rx.wm2_complete_mask);
        if (intr & dev->irq_map->tx.mcu_complete_mask)
                mask |= dev->irq_map->tx.mcu_complete_mask;

        if (intr & MT_INT_MCU_CMD) {
                u32 intr_sw;

                intr_sw = mt76_rr(dev, MT_MCU_CMD);
                /* ack MCU2HOST_SW_INT_STA */
                mt76_wr(dev, MT_MCU_CMD, intr_sw);
                if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) {
                        mask |= irq_map->rx.data_complete_mask;
                        intr |= irq_map->rx.data_complete_mask;
                }
        }

        mt76_set_irq_mask(&dev->mt76, irq_map->host_irq_enable, mask, 0);

        if (intr & dev->irq_map->tx.all_complete_mask)
                napi_schedule(&dev->mt76.tx_napi);

        if (intr & irq_map->rx.wm_complete_mask)
                napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);

        if (intr & irq_map->rx.wm2_complete_mask)
                napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);

        if (intr & irq_map->rx.data_complete_mask)
                napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
}
EXPORT_SYMBOL_GPL(mt792x_irq_tasklet);

void mt792x_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
        struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
        const struct mt792x_irq_map *irq_map = dev->irq_map;

        /* re-enable the interrupt line for the queue that finished polling */
        if (q == MT_RXQ_MAIN)
                mt76_connac_irq_enable(mdev, irq_map->rx.data_complete_mask);
        else if (q == MT_RXQ_MCU_WA)
                mt76_connac_irq_enable(mdev, irq_map->rx.wm2_complete_mask);
        else
                mt76_connac_irq_enable(mdev, irq_map->rx.wm_complete_mask);
}
EXPORT_SYMBOL_GPL(mt792x_rx_poll_complete);

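/*
 * Every WFDMA ring is given a window in the hardware prefetch FIFO
 * through its *_EXT_CTRL register.  PREFETCH(base, depth) packs the FIFO
 * base offset into bits 31:16 and the prefetch depth into the low bits,
 * e.g. PREFETCH(0x0040, 0x4) encodes 0x00400004.  Judging by the values
 * programmed below, consecutive bases advance by 16 * depth, keeping the
 * per-ring windows contiguous and non-overlapping.
 */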
#define PREFETCH(base, depth)   ((base) << 16 | (depth))
static void mt792x_dma_prefetch(struct mt792x_dev *dev)
{
        if (is_mt7925(&dev->mt76)) {
                /* rx ring */
                mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0000, 0x4));
                mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL, PREFETCH(0x0040, 0x4));
                mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x0080, 0x4));
                mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x00c0, 0x4));
                /* tx ring */
                mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x0100, 0x10));
                mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x0200, 0x10));
                mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x0300, 0x10));
                mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x0400, 0x10));
                mt76_wr(dev, MT_WFDMA0_TX_RING15_EXT_CTRL, PREFETCH(0x0500, 0x4));
                mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x0540, 0x4));
        } else {
                /* rx ring */
                mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
                mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4));
                mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4));
                mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4));
                mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4));
                /* tx ring */
                mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4));
                mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4));
                mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4));
                mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4));
                mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4));
                mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4));
                mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4));
                mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4));
                mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
        }
}

int mt792x_dma_enable(struct mt792x_dev *dev)
{
        /* configure prefetch settings */
        mt792x_dma_prefetch(dev);

        /* reset dma idx */
        mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
        if (is_mt7925(&dev->mt76))
                mt76_wr(dev, MT_WFDMA0_RST_DRX_PTR, ~0);

        /* configure delay interrupt */
        mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);

        mt76_set(dev, MT_WFDMA0_GLO_CFG,
                 MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
                 MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
                 MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
                 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
                 FIELD_PREP(MT_WFDMA0_GLO_CFG_DMA_SIZE, 3) |
                 MT_WFDMA0_GLO_CFG_FIFO_DIS_CHECK |
                 MT_WFDMA0_GLO_CFG_RX_WB_DDONE |
                 MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
                 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

        mt76_set(dev, MT_WFDMA0_GLO_CFG,
                 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

        if (is_mt7925(&dev->mt76)) {
                mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));
                mt76_set(dev, MT_WFDMA0_INT_RX_PRI, 0x0F00);
                mt76_set(dev, MT_WFDMA0_INT_TX_PRI, 0x7F00);
        }
        mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);

        /* enable interrupts for TX/RX rings */
        mt76_connac_irq_enable(&dev->mt76,
                               dev->irq_map->tx.all_complete_mask |
                               dev->irq_map->rx.data_complete_mask |
                               dev->irq_map->rx.wm2_complete_mask |
                               dev->irq_map->rx.wm_complete_mask |
                               MT_INT_MCU_CMD);
        mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);

        return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_enable);

static int
mt792x_dma_reset(struct mt792x_dev *dev, bool force)
{
        int i, err;

        err = mt792x_dma_disable(dev, force);
        if (err)
                return err;

        /* reset hw queues */
        for (i = 0; i < __MT_TXQ_MAX; i++)
                mt76_queue_reset(dev, dev->mphy.q_tx[i]);

        for (i = 0; i < __MT_MCUQ_MAX; i++)
                mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

        mt76_for_each_q_rx(&dev->mt76, i)
                mt76_queue_reset(dev, &dev->mt76.q_rx[i]);

        mt76_tx_status_check(&dev->mt76, true);

        return mt792x_dma_enable(dev);
}

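/*
 * mt792x_wpdma_reset() is the full recovery entry point: it drains every
 * TX/MCU/RX queue, optionally pulls WFSYS through a hard reset, and then
 * rebuilds the DMA state via mt792x_dma_reset() above.  A minimal sketch
 * of a caller, assuming a hypothetical bus-level error handler
 * (mt792x_example_recover() is not part of this driver), might be:
 *
 *      static void mt792x_example_recover(struct mt792x_dev *dev)
 *      {
 *              napi_disable(&dev->mt76.tx_napi);
 *              if (mt792x_wpdma_reset(dev, true))
 *                      dev_err(dev->mt76.dev, "wpdma reset failed\n");
 *              napi_enable(&dev->mt76.tx_napi);
 *      }
 */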
int mt792x_wpdma_reset(struct mt792x_dev *dev, bool force)
{
        int i, err;

        /* clean up hw queues */
        for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
                mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

        for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
                mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

        mt76_for_each_q_rx(&dev->mt76, i)
                mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

        if (force) {
                err = mt792x_wfsys_reset(dev);
                if (err)
                        return err;
        }
        err = mt792x_dma_reset(dev, force);
        if (err)
                return err;

        mt76_for_each_q_rx(&dev->mt76, i)
                mt76_queue_rx_reset(dev, i);

        return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wpdma_reset);

int mt792x_wpdma_reinit_cond(struct mt792x_dev *dev)
{
        struct mt76_connac_pm *pm = &dev->pm;
        int err;

        /* check if the wpdma must be reinitialized */
        if (mt792x_dma_need_reinit(dev)) {
                /* disable interrupts */
                mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
                mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

                err = mt792x_wpdma_reset(dev, false);
                if (err) {
                        dev_err(dev->mt76.dev, "wpdma reset failed\n");
                        return err;
                }

                /* enable interrupts */
                mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
                pm->stats.lp_wake++;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wpdma_reinit_cond);

int mt792x_dma_disable(struct mt792x_dev *dev, bool force)
{
        /* disable WFDMA0 */
        mt76_clear(dev, MT_WFDMA0_GLO_CFG,
                   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
                   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
                   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
                   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
                   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

        if (!mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
                                 MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
                                 MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1))
                return -ETIMEDOUT;

        /* disable dmashdl */
        mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
                   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
        mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);

        if (force) {
                /* reset */
                mt76_clear(dev, MT_WFDMA0_RST,
                           MT_WFDMA0_RST_DMASHDL_ALL_RST |
                           MT_WFDMA0_RST_LOGIC_RST);

                mt76_set(dev, MT_WFDMA0_RST,
                         MT_WFDMA0_RST_DMASHDL_ALL_RST |
                         MT_WFDMA0_RST_LOGIC_RST);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mt792x_dma_disable);

void mt792x_dma_cleanup(struct mt792x_dev *dev)
{
        /* disable */
        mt76_clear(dev, MT_WFDMA0_GLO_CFG,
                   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
                   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
                   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
                   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
                   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
                   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

        mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
                            MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
                            MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1);

        /* reset */
        mt76_clear(dev, MT_WFDMA0_RST,
                   MT_WFDMA0_RST_DMASHDL_ALL_RST |
                   MT_WFDMA0_RST_LOGIC_RST);

        mt76_set(dev, MT_WFDMA0_RST,
                 MT_WFDMA0_RST_DMASHDL_ALL_RST |
                 MT_WFDMA0_RST_LOGIC_RST);

        mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt792x_dma_cleanup);

int mt792x_poll_tx(struct napi_struct *napi, int budget)
{
        struct mt792x_dev *dev;

        dev = container_of(napi, struct mt792x_dev, mt76.tx_napi);

        if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
                napi_complete(napi);
                queue_work(dev->mt76.wq, &dev->pm.wake_work);
                return 0;
        }

        mt76_connac_tx_cleanup(&dev->mt76);
        if (napi_complete(napi))
                mt76_connac_irq_enable(&dev->mt76,
                                       dev->irq_map->tx.all_complete_mask);
        mt76_connac_pm_unref(&dev->mphy, &dev->pm);

        return 0;
}
EXPORT_SYMBOL_GPL(mt792x_poll_tx);

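/*
 * mt792x_poll_tx() above and mt792x_poll_rx() below share the same
 * runtime-PM pattern: grab a wake reference with mt76_connac_pm_ref()
 * before touching the hardware, and if the device is asleep, complete
 * NAPI at once and defer the work to pm.wake_work.  A sketch of how a
 * bus driver might register the two handlers at init time (a hedged
 * illustration, not code copied from a real probe path):
 *
 *      err = mt76_dma_init(&dev->mt76, mt792x_poll_rx);
 *      if (err)
 *              return err;
 *      netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
 *                        mt792x_poll_tx);
 *      napi_enable(&dev->mt76.tx_napi);
 */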
int mt792x_poll_rx(struct napi_struct *napi, int budget)
{
        struct mt792x_dev *dev;
        int done;

        dev = container_of(napi->dev, struct mt792x_dev, mt76.napi_dev);

        if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
                napi_complete(napi);
                queue_work(dev->mt76.wq, &dev->pm.wake_work);
                return 0;
        }
        done = mt76_dma_rx_poll(napi, budget);
        mt76_connac_pm_unref(&dev->mphy, &dev->pm);

        return done;
}
EXPORT_SYMBOL_GPL(mt792x_poll_rx);

int mt792x_wfsys_reset(struct mt792x_dev *dev)
{
        u32 addr = is_mt7921(&dev->mt76) ? 0x18000140 : 0x7c000140;

        mt76_clear(dev, addr, WFSYS_SW_RST_B);
        msleep(50);
        mt76_set(dev, addr, WFSYS_SW_RST_B);

        if (!__mt76_poll_msec(&dev->mt76, addr, WFSYS_SW_INIT_DONE,
                              WFSYS_SW_INIT_DONE, 500))
                return -ETIMEDOUT;

        return 0;
}
EXPORT_SYMBOL_GPL(mt792x_wfsys_reset);
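
/*
 * Illustrative sketch (not part of this file): how a PCI probe path is
 * expected to wire the exported IRQ helpers together.  mt792x_irq_handler()
 * only masks the interrupt line and schedules the tasklet, which acks the
 * status bits and kicks the NAPI contexts above; the variable names below
 * are hypothetical:
 *
 *      tasklet_init(&dev->mt76.irq_tasklet, mt792x_irq_tasklet,
 *                   (unsigned long)dev);
 *      err = devm_request_irq(dev->mt76.dev, pdev->irq, mt792x_irq_handler,
 *                             IRQF_SHARED, KBUILD_MODNAME, dev);
 *      if (err)
 *              return err;
 */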