// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include "mt7915.h"
#include "../dma.h"
#include "mac.h"

/* Set up one band's data TX queue. When WED (Wireless Ethernet Dispatch)
 * is active the ring is owned by the WED block: mt798x keeps the host ring
 * base (offset by the TXQ id), other chips switch to the dedicated WED ring
 * base, and the queue index is rebased to 0.
 */
static int
mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
	struct mt7915_dev *dev = phy->dev;

	if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
		if (is_mt798x(&dev->mt76))
			ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
		else
			ring_base = MT_WED_TX_RING_BASE;

		idx -= MT_TXQ_ID(0);
	}

	return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc, ring_base,
					  MT_WED_Q_TX(idx));
}

/* NAPI poll handler for TX completions: reap finished descriptors and
 * re-enable the MCU TX-done interrupt once polling is complete.
 */
static int mt7915_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7915_dev *dev;

	dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);

	mt76_connac_tx_cleanup(&dev->mt76);
	if (napi_complete_done(napi, 0))
		mt7915_irq_enable(dev, MT_INT_TX_DONE_MCU);

	return 0;
}

/* Populate the per-queue lookup tables (wfdma engine selector, interrupt
 * mask and hardware queue id) for this chip generation. mt7915 splits
 * queues across WFDMA0/WFDMA1; later chips (mt7916/mt798x) use WFDMA0
 * only, with separate register/interrupt assignments when WED is active.
 */
static void mt7915_dma_config(struct mt7915_dev *dev)
{
#define Q_CONFIG(q, wfdma, int, id) do {		\
		if (wfdma)				\
			dev->wfdma_mask |= (1 << (q));	\
		dev->q_int_mask[(q)] = int;		\
		dev->q_id[(q)] = id;			\
	} while (0)

#define MCUQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(q, (wfdma), (int), (id))
#define RXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
#define TXQ_CONFIG(q, wfdma, int, id)	Q_CONFIG(__TXQ(q), (wfdma), (int), (id))

	if (is_mt7915(&dev->mt76)) {
		RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0,
			   MT7915_RXQ_BAND0);
		RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM,
			   MT7915_RXQ_MCU_WM);
		RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA,
			   MT7915_RXQ_MCU_WA);
		RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1,
			   MT7915_RXQ_BAND1);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT,
			   MT7915_RXQ_MCU_WA_EXT);
		RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN,
			   MT7915_RXQ_MCU_WA);
		TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
		TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
		MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM,
			    MT7915_TXQ_MCU_WM);
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA,
			    MT7915_TXQ_MCU_WA);
		MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL,
			    MT7915_TXQ_FWDL);
	} else {
		RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM,
			   MT7916_RXQ_MCU_WM);
		RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916,
			   MT7916_RXQ_MCU_WA_EXT);
		MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM,
			    MT7915_TXQ_MCU_WM);
		MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916,
			    MT7915_TXQ_MCU_WA);
		MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL,
			    MT7915_TXQ_FWDL);

		/* mt7916 + WED: band0/WA rings are driven by WED and use the
		 * WED-specific interrupt sources; band1 stays on the host
		 * interrupt only when a second hif is present.
		 */
		if (is_mt7916(&dev->mt76) && mtk_wed_device_active(&dev->mt76.mmio.wed)) {
			RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_WED_RX_DONE_BAND0_MT7916,
				   MT7916_RXQ_BAND0);
			RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MT7916,
				   MT7916_RXQ_MCU_WA);
			if (dev->hif2)
				RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0,
					   MT_INT_RX_DONE_BAND1_MT7916,
					   MT7916_RXQ_BAND1);
			else
				RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0,
					   MT_INT_WED_RX_DONE_BAND1_MT7916,
					   MT7916_RXQ_BAND1);
			RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MAIN_MT7916,
				   MT7916_RXQ_MCU_WA_MAIN);
			TXQ_CONFIG(0, WFDMA0, MT_INT_WED_TX_DONE_BAND0,
				   MT7915_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_WED_TX_DONE_BAND1,
				   MT7915_TXQ_BAND1);
		} else {
			RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916,
				   MT7916_RXQ_BAND0);
			RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA,
				   MT7916_RXQ_MCU_WA);
			RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916,
				   MT7916_RXQ_BAND1);
			RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916,
				   MT7916_RXQ_MCU_WA_MAIN);
			TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
				   MT7915_TXQ_BAND0);
			TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
				   MT7915_TXQ_BAND1);
		}
	}
}

/* Program the per-ring SRAM prefetch windows (base/depth pairs packed into
 * one register) at register offset @ofs (0 for hif0, PCIE1 delta for hif1).
 */
static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
{
#define PREFETCH(_base, _depth)	((_base) << 16 | (_depth))
	u32 base = 0;

	/* prefetch SRAM wrapping boundary for tx/rx ring. */
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x4));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x40, 0x4));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x80, 0x4));
	mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4));
	mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4));

	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs,
		PREFETCH(0x140, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs,
		PREFETCH(0x180, 0x4));
	/* non-mt7915 chips have an extra MAIN_WA ring; shift later windows
	 * up by 0x40 to make room for it.
	 */
	if (!is_mt7915(&dev->mt76)) {
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs,
			PREFETCH(0x1c0, 0x4));
		base = 0x40;
	}
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
		PREFETCH(0x1c0 + base, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs,
		PREFETCH(0x200 + base, 0x4));
	mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
		PREFETCH(0x240 + base, 0x4));

	/* for mt7915, the ring which is next the last
	 * used ring must be initialized.
	 */
	if (is_mt7915(&dev->mt76)) {
		ofs += 0x4;
		mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs,
			PREFETCH(0x140, 0x0));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
			PREFETCH(0x200 + base, 0x0));
		mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
			PREFETCH(0x280 + base, 0x0));
	}
}

/* Apply prefetch programming to hif0 and, when a second PCIe interface
 * exists, to hif1 at its register-window offset.
 */
void mt7915_dma_prefetch(struct mt7915_dev *dev)
{
	__mt7915_dma_prefetch(dev, 0);
	if (dev->hif2)
		__mt7915_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}

/* Quiesce the WFDMA engines. When @rst is set, first pulse the DMASHDL/
 * logic reset bits (clear then set) on every engine; then clear the TX/RX
 * enable and OMIT_*_INFO bits on WFDMA0 (and WFDMA1 on mt7915, and the
 * hif1 mirrors when present).
 */
static void mt7915_dma_disable(struct mt7915_dev *dev, bool rst)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset */
	if (rst) {
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);

		if (is_mt7915(mdev)) {
			mt76_clear(dev, MT_WFDMA1_RST,
				   MT_WFDMA1_RST_DMASHDL_ALL_RST |
				   MT_WFDMA1_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA1_RST,
				 MT_WFDMA1_RST_DMASHDL_ALL_RST |
				 MT_WFDMA1_RST_LOGIC_RST);
		}

		if (dev->hif2) {
			mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
				   MT_WFDMA0_RST_DMASHDL_ALL_RST |
				   MT_WFDMA0_RST_LOGIC_RST);

			mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
				 MT_WFDMA0_RST_DMASHDL_ALL_RST |
				 MT_WFDMA0_RST_LOGIC_RST);

			if (is_mt7915(mdev)) {
				mt76_clear(dev, MT_WFDMA1_RST + hif1_ofs,
					   MT_WFDMA1_RST_DMASHDL_ALL_RST |
					   MT_WFDMA1_RST_LOGIC_RST);

				mt76_set(dev, MT_WFDMA1_RST + hif1_ofs,
					 MT_WFDMA1_RST_DMASHDL_ALL_RST |
					 MT_WFDMA1_RST_LOGIC_RST);
			}
		}
	}

	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (is_mt7915(mdev))
		mt76_clear(dev, MT_WFDMA1_GLO_CFG,
			   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA1_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (dev->hif2) {
		mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
			   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
			   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

		if (is_mt7915(mdev))
			mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
				   MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				   MT_WFDMA1_GLO_CFG_RX_DMA_EN |
				   MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
				   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
				   MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
	}
}

/* Turn DMA engines back on and unmask interrupts.
 *
 * @reset:     set when called from the recovery path; DMA enable bits are
 *             left untouched (hardware was just reset) and only MCU_CMD
 *             interrupts are unmasked at the end.
 * @wed_reset: when WED is active, also reprogram the WED interrupt mask,
 *             re-enable RX stats reporting in firmware, and (re)start the
 *             WED device.
 *
 * Returns 0 on success or a negative error from the WED MCU command.
 */
int mt7915_dma_start(struct mt7915_dev *dev, bool reset, bool wed_reset)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 hif1_ofs = 0;
	u32 irq_mask;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* enable wpdma tx/rx */
	if (!reset) {
		mt76_set(dev, MT_WFDMA0_GLO_CFG,
			 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
			 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
			 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

		if (is_mt7915(mdev))
			mt76_set(dev, MT_WFDMA1_GLO_CFG,
				 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);

		if (dev->hif2) {
			mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
				 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_RX_DMA_EN |
				 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
				 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

			if (is_mt7915(mdev))
				mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
					 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
					 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
					 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
					 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);

			mt76_set(dev, MT_WFDMA_HOST_CONFIG,
				 MT_WFDMA_HOST_CONFIG_PDMA_BAND);
		}
	}

	/* enable interrupts for TX/RX rings */
	irq_mask = MT_INT_RX_DONE_MCU |
		   MT_INT_TX_DONE_MCU |
		   MT_INT_MCU_CMD;

	if (!dev->phy.mt76->band_idx)
		irq_mask |= MT_INT_BAND0_RX_DONE;

	if (dev->dbdc_support || dev->phy.mt76->band_idx)
		irq_mask |= MT_INT_BAND1_RX_DONE;

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wed_reset) {
		u32 wed_irq_mask = irq_mask;
		int ret;

		wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
		/* mt798x shares the host interrupt mask register with WED */
		if (!is_mt798x(&dev->mt76))
			mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
		else
			mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		ret = mt7915_mcu_wed_enable_rx_stats(dev);
		if (ret)
			return ret;

		mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
	}

	/* during recovery keep only MCU command interrupts enabled */
	irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;

	mt7915_irq_enable(dev, irq_mask);
	/* NOTE(review): disables no bits — presumably forces the irq mask
	 * register write to take effect; confirm against mmio helpers.
	 */
	mt7915_irq_disable(dev, 0);

	return 0;
}

/* Full DMA bring-up: reset ring pointers, disable delay interrupts,
 * program prefetch, arm the WFDMA busy/idle handshake, wait for the hif
 * to go idle, then start DMA via mt7915_dma_start() (with WED restart).
 */
static int mt7915_dma_enable(struct mt7915_dev *dev, bool reset)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 hif1_ofs = 0;

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
	if (is_mt7915(mdev))
		mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
		if (is_mt7915(mdev))
			mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
	}

	/* configure delay interrupt off */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
	if (is_mt7915(mdev)) {
		mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
	} else {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);
	}

	if (dev->hif2) {
		mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
		if (is_mt7915(mdev)) {
			mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 +
				hif1_ofs, 0);
		} else {
			mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 +
				hif1_ofs, 0);
			mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 +
				hif1_ofs, 0);
		}
	}

	/* configure perfetch settings */
	mt7915_dma_prefetch(dev);

	/* hif wait WFDMA idle */
	mt76_set(dev, MT_WFDMA0_BUSY_ENA,
		 MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
		 MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
		 MT_WFDMA0_BUSY_ENA_RX_FIFO);

	if (is_mt7915(mdev))
		mt76_set(dev, MT_WFDMA1_BUSY_ENA,
			 MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA1_BUSY_ENA_RX_FIFO);

	if (dev->hif2) {
		mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
			 MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);

		if (is_mt7915(mdev))
			mt76_set(dev, MT_WFDMA1_BUSY_ENA + hif1_ofs,
				 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
				 MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
				 MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
	}

	/* poll (up to 1000 us) for the hif to report not-busy; result is
	 * intentionally ignored — start anyway.
	 */
	mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
		  MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);

	return mt7915_dma_start(dev, reset, true);
}

/* Probe-time DMA init: configure queue tables, attach the mt76 DMA layer,
 * hard-reset/disable the engines, set up WED ring control, allocate every
 * TX/MCU/RX queue (band1 queues only with dbdc or a band1-only phy), spin
 * up NAPI and finally enable DMA. Returns 0 or a negative errno from
 * queue allocation.
 */
int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
{
	struct mt76_dev *mdev = &dev->mt76;
	u32 wa_rx_base, wa_rx_idx;
	u32 hif1_ofs = 0;
	int ret;

	mt7915_dma_config(dev);

	mt76_dma_attach(&dev->mt76);

	if (dev->hif2)
		hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

	mt7915_dma_disable(dev, true);

	if (mtk_wed_device_active(&mdev->mmio.wed)) {
		if (!is_mt798x(mdev)) {
			/* WED RX1 ring id differs by generation */
			u8 wed_control_rx1 = is_mt7915(mdev) ? 1 : 2;

			mt76_set(dev, MT_WFDMA_HOST_CONFIG,
				 MT_WFDMA_HOST_CONFIG_WED);
			mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
				FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1,
					   wed_control_rx1));
			if (is_mt7915(mdev))
				mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
					 MT_WFDMA0_EXT0_RXWB_KEEP);
		}
	} else {
		mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
	}

	/* init tx queue */
	ret = mt7915_init_tx_queues(&dev->phy,
				    MT_TXQ_ID(dev->phy.mt76->band_idx),
				    MT7915_TX_RING_SIZE,
				    MT_TXQ_RING_BASE(0));
	if (ret)
		return ret;

	if (phy2) {
		ret = mt7915_init_tx_queues(phy2,
					    MT_TXQ_ID(phy2->mt76->band_idx),
					    MT7915_TX_RING_SIZE,
					    MT_TXQ_RING_BASE(1));
		if (ret)
			return ret;
	}

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
				  MT_MCUQ_ID(MT_MCUQ_WM),
				  MT7915_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WM));
	if (ret)
		return ret;

	/* command to WA */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
				  MT_MCUQ_ID(MT_MCUQ_WA),
				  MT7915_TX_MCU_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
				  MT_MCUQ_ID(MT_MCUQ_FWDL),
				  MT7915_TX_FWDL_RING_SIZE,
				  MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
	if (ret)
		return ret;

	/* event from WM */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT_RXQ_ID(MT_RXQ_MCU),
			       MT7915_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE,
			       MT_RXQ_RING_BASE(MT_RXQ_MCU));
	if (ret)
		return ret;

	/* event from WA; with WED on mt7915 this ring carries tx-free
	 * notifications and lives at the WED ring base.
	 */
	if (mtk_wed_device_active(&mdev->mmio.wed) && is_mt7915(mdev)) {
		wa_rx_base = MT_WED_RX_RING_BASE;
		wa_rx_idx = MT7915_RXQ_MCU_WA;
		dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
	} else {
		wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
		wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
	}
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE, wa_rx_base);
	if (ret)
		return ret;

	/* rx data queue for band0 */
	if (!dev->phy.mt76->band_idx) {
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
			dev->mt76.q_rx[MT_RXQ_MAIN].flags =
				MT_WED_Q_RX(MT7915_RXQ_BAND0);
			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
				       MT_RXQ_ID(MT_RXQ_MAIN),
				       MT7915_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
		if (ret)
			return ret;
	}

	/* tx free notify event from WA for band0 */
	if (!is_mt7915(mdev)) {
		wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA);
		wa_rx_idx = MT_RXQ_ID(MT_RXQ_MAIN_WA);

		if (mtk_wed_device_active(&mdev->mmio.wed)) {
			mdev->q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
			if (is_mt7916(mdev)) {
				wa_rx_base = MT_WED_RX_RING_BASE;
				wa_rx_idx = MT7915_RXQ_MCU_WA;
			}
		}

		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
				       wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE, wa_rx_base);
		if (ret)
			return ret;
	}

	if (dev->dbdc_support || dev->phy.mt76->band_idx) {
		if (mtk_wed_device_active(&mdev->mmio.wed) &&
		    mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
			dev->mt76.q_rx[MT_RXQ_BAND1].flags =
				MT_WED_Q_RX(MT7915_RXQ_BAND1);
			dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
		}

		/* rx data queue for band1 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
				       MT_RXQ_ID(MT_RXQ_BAND1),
				       MT7915_RX_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs);
		if (ret)
			return ret;

		/* tx free notify event from WA for band1 */
		ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
				       MT_RXQ_ID(MT_RXQ_BAND1_WA),
				       MT7915_RX_MCU_RING_SIZE,
				       MT_RX_BUF_SIZE,
				       MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs);
		if (ret)
			return ret;
	}

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7915_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	mt7915_dma_enable(dev, false);

	return 0;
}

/* Handshake with the WED reset path: signal that DMA is quiesced and wait
 * (up to 3 s) for WED to acknowledge completion. No-op unless a WED reset
 * is in flight.
 */
static void mt7915_dma_wed_reset(struct mt7915_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;

	if (!test_bit(MT76_STATE_WED_RESET, &dev->mphy.state))
		return;

	complete(&mdev->mmio.wed_reset);

	if (!wait_for_completion_timeout(&dev->mt76.mmio.wed_reset_complete,
					 3 * HZ))
		dev_err(dev->mt76.dev, "wed reset complete timeout\n");
}

/* Reset one TX queue and, when WED is active, re-attach it to WED. */
static void
mt7915_dma_reset_tx_queue(struct mt7915_dev *dev, struct mt76_queue *q)
{
	mt76_queue_reset(dev, q);
	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mt76_dma_wed_setup(&dev->mt76, q, true);
}

/* Recovery-path DMA reset: drain every queue, optionally reset wfsys
 * (@force), reset WED DMA, disable the engines, perform the WED reset
 * handshake, re-initialize all rings (WED-owned TXFREE rings are skipped),
 * and restart DMA. Always returns 0.
 */
int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
	int i;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) {
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
		if (mphy_ext)
			mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	/* reset wfsys */
	if (force)
		mt7915_wfsys_reset(dev);

	if (mtk_wed_device_active(wed))
		mtk_wed_device_dma_reset(wed);

	mt7915_dma_disable(dev, force);
	mt7915_dma_wed_reset(dev);

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++) {
		mt7915_dma_reset_tx_queue(dev, dev->mphy.q_tx[i]);
		if (mphy_ext)
			mt7915_dma_reset_tx_queue(dev, mphy_ext->q_tx[i]);
	}

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i) {
		/* WED owns the TXFREE rings; leave them alone */
		if (dev->mt76.q_rx[i].flags == MT_WED_Q_TXFREE)
			continue;

		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	if (mtk_wed_device_active(wed) && is_mt7915(&dev->mt76))
		mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
			 MT_WFDMA0_EXT0_RXWB_KEEP);

	mt7915_dma_enable(dev, !force);

	return 0;
}

/* Teardown: quiesce and reset the engines, then free all DMA resources. */
void mt7915_dma_cleanup(struct mt7915_dev *dev)
{
	mt7915_dma_disable(dev, true);

	mt76_dma_cleanup(&dev->mt76);
}