// SPDX-License-Identifier: ISC

#include "mt7603.h"
#include "mac.h"
#include "../dma.h"

/* TX frames looped back by the hardware on the MCU RX queue belong to
 * stations in powersave; buffer them in the per-station PS queue so they
 * can be retransmitted once the station wakes up.
 */
static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
	static const u8 tid_to_ac[8] = {
		IEEE80211_AC_BE,
		IEEE80211_AC_BK,
		IEEE80211_AC_BK,
		IEEE80211_AC_BE,
		IEEE80211_AC_VI,
		IEEE80211_AC_VI,
		IEEE80211_AC_VO,
		IEEE80211_AC_VO
	};
	__le32 *txd = (__le32 *)skb->data;
	struct ieee80211_hdr *hdr;
	struct ieee80211_sta *sta;
	struct mt7603_sta *msta;
	struct mt76_wcid *wcid;
	void *priv;
	int idx;
	u32 val;
	u8 tid = 0;

	if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
		goto free;

	val = le32_to_cpu(txd[1]);
	idx = FIELD_GET(MT_TXD1_WLAN_IDX, val);
	skb->priority = FIELD_GET(MT_TXD1_TID, val);

	if (idx >= MT7603_WTBL_STA - 1)
		goto free;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (!wcid)
		goto free;

	priv = msta = container_of(wcid, struct mt7603_sta, wcid);
	val = le32_to_cpu(txd[0]);
	val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
	val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
	txd[0] = cpu_to_le32(val);

	sta = container_of(priv, struct ieee80211_sta, drv_priv);
	hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = *ieee80211_get_qos_ctl(hdr) &
		      IEEE80211_QOS_CTL_TAG1D_MASK;
	skb_set_queue_mapping(skb, tid_to_ac[tid]);
	ieee80211_sta_set_buffered(sta, tid, true);

	spin_lock_bh(&dev->ps_lock);
	__skb_queue_tail(&msta->psq, skb);
	/* Cap the PS queue at 64 frames by dropping the oldest entry */
	if (skb_queue_len(&msta->psq) >= 64) {
		skb = __skb_dequeue(&msta->psq);
		dev_kfree_skb(skb);
	}
	spin_unlock_bh(&dev->ps_lock);
	return;

free:
	dev_kfree_skb(skb);
}

void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);

	if (q == MT_RXQ_MCU) {
		if (type == PKT_TYPE_RX_EVENT)
			mt76_mcu_rx_event(&dev->mt76, skb);
		else
			mt7603_rx_loopback_skb(dev, skb);
		return;
	}

	switch (type) {
	case PKT_TYPE_TXS:
		/* TX status reports are packed as 5-dword entries following
		 * the initial header dword.
		 */
		for (rxd++; rxd + 5 <= end; rxd += 5)
			mt7603_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_EVENT:
		mt76_mcu_rx_event(&dev->mt76, skb);
		return;
	case PKT_TYPE_NORMAL:
		if (mt7603_mac_fill_rx(dev, skb) == 0) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

static int
mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize)
{
	int err;

	err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
			       MT_RX_RING_BASE);
	if (err < 0)
		return err;

	mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

static int mt7603_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7603_dev *dev;
	int i;

	dev = container_of(napi, struct mt7603_dev, mt76.tx_napi);
	dev->tx_dma_check = 0;

	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
	for (i = MT_TXQ_PSD; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);

	if (napi_complete_done(napi, 0))
		mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);

	/* Run one more cleanup pass to catch completions that raced with
	 * re-enabling the TX done interrupt.
	 */
	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
	for (i = MT_TXQ_PSD; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);

	mt7603_mac_sta_poll(dev);

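	/* Kick the TX worker so frames queued while the rings were being
	 * cleaned up get scheduled for transmission.
	 */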
	mt76_worker_schedule(&dev->mt76.tx_worker);

	return 0;
}

int mt7603_dma_init(struct mt7603_dev *dev)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BK] = 0,
		[IEEE80211_AC_BE] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};
	int ret;
	int i;

	mt76_dma_attach(&dev->mt76);

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_RX_DMA_EN |
		   MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
	mt7603_pse_client_reset(dev);

	for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
		ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
					 MT7603_TX_RING_SIZE, MT_TX_RING_BASE, 0);
		if (ret)
			return ret;
	}

	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
				 MT7603_PSD_RING_SIZE, MT_TX_RING_BASE, 0);
	if (ret)
		return ret;

	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_TX_HW_QUEUE_MCU,
				  MT_MCU_RING_SIZE, MT_TX_RING_BASE);
	if (ret)
		return ret;

	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN,
				 MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
	if (ret)
		return ret;

	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC,
				 MT_MCU_RING_SIZE, MT_TX_RING_BASE, 0);
	if (ret)
		return ret;

	mt7603_irq_enable(dev,
			  MT_INT_TX_DONE(IEEE80211_AC_VO) |
			  MT_INT_TX_DONE(IEEE80211_AC_VI) |
			  MT_INT_TX_DONE(IEEE80211_AC_BE) |
			  MT_INT_TX_DONE(IEEE80211_AC_BK) |
			  MT_INT_TX_DONE(MT_TX_HW_QUEUE_MGMT) |
			  MT_INT_TX_DONE(MT_TX_HW_QUEUE_MCU) |
			  MT_INT_TX_DONE(MT_TX_HW_QUEUE_BCN) |
			  MT_INT_TX_DONE(MT_TX_HW_QUEUE_BMC));

	ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				   MT7603_MCU_RX_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

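	/* RX ring 0 carries normal traffic; the MCU event ring above is ring 1. */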
	ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
				   MT7603_RX_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	mt76_wr(dev, MT_DELAY_INT_CFG, 0);
	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret)
		return ret;

	netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7603_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	return 0;
}

void mt7603_dma_cleanup(struct mt7603_dev *dev)
{
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_RX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);

	mt76_dma_cleanup(&dev->mt76);
}