/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */
#ifndef __MT76_DMA_H
#define __MT76_DMA_H

/* Sentinel pointer used to mark a queue entry that carries no real buffer */
#define DMA_DUMMY_DATA			((void *)~0)

#define MT_RING_SIZE			0x10

/* Bit layout of struct mt76_desc::ctrl (scatter lengths + completion flags) */
#define MT_DMA_CTL_SD_LEN1		GENMASK(13, 0)
#define MT_DMA_CTL_LAST_SEC1		BIT(14)
#define MT_DMA_CTL_BURST		BIT(15)
#define MT_DMA_CTL_SD_LEN0		GENMASK(29, 16)
#define MT_DMA_CTL_LAST_SEC0		BIT(30)
#define MT_DMA_CTL_DMA_DONE		BIT(31)
/* Additional ctrl bits used on WED/RRO-capable chips */
#define MT_DMA_CTL_TO_HOST		BIT(8)
#define MT_DMA_CTL_TO_HOST_A		BIT(12)
#define MT_DMA_CTL_DROP			BIT(14)
#define MT_DMA_CTL_TOKEN		GENMASK(31, 16)
/* High bits of the 36-bit scatter DMA addresses — assumption from the
 * _H suffix and SDP0/SDP1 pairing; confirm against chip-specific code */
#define MT_DMA_CTL_SDP1_H		GENMASK(19, 16)
#define MT_DMA_CTL_SDP0_H		GENMASK(3, 0)
#define MT_DMA_CTL_WO_DROP		BIT(8)

/* Fields of struct mt76_desc::info (PPE = packet processing engine) */
#define MT_DMA_PPE_CPU_REASON		GENMASK(15, 11)
#define MT_DMA_PPE_ENTRY		GENMASK(30, 16)
#define MT_DMA_INFO_DMA_FRAG		BIT(9)
#define MT_DMA_INFO_PPE_VLD		BIT(31)

#define MT_DMA_CTL_PN_CHK_FAIL		BIT(13)
/* Set when the descriptor carries the extended (WED indication) format;
 * gates the buf1-based drop decision in mt76_dma_should_drop_buf() */
#define MT_DMA_CTL_VER_MASK		BIT(7)

/* RRO (receive reordering offload) descriptor fields */
#define MT_DMA_SDP0			GENMASK(15, 0)
#define MT_DMA_TOKEN_ID			GENMASK(31, 16)
#define MT_DMA_MAGIC_MASK		GENMASK(31, 28)
#define MT_DMA_RRO_EN			BIT(13)

/* Magic counter wraps at 16 (4-bit field, see MT_DMA_MAGIC_MASK) */
#define MT_DMA_MAGIC_CNT		16

#define MT_DMA_WED_IND_CMD_CNT		8
/* Reason field inside buf1 of a WED indication descriptor,
 * decoded as enum mt76_dma_wed_ind_reason */
#define MT_DMA_WED_IND_REASON		GENMASK(15, 12)

/* Fixed header/trailer sizes (bytes) used when parsing RX frames */
#define MT_DMA_HDR_LEN			4
#define MT_RX_INFO_LEN			4
#define MT_FCE_INFO_LEN			4
#define MT_RX_RXWI_LEN			32

/* Hardware DMA descriptor: two buffer pointers plus control/info words.
 * All fields are little-endian as written/read by the device. */
struct mt76_desc {
	__le32 buf0;
	__le32 ctrl;
	__le32 buf1;
	__le32 info;
} __packed __aligned(4);

/* WED RRO descriptor: buffer-pointer pair only, no ctrl/info words */
struct mt76_wed_rro_desc {
	__le32 buf0;
	__le32 buf1;
} __packed __aligned(4);

/* Bit layout of struct mt76_rro_rxdmad_c, grouped per data word */
/* data1 */
#define RRO_RXDMAD_DATA1_LS_MASK		BIT(30)
#define RRO_RXDMAD_DATA1_SDL0_MASK		GENMASK(29, 16)
/* data2 */
#define RRO_RXDMAD_DATA2_RX_TOKEN_ID_MASK	GENMASK(31, 16)
#define RRO_RXDMAD_DATA2_IND_REASON_MASK	GENMASK(15, 12)
/* data3 */
#define RRO_RXDMAD_DATA3_MAGIC_CNT_MASK		GENMASK(31, 28)

/* RRO RX DMA done record; fields decoded via the RRO_RXDMAD_* masks above */
struct mt76_rro_rxdmad_c {
	__le32 data0;
	__le32 data1;
	__le32 data2;
	__le32 data3;
};

/* TX queue selector written into descriptors */
enum mt76_qsel {
	MT_QSEL_MGMT,
	MT_QSEL_HCCA,
	MT_QSEL_EDCA,
	MT_QSEL_EDCA_2,
};

/* Event types reported by the MCU (device firmware) */
enum mt76_mcu_evt_type {
	EVT_CMD_DONE,
	EVT_CMD_ERROR,
	EVT_CMD_RETRY,
	EVT_EVENT_PWR_RSP,
	EVT_EVENT_WOW_RSP,
	EVT_EVENT_CARRIER_DETECT_RSP,
	EVT_EVENT_DFS_DETECT_RSP,
};

/* Values of the MT_DMA_WED_IND_REASON field in buf1 */
enum mt76_dma_wed_ind_reason {
	MT_DMA_WED_IND_REASON_NORMAL,
	MT_DMA_WED_IND_REASON_REPEAT,
	MT_DMA_WED_IND_REASON_OLDPKT,
};

int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct);
void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
			  bool reset_idx);

/* Reset a TX queue (including its indices) and, when a WED device is
 * active, redo the WED DMA setup for that queue. */
static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	dev->queue_ops->reset_q(dev, q, true);
	if (mtk_wed_device_active(&dev->mmio.wed))
		mt76_wed_dma_setup(dev, q, true);
}

/* Decide whether an RX buffer must be dropped, based on its descriptor
 * words. No-op when @drop is NULL. Base decision: drop if the ctrl word
 * has TO_HOST_A or DROP set. For extended-format descriptors (VER_MASK
 * set), the WED indication reason in @buf1 overrides that:
 *  - REPEAT: always drop
 *  - OLDPKT: keep only if it is a DMA fragment (MT_DMA_INFO_DMA_FRAG)
 *  - otherwise: drop on PN (packet number) check failure */
static inline void
mt76_dma_should_drop_buf(bool *drop, u32 ctrl, u32 buf1, u32 info)
{
	if (!drop)
		return;

	*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP));
	if (!(ctrl & MT_DMA_CTL_VER_MASK))
		return;

	switch (FIELD_GET(MT_DMA_WED_IND_REASON, buf1)) {
	case MT_DMA_WED_IND_REASON_REPEAT:
		*drop = true;
		break;
	case MT_DMA_WED_IND_REASON_OLDPKT:
		*drop = !(info & MT_DMA_INFO_DMA_FRAG);
		break;
	default:
		*drop = !!(ctrl & MT_DMA_CTL_PN_CHK_FAIL);
		break;
	}
}

/* Fetch the mt76_dev backing a net_device: the netdev private area
 * holds a single pointer to the mt76_dev, which this dereferences. */
static inline void *mt76_priv(struct net_device *dev)
{
	struct mt76_dev **priv;

	priv = netdev_priv(dev);

	return *priv;
}

#endif