/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */
#ifndef __MT76_DMA_H
#define __MT76_DMA_H

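/* All-ones sentinel pointer marking a queue entry that carries no real data
 * buffer.
 */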
#define DMA_DUMMY_DATA			((void *)~0)

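/* Byte size of one queue's register block; matches the four 32-bit registers
 * of struct mt76_queue_regs accessed by Q_READ()/Q_WRITE() below.
 */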
#define MT_RING_SIZE			0x10

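/* Bitfields of the DMA descriptor words (see struct mt76_desc below); the
 * exact layout varies between chip generations.
 */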
#define MT_DMA_CTL_SD_LEN1		GENMASK(13, 0)
#define MT_DMA_CTL_LAST_SEC1		BIT(14)
#define MT_DMA_CTL_BURST		BIT(15)
#define MT_DMA_CTL_SD_LEN0		GENMASK(29, 16)
#define MT_DMA_CTL_LAST_SEC0		BIT(30)
#define MT_DMA_CTL_DMA_DONE		BIT(31)
#define MT_DMA_CTL_TO_HOST		BIT(8)
#define MT_DMA_CTL_TO_HOST_A		BIT(12)
#define MT_DMA_CTL_DROP			BIT(14)
#define MT_DMA_CTL_TOKEN		GENMASK(31, 16)
#define MT_DMA_CTL_SDP1_H		GENMASK(19, 16)
#define MT_DMA_CTL_SDP0_H		GENMASK(3, 0)
#define MT_DMA_CTL_WO_DROP		BIT(8)

#define MT_DMA_PPE_CPU_REASON		GENMASK(15, 11)
#define MT_DMA_PPE_ENTRY		GENMASK(30, 16)
#define MT_DMA_INFO_DMA_FRAG		BIT(9)
#define MT_DMA_INFO_PPE_VLD		BIT(31)

#define MT_DMA_CTL_PN_CHK_FAIL		BIT(13)
#define MT_DMA_CTL_VER_MASK		BIT(7)

#define MT_DMA_SDP0			GENMASK(15, 0)
#define MT_DMA_TOKEN_ID			GENMASK(31, 16)
#define MT_DMA_MAGIC_MASK		GENMASK(31, 28)
#define MT_DMA_RRO_EN			BIT(13)

#define MT_DMA_MAGIC_CNT		16

#define MT_DMA_WED_IND_CMD_CNT		8
#define MT_DMA_WED_IND_REASON		GENMASK(15, 12)

#define MT_DMA_HDR_LEN			4
#define MT_RX_INFO_LEN			4
#define MT_FCE_INFO_LEN			4
#define MT_RX_RXWI_LEN			32

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

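/* Queue register accessors: rings owned by the WED (Wireless Ethernet
 * Dispatch) block are reached through the WED register window, all others
 * through plain MMIO.
 */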
#define Q_READ(_q, _field) ({						\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read((_q)->wed,		\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_q, _field, _val)	do {				\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write((_q)->wed,			\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#elif IS_ENABLED(CONFIG_MT76_NPU)

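/* Airoha NPU variant: ring registers sit behind the NPU regmap. The npu
 * pointer is RCU-protected as it may go away at runtime; reads fall back to
 * 0 when the NPU is unavailable.
 */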
#define Q_READ(_q, _field) ({						\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val = 0;							\
	if ((_q)->flags & MT_QFLAG_NPU) {				\
		struct airoha_npu *npu;					\
									\
		rcu_read_lock();					\
		npu = rcu_dereference((_q)->dev->mmio.npu);		\
		if (npu)						\
			regmap_read(npu->regmap,			\
				    ((_q)->wed_regs + _offset), &_val);	\
		rcu_read_unlock();					\
	} else {							\
		_val = readl(&(_q)->regs->_field);			\
	}								\
	_val;								\
})

#define Q_WRITE(_q, _field, _val)	do {				\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_NPU) {				\
		struct airoha_npu *npu;					\
									\
		rcu_read_lock();					\
		npu = rcu_dereference((_q)->dev->mmio.npu);		\
		if (npu)						\
			regmap_write(npu->regmap,			\
				     ((_q)->wed_regs + _offset), _val);	\
		rcu_read_unlock();					\
	} else {							\
		writel(_val, &(_q)->regs->_field);			\
	}								\
} while (0)

#else

#define Q_READ(_q, _field)		readl(&(_q)->regs->_field)
#define Q_WRITE(_q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

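/* Generic TX/RX DMA descriptor: two buffer pointers, a ctrl word holding the
 * segment lengths and the DMA_DONE flag, and an info word.
 */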
struct mt76_desc {
	__le32 buf0;
	__le32 ctrl;
	__le32 buf1;
	__le32 info;
} __packed __aligned(4);

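/* Compact descriptor used on WED RRO (RX reordering offload) rings: buffer
 * pointers only, no ctrl/info words.
 */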
struct mt76_wed_rro_desc {
	__le32 buf0;
	__le32 buf1;
} __packed __aligned(4);

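/* rxdmad_c descriptor written back by the RRO hardware; the masks below
 * select fields within its data1..data3 words.
 */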
/* data1 */
#define RRO_RXDMAD_DATA1_LS_MASK		BIT(30)
#define RRO_RXDMAD_DATA1_SDL0_MASK		GENMASK(29, 16)
/* data2 */
#define RRO_RXDMAD_DATA2_RX_TOKEN_ID_MASK	GENMASK(31, 16)
#define RRO_RXDMAD_DATA2_IND_REASON_MASK	GENMASK(15, 12)
/* data3 */
#define RRO_RXDMAD_DATA3_MAGIC_CNT_MASK		GENMASK(31, 28)
struct mt76_rro_rxdmad_c {
	__le32 data0;
	__le32 data1;
	__le32 data2;
	__le32 data3;
};

enum mt76_qsel {
	MT_QSEL_MGMT,
	MT_QSEL_HCCA,
	MT_QSEL_EDCA,
	MT_QSEL_EDCA_2,
};

enum mt76_mcu_evt_type {
	EVT_CMD_DONE,
	EVT_CMD_ERROR,
	EVT_CMD_RETRY,
	EVT_EVENT_PWR_RSP,
	EVT_EVENT_WOW_RSP,
	EVT_EVENT_CARRIER_DETECT_RSP,
	EVT_EVENT_DFS_DETECT_RSP,
};

enum mt76_dma_wed_ind_reason {
	MT_DMA_WED_IND_REASON_NORMAL,
	MT_DMA_WED_IND_REASON_REPEAT,
	MT_DMA_WED_IND_REASON_OLDPKT,
};

int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct);
void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
			  bool reset_idx);

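/* Reset a TX queue and, if a WED device is active, re-attach the queue to
 * the WED DMA engine.
 */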
static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	dev->queue_ops->reset_q(dev, q, true);
	if (mtk_wed_device_active(&dev->mmio.wed))
		mt76_wed_dma_setup(dev, q, true);
}

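/* Evaluate the RX drop conditions for a completed descriptor: frames flagged
 * TO_HOST_A or DROP are discarded; when MT_DMA_CTL_VER_MASK is set, the WED
 * indication reason in buf1 is consulted as well, dropping repeated packets,
 * stale non-fragment packets and PN check failures.
 */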
static inline void
mt76_dma_should_drop_buf(bool *drop, u32 ctrl, u32 buf1, u32 info)
{
	if (!drop)
		return;

	*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A | MT_DMA_CTL_DROP));
	if (!(ctrl & MT_DMA_CTL_VER_MASK))
		return;

	switch (FIELD_GET(MT_DMA_WED_IND_REASON, buf1)) {
	case MT_DMA_WED_IND_REASON_REPEAT:
		*drop = true;
		break;
	case MT_DMA_WED_IND_REASON_OLDPKT:
		*drop = !(info & MT_DMA_INFO_DMA_FRAG);
		break;
	default:
		*drop = !!(ctrl & MT_DMA_CTL_PN_CHK_FAIL);
		break;
	}
}

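/* net_devices registered by mt76 keep a single pointer to the owning
 * mt76_dev in their private area; dereference it.
 */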
static inline void *mt76_priv(struct net_device *dev)
{
	struct mt76_dev **priv;

	priv = netdev_priv(dev);

	return *priv;
}

#endif /* __MT76_DMA_H */