// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2023 Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include "mt76.h"
#include "dma.h"

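/* Give back every rx buffer handed to the WED (Wireless Ethernet Dispatch)
 * hardware: release each rx token, return the page-pool page it points to,
 * put the associated rxwi cache entry, then free any rxwi still pending.
 */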
void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
	int i;

	for (i = 0; i < dev->rx_token_size; i++) {
		struct mt76_txwi_cache *t;

		t = mt76_rx_token_release(dev, i);
		if (!t || !t->ptr)
			continue;

		mt76_put_page_pool_buf(t->ptr, false);
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);
	}

	mt76_free_pending_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_wed_release_rx_buf);

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
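/* Pre-allocate @size rx buffers for the WED rx buffer manager ring. Each
 * buffer is taken from the MT_RXQ_MAIN page pool; its DMA address and an
 * rx token identifying the (buffer, rxwi) pair are written into the
 * corresponding buffer manager descriptor. On failure, everything queued
 * so far is released again and -ENOMEM is returned.
 */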
u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
	struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, len = SKB_WITH_OVERHEAD(q->buf_size);
	struct mt76_txwi_cache *t = NULL;

	for (i = 0; i < size; i++) {
		enum dma_data_direction dir;
		dma_addr_t addr;
		u32 offset;
		int token;
		void *buf;

		t = mt76_get_rxwi(dev);
		if (!t)
			goto unmap;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			goto unmap;

		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		dir = page_pool_get_dma_dir(q->page_pool);
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

		desc->buf0 = cpu_to_le32(addr);
		token = mt76_rx_token_consume(dev, buf, t, addr);
		if (token < 0) {
			mt76_put_page_pool_buf(buf, false);
			goto unmap;
		}

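		/* buf0 carries only the low 32 address bits; on 64-bit DMA
		 * platforms the high bits are packed into the token word
		 * next to the rx token itself.
		 */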
		token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
#endif
		desc->token |= cpu_to_le32(token);
		desc++;
	}

	return 0;

unmap:
	if (t)
		mt76_put_rxwi(dev, t);
	mt76_wed_release_rx_buf(wed);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mt76_wed_init_rx_buf);

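/* Shrink the tx token range to [0, token_start) so the tokens above it are
 * left to the WED block, then wait up to one second for WED tokens still
 * in flight to be released. Returns nonzero if they fail to drain in time.
 */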
int mt76_wed_offload_enable(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	spin_lock_bh(&dev->token_lock);
	dev->token_size = wed->wlan.token_start;
	spin_unlock_bh(&dev->token_lock);

	return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
}
EXPORT_SYMBOL_GPL(mt76_wed_offload_enable);

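/* Attach a queue to its WED ring according to the WED type/ring number
 * encoded in q->flags. Queues without an active WED device are demoted to
 * plain DMA queues and the function returns 0 without touching WED.
 */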
int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
	int ret = 0, type, ring;
	u16 flags;

	if (!q || !q->ndesc)
		return -EINVAL;

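	/* Save the queue flags: WED-specific bits are temporarily cleared
	 * below whenever the host must initialize the ring itself, and the
	 * original value is restored at the end of the switch.
	 */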
	flags = q->flags;
	if (!q->wed || !mtk_wed_device_active(q->wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
						   reset);
		if (!ret)
			q->wed_regs = q->wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q, false);

		ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
		if (!ret)
			q->wed_regs = q->wed->txfree_ring.reg_base;
		break;
	case MT76_WED_Q_RX:
		ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
						   reset);
		if (!ret)
			q->wed_regs = q->wed->rx_ring[ring].reg_base;
		break;
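	/* The RRO (hardware rx reordering) rings below are managed by the
	 * WED block itself: the WED flag is dropped so the generic DMA
	 * helpers leave them alone, and the data/msdu_pg rings are marked
	 * fully queued so the host never tries to refill them.
	 */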
	case MT76_WED_RRO_Q_DATA:
		q->flags &= ~MT_QFLAG_WED;
		__mt76_dma_queue_reset(dev, q, false);
		mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
		q->head = q->ndesc - 1;
		q->queued = q->head;
		break;
	case MT76_WED_RRO_Q_MSDU_PG:
		q->flags &= ~MT_QFLAG_WED;
		__mt76_dma_queue_reset(dev, q, false);
		mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
		q->head = q->ndesc - 1;
		q->queued = q->head;
		break;
	case MT76_WED_RRO_Q_IND:
		q->flags &= ~MT_QFLAG_WED;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q, false);
		mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	q->flags = flags;

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_wed_dma_setup);
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

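/* Undo mt76_wed_offload_enable(): hand the full tx token range back to the
 * driver once WED offload is switched off.
 */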
void mt76_wed_offload_disable(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	spin_lock_bh(&dev->token_lock);
	dev->token_size = dev->drv->token_size;
	spin_unlock_bh(&dev->token_lock);
}
EXPORT_SYMBOL_GPL(mt76_wed_offload_disable);

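/* WED reset-complete callback: wakes up mt76_wed_dma_reset(), which is
 * blocked on mmio.wed_reset_complete below.
 */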
void mt76_wed_reset_complete(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	complete(&dev->mmio.wed_reset_complete);
}
EXPORT_SYMBOL_GPL(mt76_wed_reset_complete);

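/* mac80211 .net_setup_tc hook: forward tc offload requests for @netdev to
 * the WED block, or report -EOPNOTSUPP when WED is not active so the stack
 * falls back to software processing.
 */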
int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct net_device *netdev, enum tc_setup_type type,
			  void *type_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mtk_wed_device *wed = &phy->dev->mmio.wed;

	if (!mtk_wed_device_active(wed))
		return -EOPNOTSUPP;

	return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
}
EXPORT_SYMBOL_GPL(mt76_wed_net_setup_tc);

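/* Run the WED side of a DMA reset: signal the waiter blocked on
 * mmio.wed_reset and wait up to three seconds for the reset-complete
 * acknowledgment (see mt76_wed_reset_complete() above).
 */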
void mt76_wed_dma_reset(struct mt76_dev *dev)
{
	struct mt76_mmio *mmio = &dev->mmio;

	if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
		return;

	complete(&mmio->wed_reset);

	if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
		dev_err(dev->dev, "wed reset complete timeout\n");
}
EXPORT_SYMBOL_GPL(mt76_wed_dma_reset);