// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *         Roy Luo <royluo@google.com>
 *         Felix Fietkau <nbd@nbd.name>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>

#include "mt7615.h"
#include "../dma.h"
#include "mac.h"

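/*
 * Build the cut-through TXP descriptor consumed by the MT7615 firmware:
 * record the DMA fragments that follow the TXD, hand only a partial MAC
 * header to the firmware for parsing, and tag the descriptor with the tx
 * token used to match the completion back to this frame.
 */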
static void
mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
		    void *txp_ptr, u32 id)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_fw_txp *txp = txp_ptr;
	u8 *rept_wds_wcid = (u8 *)&txp->rept_wds_wcid;
	int nbuf = tx_info->nbuf - 1;
	int i;

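	/* buf[0] carries the TXD + TXP; fragments 1..nbuf are the frame data */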
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	/* pass partial skb header to fw */
	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;

		txp->bss_idx = mvif->idx;
	}

	txp->token = cpu_to_le16(id);
	*rept_wds_wcid = 0xff;
}

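/*
 * mt76 .tx_prepare_skb hook: fill the hardware TXWI, allocate a tx token
 * for status reporting, and append the TXP (firmware format on MT7615,
 * common connac hardware format on the other chips handled here).
 */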
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid, id;
	u8 *txwi = (u8 *)txwi_ptr;
	struct mt76_txwi_cache *t;
	struct mt7615_sta *msta;
	void *txp;

	msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
		struct mt7615_phy *phy = &dev->phy;
		u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;

		if (phy_idx && mdev->phys[MT_BAND1])
			phy = mdev->phys[MT_BAND1]->priv;

		spin_lock_bh(&dev->mt76.lock);
		mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
				     msta->rates);
		spin_unlock_bh(&dev->mt76.lock);
	}

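	/* the mt76_txwi_cache entry is stored right behind the hardware TXWI */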
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_get(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
			      pid, key, qid, false);

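	/* the TXP descriptor immediately follows the fixed-size TXD */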
	txp = txwi + MT_TXD_SIZE;
	memset(txp, 0, sizeof(struct mt76_connac_txp_common));
	if (is_mt7615(&dev->mt76))
		mt7615_write_fw_txp(dev, tx_info, txp, id);
	else
		mt76_connac_write_hw_txp(mdev, tx_info, txp, id);

	tx_info->skb = NULL;

	return 0;
}

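/*
 * Stop WPDMA, flush all pending tx/MCU queues and rx rings, then restart
 * the DMA engine; used from the MAC reset path below.
 */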
void mt7615_dma_reset(struct mt7615_dev *dev)
{
	int i;

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);

	usleep_range(1000, 2000);

	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	mt76_tx_status_check(&dev->mt76, true);

	mt7615_dma_start(dev);
}
EXPORT_SYMBOL_GPL(mt7615_dma_reset);

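/*
 * Kick the MCU with an interrupt event; MT7663 exposes the event register
 * at a different offset, and the mt7622 HIF interrupt toggle is expected
 * to be a no-op on the other chips.
 */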
static void
mt7615_hif_int_event_trigger(struct mt7615_dev *dev, u8 event)
{
	u32 reg = MT_MCU_INT_EVENT;

	if (is_mt7663(&dev->mt76))
		reg = MT7663_MCU_INT_EVENT;

	mt76_wr(dev, reg, event);

	mt7622_trigger_hif_int(dev, true);
	mt7622_trigger_hif_int(dev, false);
}

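/* wait for the MCU to report the requested reset state, warn on timeout */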
static bool
mt7615_wait_reset_state(struct mt7615_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->reset_state) & state),
				 MT7615_RESET_TIMEOUT);
	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

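/* restore the beacon template of a beaconing interface after reset */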
static void
mt7615_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;
	struct mt7615_dev *dev = mt7615_hw_dev(hw);

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7615_mcu_add_beacon(dev, hw, vif,
				      vif->bss_conf.enable_beacon);
		break;
	default:
		break;
	}
}

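/* re-program beacons on the main phy and, if present, the extension phy */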
static void
mt7615_update_beacons(struct mt7615_dev *dev)
{
	struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt7615_update_vif_beacon, dev->mt76.hw);

	if (!mphy_ext)
		return;

	ieee80211_iterate_active_interfaces(mphy_ext->hw,
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt7615_update_vif_beacon, mphy_ext->hw);
}

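/*
 * Full MAC reset handshake with the MCU: stop mac80211 queues and NAPI,
 * acknowledge MT_MCU_INT_EVENT_PDMA_STOPPED, reset the DMA engine and tx
 * tokens once the MCU reports MT_MCU_CMD_RESET_DONE, then bring traffic
 * back up and re-program the beacon templates.
 */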
void mt7615_mac_reset_work(struct work_struct *work)
{
	struct mt7615_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7615_dev *dev;
	unsigned long timeout;
	int i;

	dev = container_of(work, struct mt7615_dev, reset_work);
	ext_phy = dev->mt76.phys[MT_BAND1];
	phy2 = ext_phy ? ext_phy->priv : NULL;

	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_PDMA))
		return;

	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	del_timer_sync(&dev->phy.roc_timer);
	cancel_work_sync(&dev->phy.roc_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
		del_timer_sync(&phy2->roc_timer);
		cancel_work_sync(&phy2->roc_work);
	}

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (ext_phy)
		mt76_txq_schedule_all(ext_phy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i)
		napi_disable(&dev->mt76.napi[i]);
	napi_disable(&dev->mt76.tx_napi);

	mt7615_mutex_acquire(dev);

	mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_STOPPED);

	if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7615_dma_reset(dev);

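		/* DMA was reset, so release every outstanding tx token */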
		mt7615_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);

		mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_INIT);
		mt7615_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_RESET_DONE);
	mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mt7615_update_beacons(dev);

	mt7615_mutex_release(dev);

	timeout = mt7615_get_macwork_timeout(dev);
	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     timeout);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw,
					     &phy2->mt76->mac_work, timeout);
}