xref: /linux/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c (revision b7d3826c2ed6c3e626e7ae796c5df2c0d2551c6a)
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/firmware.h>

#include "mt76.h"
#include "mt76x02_dma.h"
#include "mt76x02_mcu.h"
#include "mt76x02_usb.h"

#define MT_CMD_HDR_LEN			4

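/* FCE DMA and descriptor index registers programmed during firmware upload */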
#define MT_FCE_DMA_ADDR			0x0230
#define MT_FCE_DMA_LEN			0x0234

#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX	0x09a8

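/*
 * Allocate an skb for an MCU command: reserve headroom for the DMA info
 * header and leave tailroom for the zero padding added before transmission.
 */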
static struct sk_buff *
mt76x02u_mcu_msg_alloc(const void *data, int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, MT_CMD_HDR_LEN);
	skb_put_data(skb, data, len);

	return skb;
}

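/*
 * Decode a register read response: either consecutive values starting at
 * the first requested register (burst mode) or explicit address/value
 * pairs, copied back into the pending usb->mcu.rp array.
 */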
static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	u32 reg, val;
	int i;

	if (usb->mcu.burst) {
		WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);

		reg = usb->mcu.rp[0].reg - usb->mcu.base;
		for (i = 0; i < usb->mcu.rp_len; i++) {
			val = get_unaligned_le32(data + 4 * i);
			usb->mcu.rp[i].reg = reg++;
			usb->mcu.rp[i].value = val;
		}
	} else {
		WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);

		for (i = 0; i < usb->mcu.rp_len; i++) {
			reg = get_unaligned_le32(data + 8 * i) -
			      usb->mcu.base;
			val = get_unaligned_le32(data + 8 * i + 4);

			WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
			usb->mcu.rp[i].value = val;
		}
	}
}

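/*
 * Wait for the MCU to complete a command: poll the response completion up
 * to five times (300ms each), decode pending register reads, resubmit the
 * response buffer and match the sequence number carried in the RX FCE info.
 */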
static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76u_buf *buf = &usb->mcu.res;
	struct urb *urb = buf->urb;
	int i, ret;
	u32 rxfce;
	u8 *data;

	for (i = 0; i < 5; i++) {
		if (!wait_for_completion_timeout(&usb->mcu.cmpl,
						 msecs_to_jiffies(300)))
			continue;

		if (urb->status)
			return -EIO;

		data = sg_virt(&urb->sg[0]);
		if (usb->mcu.rp)
			mt76x02u_multiple_mcu_reads(dev, data + 4,
						    urb->actual_length - 8);

		rxfce = get_unaligned_le32(data);
		ret = mt76u_submit_buf(dev, USB_DIR_IN,
				       MT_EP_IN_CMD_RESP,
				       buf, GFP_KERNEL,
				       mt76u_mcu_complete_urb,
				       &usb->mcu.cmpl);
		if (ret)
			return ret;

		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
		    FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
			return 0;

		dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
			FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
	}

	dev_err(dev->dev, "error: %s timed out\n", __func__);
	return -ETIMEDOUT;
}

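/*
 * Push a command on the in-band command bulk endpoint and optionally wait
 * for the matching EVT_CMD_DONE event. Caller must hold usb->mcu.mutex.
 */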
static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
			int cmd, bool wait_resp)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;
	int ret, sent;
	u8 seq = 0;
	u32 info;

	if (!skb)
		return -EINVAL;

	if (test_bit(MT76_REMOVED, &dev->state))
		return 0;

	pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
	if (wait_resp) {
		seq = ++usb->mcu.msg_seq & 0xf;
		if (!seq)
			seq = ++usb->mcu.msg_seq & 0xf;
	}

	info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
	       FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
	       MT_MCU_MSG_TYPE_CMD;
	ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
	if (ret)
		return ret;

	ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
	if (ret)
		return ret;

	if (wait_resp)
		ret = mt76x02u_mcu_wait_resp(dev, seq);

	consume_skb(skb);

	return ret;
}

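/* Locked wrapper: MCU commands are serialized with usb->mcu.mutex. */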
static int
mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
		      int cmd, bool wait_resp)
{
	struct mt76_usb *usb = &dev->usb;
	int err;

	mutex_lock(&usb->mcu.mutex);
	err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
	mutex_unlock(&usb->mcu.mutex);

	return err;
}

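/* Append a 32-bit word in little-endian order to an MCU command. */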
static inline void skb_put_le32(struct sk_buff *skb, u32 val)
{
	put_unaligned_le32(val, skb_put(skb, 4));
}

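/*
 * Issue a CMD_RANDOM_WRITE for a list of register/value pairs. Requests
 * larger than one in-band packet are split into chunks; only the final
 * chunk waits for the MCU response.
 */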
static int
mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
		   const struct mt76_reg_pair *data, int n)
{
	const int CMD_RANDOM_WRITE = 12;
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	ret = mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
	if (ret)
		return ret;

	return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt);
}

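/*
 * Issue a CMD_RANDOM_READ for a list of registers. The response is
 * decoded into @data by mt76x02u_multiple_mcu_reads() through the
 * usb->mcu.rp pointer; reads that do not fit in a single in-band packet
 * are rejected with -EINVAL.
 */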
static int
mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
		   struct mt76_reg_pair *data, int n)
{
	const int CMD_RANDOM_READ = 10;
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	struct mt76_usb *usb = &dev->usb;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);
	if (cnt != n)
		return -EINVAL;

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&usb->mcu.mutex);

	usb->mcu.rp = data;
	usb->mcu.rp_len = n;
	usb->mcu.base = base;
	usb->mcu.burst = false;

	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);

	usb->mcu.rp = NULL;

	mutex_unlock(&usb->mcu.mutex);

	return ret;
}

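/* Reset the MCU firmware with a MT_VEND_DEV_MODE vendor request. */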
void mt76x02u_mcu_fw_reset(struct mt76_dev *dev)
{
	mt76u_vendor_request(dev, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_VENDOR,
			     0x1, 0, NULL, 0);
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);

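/*
 * Upload a single firmware chunk: build the DMA header in the URB buffer,
 * program the FCE DMA destination address and length, send the chunk on
 * the in-band command endpoint and advance the TX CPU descriptor index.
 */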
static int
__mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
			    const void *fw_data, int len, u32 dst_addr)
{
	u8 *data = sg_virt(&buf->urb->sg[0]);
	DECLARE_COMPLETION_ONSTACK(cmpl);
	__le32 info;
	u32 val;
	int err;

	info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
			   FIELD_PREP(MT_MCU_MSG_LEN, len) |
			   MT_MCU_MSG_TYPE_CMD);

	memcpy(data, &info, sizeof(info));
	memcpy(data + sizeof(info), fw_data, len);
	memset(data + sizeof(info) + len, 0, 4);

	mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_ADDR, dst_addr);
	len = roundup(len, 4);
	mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_LEN, len << 16);

	buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
	err = mt76u_submit_buf(dev, USB_DIR_OUT,
			       MT_EP_OUT_INBAND_CMD,
			       buf, GFP_KERNEL,
			       mt76u_mcu_complete_urb, &cmpl);
	if (err < 0)
		return err;

	if (!wait_for_completion_timeout(&cmpl,
					 msecs_to_jiffies(1000))) {
		dev_err(dev->dev, "firmware upload timed out\n");
		usb_kill_urb(buf->urb);
		return -ETIMEDOUT;
	}

	if (mt76u_urb_error(buf->urb)) {
		dev_err(dev->dev, "firmware upload failed: %d\n",
			buf->urb->status);
		return buf->urb->status;
	}

	val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}

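/*
 * Stream a firmware image to the device in max_payload sized URBs,
 * leaving 8 bytes per chunk for the DMA header and trailing padding.
 */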
int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
			      int data_len, u32 max_payload, u32 offset)
{
	int err, len, pos = 0, max_len = max_payload - 8;
	struct mt76u_buf buf;

	err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload,
			      GFP_KERNEL);
	if (err < 0)
		return err;

	while (data_len > 0) {
		len = min_t(int, data_len, max_len);
		err = __mt76x02u_mcu_fw_send_data(dev, &buf, data + pos,
						  len, offset + pos);
		if (err < 0)
			break;

		data_len -= len;
		pos += len;
		usleep_range(5000, 10000);
	}
	mt76u_buf_free(&buf);

	return err;
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);

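/* Register the USB-specific MCU ops for mt76x02 based devices. */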
void mt76x02u_init_mcu(struct mt76_dev *dev)
{
	static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
		.mcu_msg_alloc = mt76x02u_mcu_msg_alloc,
		.mcu_send_msg = mt76x02u_mcu_send_msg,
		.mcu_wr_rp = mt76x02u_mcu_wr_rp,
		.mcu_rd_rp = mt76x02u_mcu_rd_rp,
	};

	dev->mcu_ops = &mt76x02u_mcu_ops;
}
EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");