// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#if defined(__FreeBSD__)
#define	LINUXKPI_PARAM_PREFIX	mt76_usb_
#endif

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

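/* Issue a vendor control request on endpoint 0, retrying up to
 * MT_VEND_REQ_MAX_RETRY times with a short delay between attempts.
 * The caller must hold usb_ctrl_mtx; -ENODEV/-EPROTO mark the device
 * as removed and abort further retries.
 */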
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;
	int i, ret;

	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV || ret == -EPROTO)
			set_bit(MT76_REMOVED, &dev->phy.state);
		if (ret >= 0 || ret == -ENODEV || ret == -EPROTO)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(__mt76u_vendor_request);

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

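/* Register accessors: 32-bit MMIO-style reads and writes are tunneled
 * through vendor control requests. The locked variants (mt76u_rr/wr/rmw)
 * serialize on usb_ctrl_mtx; the underscore-prefixed variants expect the
 * caller to hold it.
 */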
u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	int ret;

	ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
				     addr, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}
EXPORT_SYMBOL_GPL(___mt76u_rr);

static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}

	return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
			   addr & ~MT_VEND_TYPE_MASK);
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req, req_type, addr >> 16,
			       addr, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}
EXPORT_SYMBOL_GPL(___mt76u_wr);

static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
		    addr & ~MT_VEND_TYPE_MASK, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

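/* Bulk register/memory copy helpers: data is staged through the usb->data
 * bounce buffer in chunks of at most usb->data_len bytes per vendor request.
 */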
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u8 *val = data;
	int ret;
	int current_batch_size;
	int i = 0;

	/* Ensure that a multiple of 4 bytes is always copied,
	 * otherwise beacons can be corrupted.
	 * See: "mt76: round up length on mt76_wr_copy"
	 * Commit 850e8f6fbd5d0003b0
	 */
	len = round_up(len, 4);

	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		current_batch_size = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, current_batch_size);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i, usb->data,
					     current_batch_size);
		if (ret < 0)
			break;

		i += current_batch_size;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int i = 0, batch_len, ret;
	u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
					     USB_DIR_IN | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		memcpy(val + i, usb->data, batch_len);
		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_read_copy);

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

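/* Register-pair helpers: once the MCU is running, reads and writes of
 * {reg, value} pairs are delegated to the MCU ops; before that they are
 * performed directly via vendor requests.
 */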
static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

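/* Scatter-gather is only used when the module parameter allows it and the
 * host controller advertises unconstrained SG support.
 */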
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		udev->bus->no_sg_constraint);
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

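/* Populate the scatterlist of an rx URB with page-pool buffers; returns the
 * number of buffers allocated, or -ENOMEM if none could be obtained.
 */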
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		void *data;
		int offset;

		data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!data)
			break;

		sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
			    offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

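/* Refill an rx URB: the main rx queue uses scatter-gather when enabled, all
 * other queues (and non-SG setups) use a single page-pool buffer.
 */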
static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct urb *urb, int nsgs)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int offset;

	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
		return mt76u_fill_rx_sg(dev, q, urb, nsgs);

	urb->transfer_buffer_length = q->buf_size;
	urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);

	return urb->transfer_buffer ? 0 : -ENOMEM;
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
		   struct mt76_queue_entry *e)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int err, sg_size;

	sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
	err = mt76u_urb_alloc(dev, e, sg_size);
	if (err)
		return err;

	return mt76u_refill_rx(dev, q, e->urb, sg_size);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);

	if (urb->transfer_buffer)
		mt76_put_page_pool_buf(urb->transfer_buffer, false);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->tail].urb;
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

static int
mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
		       u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
		return dma_len;

	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

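/* Build an skb around the received buffer. If the payload plus headroom does
 * not fit in the buffer together with skb_shared_info, fall back to a slow
 * path that copies the first MT_SKB_HEAD_LEN bytes and attaches the
 * remainder as a page fragment.
 */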
static struct sk_buff *
mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
		   int len, int buf_size)
{
	int head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
		data += head_room + MT_SKB_HEAD_LEN;
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	/* fast path */
	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_room);
	__skb_put(skb, len);

	return skb;
}

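/* Process one completed rx URB: validate the DMA length, build an skb from
 * the first buffer, chain any remaining SG fragments as page frags and hand
 * the frame to the driver. Returns the number of buffers consumed, i.e. how
 * many need to be refilled.
 */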
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
		       int buf_size)
{
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
		return 0;

	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
	if (len < 0)
		return 0;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	data_len = min_t(int, len, data_len - head_room);

	if (len == data_len &&
	    dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len))
		return 0;

	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset, data_len,
				buf_size);
		len -= data_len;
		nsgs++;
	}

	skb_mark_for_recycle(skb);
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);

	return nsgs;
}

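/* rx URB completion handler: on success (or a non-fatal error, which is
 * logged) advance the queue head and kick the rx worker so the frame gets
 * processed and the URB resubmitted; unrecoverable errors just return.
 */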
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue *q = urb->context;
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
	case -EPROTO:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
		goto out;

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;
	mt76_worker_schedule(&dev->usb.rx_worker);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
		    struct urb *urb)
{
	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;

	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
			    mt76u_complete_rx, &dev->q_rx[qid]);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb;
	int err, count;

	while (true) {
		urb = mt76u_get_next_rx_entry(q);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, urb, count);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, qid, urb);
	}
	if (qid == MT_RXQ_MAIN) {
		local_bh_disable();
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
		local_bh_enable();
	}
}

static void mt76u_rx_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	int i;

	rcu_read_lock();
	mt76_for_each_q_rx(dev, i)
		mt76u_process_rx_queue(dev, &dev->q_rx[i]);
	rcu_read_unlock();
}

static int
mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}

static int
mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i, err;

	err = mt76_create_page_pool(dev, q);
	if (err)
		return err;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev, qid);
}

int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
{
	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);

static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	for (i = 0; i < q->ndesc; i++) {
		if (!q->entry[i].urb)
			continue;

		mt76u_urb_free(q->entry[i].urb);
		q->entry[i].urb = NULL;
	}
	page_pool_destroy(q->page_pool);
	q->page_pool = NULL;
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i)
		mt76u_free_rx_queue(dev, &dev->q_rx[i]);
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++)
			usb_poison_urb(q->entry[j].urb);
	}
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int err, j;

		for (j = 0; j < q->ndesc; j++)
			usb_unpoison_urb(q->entry[j].urb);

		err = mt76u_submit_rx_buffers(dev, i);
		if (err < 0)
			return err;
	}

	mt76_worker_enable(&dev->usb.rx_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

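/* Tx status worker: reap completed tx URBs on all data queues, wake up
 * waiters once a queue drains and, if the driver provides tx_status_data,
 * schedule the stats work to fetch per-packet status from the device.
 */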
static void mt76u_status_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i;

	if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		return;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		while (q->queued > 0) {
			if (!q->entry[q->tail].done)
				break;

			entry = q->entry[q->tail];
			q->entry[q->tail].done = false;

			mt76_queue_tx_complete(dev, q, &entry);
		}

		if (!q->queued)
			wake_up(&dev->tx_wait);

		mt76_worker_schedule(&dev->tx_worker);
	}

	if (dev->drv->tx_status_data &&
	    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
		queue_work(dev->wq, &dev->usb.stat_work);
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &usb->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	mt76_worker_schedule(&dev->usb.status_worker);
}

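/* Map an skb onto a tx URB: either point the URB at the linear skb data or,
 * when scatter-gather is enabled, build an sg table covering all fragments.
 */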
static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	}

	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	if (!urb->num_sgs)
		return -ENOMEM;

	return urb->num_sgs;
}

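/* Queue an skb for transmission: let the driver prepare the tx descriptor,
 * attach the skb to the next free URB and advance the queue head. The URB
 * is submitted later by mt76u_tx_kick().
 */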
static int
mt76u_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
		   enum mt76_txq_id qid, struct sk_buff *skb,
		   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct mt76_dev *dev = phy->dev;
	u16 idx = q->head;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb,
			    mt76u_complete_tx, &q->entry[idx]);

	q->head = (q->head + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->entry[idx].wcid = 0xffff;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->head) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->phy.state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

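/* Map a mac80211 access category to the hardware queue index and the
 * bulk-out endpoint used for that queue; chip 0x7663 uses its own LMAC
 * mapping, 0x7961/0x7925 route PSD frames to the HCCA endpoint.
 */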
static void
mt76u_ac_to_hwq(struct mt76_dev *dev, struct mt76_queue *q, u8 qid)
{
	u8 ac = qid < IEEE80211_NUM_ACS ? qid : IEEE80211_AC_BE;

	switch (mt76_chip(dev)) {
	case 0x7663: {
		static const u8 lmac_queue_map[] = {
			/* ac to lmac mapping */
			[IEEE80211_AC_BK] = 0,
			[IEEE80211_AC_BE] = 1,
			[IEEE80211_AC_VI] = 2,
			[IEEE80211_AC_VO] = 4,
		};

		q->hw_idx = lmac_queue_map[ac];
		q->ep = q->hw_idx + 1;
		break;
	}
	case 0x7961:
	case 0x7925:
		q->hw_idx = mt76_ac_to_hwq(ac);
		q->ep = qid == MT_TXQ_PSD ? MT_EP_OUT_HCCA : q->hw_idx + 1;
		break;
	default:
		q->hw_idx = mt76_ac_to_hwq(ac);
		q->ep = q->hw_idx + 1;
		break;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		struct mt76_queue *q;
		int j, err;

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		mt76u_ac_to_hwq(dev, q, i);
		dev->phy.q_tx[i] = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.status_worker);

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		struct mt76_queue *q;
		int j;

		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		for (j = 0; j < q->ndesc; j++) {
			usb_free_urb(q->entry[j].urb);
			q->entry[j].urb = NULL;
		}
	}
}

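/* Stop tx: wait briefly for pending frames to drain; on timeout kill all
 * outstanding tx URBs and flush the queues manually before re-enabling the
 * workers.
 */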
void mt76u_stop_tx(struct mt76_dev *dev)
{
	int ret;

	mt76_worker_disable(&dev->usb.status_worker);

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
				 HZ / 5);
	if (!ret) {
		struct mt76_queue_entry entry;
		struct mt76_queue *q;
		int i, j;

		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i <= MT_TXQ_PSD; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		mt76_worker_disable(&dev->tx_worker);

		/* On device removal we might still queue skbs, but
		 * mt76u_tx_kick() will fail to submit the urb; clean up
		 * those skbs manually.
		 */
		for (i = 0; i <= MT_TXQ_PSD; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			while (q->queued > 0) {
				entry = q->entry[q->tail];
				q->entry[q->tail].done = false;
				mt76_queue_tx_complete(dev, q, &entry);
			}
		}

		mt76_worker_enable(&dev->tx_worker);
	}

	cancel_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_worker_enable(&dev->usb.status_worker);

	mt76_tx_status_check(dev, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

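/* Common USB initialization: allocate the control bounce buffer, set up
 * endpoints, register the bus/queue ops and start the rx and tx-status
 * workers.
 */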
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	int err;

	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);

	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
	if (usb->data_len < 32)
		usb->data_len = 32;

	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
	if (!usb->data)
		return -ENOMEM;

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = ops;
	dev->queue_ops = &usb_queue_ops;

	dev_set_drvdata(&udev->dev, dev);

	usb->sg_en = mt76u_check_sg(dev);

	err = mt76u_set_endpoints(intf, usb);
	if (err < 0)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker,
				"usb-rx");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->status_worker,
				mt76u_status_worker, "usb-status");
	if (err)
		return err;

	sched_set_fifo_low(usb->rx_worker.task);
	sched_set_fifo_low(usb->status_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(__mt76u_init);

int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
{
	static struct mt76_bus_ops bus_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.read_copy = mt76u_read_copy,
		.write_copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};

	return __mt76u_init(dev, intf, &bus_ops);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_DESCRIPTION("MediaTek MT76x USB helpers");
MODULE_LICENSE("Dual BSD/GPL");