/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
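/*
 * Register write over the USB control endpoint: the vendor request code is
 * selected from the address type bits (config space vs. generic multi-write),
 * the register offset travels in the wIndex field of the control transfer and
 * the 32-bit value is sent little-endian in the data stage.
 */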
/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_device *udev = to_usb_device(dev->dev);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}
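/*
 * Walk the interface descriptor and record the bulk IN/OUT endpoint numbers
 * in the order they are found; the device is expected to expose exactly
 * __MT_EP_IN_MAX IN and __MT_EP_OUT_MAX OUT bulk endpoints.
 */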
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}
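/*
 * (Re)attach receive buffers to an rx urb: either populate the scatter-gather
 * list from the rx page fragment cache or, when SG is disabled, allocate a
 * single linear buffer of q->buf_size bytes.
 */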
static int
mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

	if (dev->usb.sg_en) {
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
	} else {
		urb->transfer_buffer_length = q->buf_size;
		urb->transfer_buffer = page_frag_alloc(&q->rx_page,
						       q->buf_size, gfp);
		return urb->transfer_buffer ? 0 : -ENOMEM;
	}
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
	int err;

	err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
	if (err)
		return err;

	return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
			       GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_device *udev = to_usb_device(dev->dev);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static inline struct urb *
mt76u_get_next_rx_entry(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->head].urb;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}
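/*
 * Turn a completed rx urb into an skb: validate the DMA length reported in
 * the usb header, build the skb around the first fragment and append any
 * remaining scatter-gather fragments as paged data before handing the frame
 * to the driver rx path. Returns the number of fragments consumed so the
 * caller knows how many buffers to refill.
 */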
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
	if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size)) {
		dev_err_ratelimited(dev->dev, "rx data too big %d\n", data_len);
		return 0;
	}

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	skb_reserve(skb, MT_DMA_HDR_LEN);
	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
{
	mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
			    mt76u_complete_rx, dev);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct urb *urb;
	int err, count;

	rcu_read_lock();

	while (true) {
		urb = mt76u_get_next_rx_entry(dev);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb);
		if (count > 0) {
			err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, urb);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
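/*
 * Allocate the main rx queue: the MCU response buffer, the queue entry array
 * and one pre-filled urb per descriptor, then submit all urbs to the bulk IN
 * endpoint.
 */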
static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_usb *usb = &dev->usb;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err;

	usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
	if (!usb->mcu.data)
		return -ENOMEM;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_urb_free(q->entry[i].urb);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_poison_urb(q->entry[i].urb);

	tasklet_kill(&dev->usb.rx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_unpoison_urb(q->entry[i].urb);

	return mt76u_submit_rx_buffers(dev);
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue_entry entry;
	struct mt76_sw_queue *sq;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		u32 n_dequeued = 0, n_sw_dequeued = 0;

		sq = &dev->q_tx[i];
		q = sq->q;

		while (q->queued > n_dequeued) {
			if (!q->entry[q->head].done)
				break;

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				n_sw_dequeued++;
			}

			entry = q->entry[q->head];
			q->entry[q->head].done = false;
			q->head = (q->head + 1) % q->ndesc;
			n_dequeued++;

			dev->drv->tx_complete_skb(dev, i, &entry);
		}

		spin_lock_bh(&q->lock);

		sq->swq_queued -= n_sw_dequeued;
		q->queued -= n_dequeued;

		wake = q->stopped && q->queued < q->ndesc - 8;
		if (wake)
			q->stopped = false;

		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		mt76_txq_schedule(dev, i);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	tasklet_schedule(&dev->tx_tasklet);
}
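/*
 * Map an skb onto a tx urb: with scatter-gather enabled the skb (including
 * its fragments) is converted into an sg list, otherwise the linear skb data
 * is used directly as the transfer buffer.
 */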
static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	} else {
		sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
		urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
		if (urb->num_sgs == 0)
			return -ENOMEM;
		return urb->num_sgs;
	}
}

static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->tail;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->tail) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		INIT_LIST_HEAD(&dev->q_tx[i].swq);

		if (i >= IEEE80211_NUM_ACS) {
			dev->q_tx[i].q = dev->q_tx[0].q;
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76_ac_to_hwq(i);
		dev->q_tx[i].q = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i].q;
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].urb);
	}
}
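/*
 * Stop tx processing: wait briefly for pending frames to drain and, on
 * timeout, kill all outstanding tx urbs, stop the tx tasklet and complete any
 * skbs still sitting on the queues so nothing is leaked on device removal.
 */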
void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i, j, ret;

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), HZ/5);
	if (!ret) {
		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;
			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		tasklet_kill(&dev->tx_tasklet);

		/* On device removal we might queue skb's, but mt76u_tx_kick()
		 * will fail to submit urb, cleanup those skb's manually.
		 */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i].q;

			/* Assure we are in sync with killed tasklet. */
			spin_lock_bh(&q->lock);
			while (q->queued) {
				entry = q->entry[q->head];
				q->head = (q->head + 1) % q->ndesc;
				q->queued--;

				dev->drv->tx_complete_skb(dev, i, &entry);
			}
			spin_unlock_bh(&q->lock);
		}
	}

	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);

	mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	usb->sg_en = mt76u_check_sg(dev);

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");