// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;
	int i, ret;

	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->phy.state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(__mt76u_vendor_request);

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	int ret;

	ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
				     addr, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}
EXPORT_SYMBOL_GPL(___mt76u_rr);

static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}

	return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
			   addr & ~MT_VEND_TYPE_MASK);
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req, req_type, addr >> 16,
			       addr, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}
EXPORT_SYMBOL_GPL(___mt76u_wr);

static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
		    addr & ~MT_VEND_TYPE_MASK, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u8 *val = data;
	int ret;
	int current_batch_size;
	int i = 0;

	/* Ensure that a multiple of 4 bytes is always copied,
	 * otherwise beacons can be corrupted.
	 * See: "mt76: round up length on mt76_wr_copy"
	 * Commit 850e8f6fbd5d0003b0
	 */
	len = round_up(len, 4);

	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		current_batch_size = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, current_batch_size);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i, usb->data,
					     current_batch_size);
		if (ret < 0)
			break;

		i += current_batch_size;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int i = 0, batch_len, ret;
	u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
					     USB_DIR_IN | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		memcpy(val + i, usb->data, batch_len);
		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_read_copy);

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		udev->bus->no_sg_constraint);
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		void *data;
		int offset;

		data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!data)
			break;

		sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
			    offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct urb *urb, int nsgs)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int offset;

	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
		return mt76u_fill_rx_sg(dev, q, urb, nsgs);

	urb->transfer_buffer_length = q->buf_size;
	urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);

	return urb->transfer_buffer ? 0 : -ENOMEM;
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
		   struct mt76_queue_entry *e)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int err, sg_size;

	sg_size = qid == MT_RXQ_MAIN ?
		  MT_RX_SG_MAX_SIZE : 0;
	err = mt76u_urb_alloc(dev, e, sg_size);
	if (err)
		return err;

	return mt76u_refill_rx(dev, q, e->urb, sg_size);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);

	if (urb->transfer_buffer)
		mt76_put_page_pool_buf(urb->transfer_buffer, false);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->tail].urb;
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

static int
mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
		       u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
		return dma_len;

	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

static struct sk_buff *
mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
		   int len, int buf_size)
{
	int head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
		data += head_room + MT_SKB_HEAD_LEN;
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	/* fast path */
	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_room);
	__skb_put(skb, len);

	return skb;
}

static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
		       int buf_size)
{
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
		return 0;

	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
	if (len < 0)
		return 0;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ?
		    0 : MT_DMA_HDR_LEN;
	data_len = min_t(int, len, data_len - head_room);

	if (len == data_len &&
	    dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len))
		return 0;

	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset, data_len,
				buf_size);
		len -= data_len;
		nsgs++;
	}

	skb_mark_for_recycle(skb);
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue *q = urb->context;
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
	case -EPROTO:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
		goto out;

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;
	mt76_worker_schedule(&dev->usb.rx_worker);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
		    struct urb *urb)
{
	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;

	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
			    mt76u_complete_rx, &dev->q_rx[qid]);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb;
	int err, count;

	while (true) {
		urb = mt76u_get_next_rx_entry(q);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, urb, count);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, qid, urb);
	}
	if (qid == MT_RXQ_MAIN) {
		local_bh_disable();
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
		local_bh_enable();
	}
}

static void mt76u_rx_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	int i;

	rcu_read_lock();
	mt76_for_each_q_rx(dev, i)
		mt76u_process_rx_queue(dev, &dev->q_rx[i]);
	rcu_read_unlock();
}

static int
mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}

static int
mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i, err;

	err = mt76_create_page_pool(dev, q);
	if (err)
		return err;

	spin_lock_init(&q->lock);
	q->entry =
		devm_kcalloc(dev->dev,
			     MT_NUM_RX_ENTRIES, sizeof(*q->entry),
			     GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev, qid);
}

int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
{
	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);

static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	for (i = 0; i < q->ndesc; i++) {
		if (!q->entry[i].urb)
			continue;

		mt76u_urb_free(q->entry[i].urb);
		q->entry[i].urb = NULL;
	}
	page_pool_destroy(q->page_pool);
	q->page_pool = NULL;
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i)
		mt76u_free_rx_queue(dev, &dev->q_rx[i]);
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++)
			usb_poison_urb(q->entry[j].urb);
	}
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int err, j;

		for (j = 0; j < q->ndesc; j++)
			usb_unpoison_urb(q->entry[j].urb);

		err = mt76u_submit_rx_buffers(dev, i);
		if (err < 0)
			return err;
	}

	mt76_worker_enable(&dev->usb.rx_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

static void mt76u_status_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i;

	if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		return;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		while (q->queued > 0) {
			if (!q->entry[q->tail].done)
				break;

			entry = q->entry[q->tail];
			q->entry[q->tail].done = false;

			mt76_queue_tx_complete(dev, q, &entry);
		}

		if (!q->queued)
			wake_up(&dev->tx_wait);

		mt76_worker_schedule(&dev->tx_worker);
	}

	if (dev->drv->tx_status_data &&
	    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
		queue_work(dev->wq, &dev->usb.stat_work);
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &usb->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	mt76_worker_schedule(&dev->usb.status_worker);
}

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	}

	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	if (!urb->num_sgs)
		return -ENOMEM;

	return urb->num_sgs;
}

static int
mt76u_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
		   enum mt76_txq_id qid, struct sk_buff *skb,
		   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct mt76_dev *dev = phy->dev;
	u16 idx = q->head;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb,
			    mt76u_complete_tx, &q->entry[idx]);

	q->head = (q->head + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->entry[idx].wcid = 0xffff;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->head) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->phy.state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static void
mt76u_ac_to_hwq(struct mt76_dev *dev, struct mt76_queue *q, u8 qid)
{
	u8 ac = qid < IEEE80211_NUM_ACS ? qid : IEEE80211_AC_BE;

	switch (mt76_chip(dev)) {
	case 0x7663: {
		static const u8 lmac_queue_map[] = {
			/* ac to lmac mapping */
			[IEEE80211_AC_BK] = 0,
			[IEEE80211_AC_BE] = 1,
			[IEEE80211_AC_VI] = 2,
			[IEEE80211_AC_VO] = 4,
		};

		q->hw_idx = lmac_queue_map[ac];
		q->ep = q->hw_idx + 1;
		break;
	}
	case 0x7961:
	case 0x7925:
		q->hw_idx = mt76_ac_to_hwq(ac);
		q->ep = qid == MT_TXQ_PSD ?
			MT_EP_OUT_HCCA : q->hw_idx + 1;
		break;
	default:
		q->hw_idx = mt76_ac_to_hwq(ac);
		q->ep = q->hw_idx + 1;
		break;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		struct mt76_queue *q;
		int j, err;

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		mt76u_ac_to_hwq(dev, q, i);
		dev->phy.q_tx[i] = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.status_worker);

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		struct mt76_queue *q;
		int j;

		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		for (j = 0; j < q->ndesc; j++) {
			usb_free_urb(q->entry[j].urb);
			q->entry[j].urb = NULL;
		}
	}
}

void mt76u_stop_tx(struct mt76_dev *dev)
{
	int ret;

	mt76_worker_disable(&dev->usb.status_worker);

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
				 HZ / 5);
	if (!ret) {
		struct mt76_queue_entry entry;
		struct mt76_queue *q;
		int i, j;

		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i <= MT_TXQ_PSD; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		mt76_worker_disable(&dev->tx_worker);

		/* On device removal we might queue skbs, but mt76u_tx_kick()
		 * will fail to submit the urb; clean up those skbs manually.
		 */
		for (i = 0; i <= MT_TXQ_PSD; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			while (q->queued > 0) {
				entry = q->entry[q->tail];
				q->entry[q->tail].done = false;
				mt76_queue_tx_complete(dev, q, &entry);
			}
		}

		mt76_worker_enable(&dev->tx_worker);
	}

	cancel_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_worker_enable(&dev->usb.status_worker);

	mt76_tx_status_check(dev, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	int err;

	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);

	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
	if (usb->data_len < 32)
		usb->data_len = 32;

	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
	if (!usb->data)
		return -ENOMEM;

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = ops;
	dev->queue_ops = &usb_queue_ops;

	dev_set_drvdata(&udev->dev, dev);

	usb->sg_en = mt76u_check_sg(dev);

	err = mt76u_set_endpoints(intf, usb);
	if (err < 0)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker,
				"usb-rx");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->status_worker,
				mt76u_status_worker, "usb-status");
	if (err)
		return err;

	sched_set_fifo_low(usb->rx_worker.task);
	sched_set_fifo_low(usb->status_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(__mt76u_init);

int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
{
	static struct mt76_bus_ops bus_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.read_copy = mt76u_read_copy,
		.write_copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};

	return __mt76u_init(dev, intf, &bus_ops);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_DESCRIPTION("MediaTek MT76x USB helpers");
MODULE_LICENSE("Dual BSD/GPL");