// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#if defined(__FreeBSD__)
#include <linux/cache.h>
#endif
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

#define Q_READ(_dev, _q, _field) ({					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed,	\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_dev, _q, _field, _val)	do {				\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write(&(_dev)->mmio.wed,		\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_dev, _q, _field)	readl(&(_q)->regs->_field)
#define Q_WRITE(_dev, _q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(dev, q, desc_base, q->desc_dma);
	Q_WRITE(dev, q, ring_size, q->ndesc);
	q->head = Q_READ(dev, q, dma_idx);
	q->tail = q->head;
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	if (!q || !q->ndesc)
		return;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	Q_WRITE(dev, q, cpu_idx, 0);
	Q_WRITE(dev, q, dma_idx, 0);
	mt76_dma_sync_idx(dev, q);
}

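/* Write buffer descriptors into the ring starting at q->head. Each hardware
 * descriptor carries up to two buffers, so consecutive entries of @buf are
 * packed in pairs; segment lengths go into MT_DMA_CTL_SD_LEN0/1 and the final
 * segment is flagged with MT_DMA_CTL_LAST_SEC0/1. The skb/txwi pointers are
 * stored on the last entry used, and the index of that entry is returned.
 */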
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(dev, q, cpu_idx, q->head);
}

static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(dev, q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(dev, q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

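/* Unmap and return the buffer attached to descriptor @idx. When @len is
 * requested, the segment length is read from the descriptor ctrl word and
 * *more is set if MT_DMA_CTL_LAST_SEC0 is absent, i.e. the frame continues
 * in the next descriptor.
 */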
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = e->dma_addr[0];
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	ieee80211_tx_status_ext(hw, &status);

	return ret;
}

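/* Refill an rx ring with buffers allocated from the per-queue page fragment
 * cache. Each buffer is mapped for DMA and queued with q->buf_offset reserved
 * as headroom; the queue is kicked once at the end if at least one buffer was
 * added. Returns the number of buffers queued.
 */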
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		qbuf.skip_unmap = false;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

static int
mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	struct mtk_wed_device *wed = &dev->mmio.wed;
	int ret, type, ring;
	u8 flags = q->flags;

	if (!mtk_wed_device_active(wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
		if (!ret)
			q->wed_regs = wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q);
		q->flags = flags;

		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
		if (!ret)
			q->wed_regs = wed->txfree_ring.reg_base;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
#else
	return 0;
#endif
}

static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

#if defined(__linux__)
	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
#elif defined(__FreeBSD__)
	q->regs = (void *)((u8 *)dev->mmio.regs + ring_base + idx * MT_RING_SIZE);
#endif
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_dma_wed_setup(dev, q);
	if (ret)
		return ret;

	if (q->flags != MT_WED_Q_TXFREE)
		mt76_dma_queue_reset(dev, q);

	return 0;
}

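/* Drain and free every buffer still sitting in an rx ring, then release the
 * page fragment cache backing the queue.
 */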
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	if (!q->ndesc)
		return;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);

	if (!q->rx_head)
		return;

	dev_kfree_skb(q->rx_head);
	q->rx_head = NULL;
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
#if defined(__linux__)
		int offset = data - page_address(page) + q->buf_offset;
#elif defined(__FreeBSD__)
		int offset = (u8 *)data - (u8 *)page_address(page) + q->buf_offset;
#endif

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		skb_free_frag(data);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	else
		dev_kfree_skb(skb);
}

static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    q->flags == MT_WED_Q_TXFREE) {
		dma_idx = Q_READ(dev, q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(dev, q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
		continue;

free_frag:
		skb_free_frag(data);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}

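/* NAPI poll handler: process up to @budget frames from the rx queue matching
 * this NAPI instance, refilling the ring as buffers are consumed, and let the
 * driver re-arm rx interrupts via drv->rx_poll_complete once the queue is
 * drained within budget.
 */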
int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}

	mt76_free_pending_txwi(dev);

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);