// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

#define Q_READ(_dev, _q, _field) ({					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed,	\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_dev, _q, _field, _val) do {				\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write(&(_dev)->mmio.wed,		\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_dev, _q, _field)	readl(&(_q)->regs->_field)
#define Q_WRITE(_dev, _q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->ptr = NULL;
	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
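	/* Drain the txwi cache: unmap each descriptor buffer and free the
	 * backing allocation (the txwi data and its cache entry come from a
	 * single kzalloc in mt76_alloc_txwi()).
	 */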
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

static void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			skb_free_frag(t->ptr);
		kfree(t);
	}
	local_bh_enable();
}

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(dev, q, desc_base, q->desc_dma);
	Q_WRITE(dev, q, ring_size, q->ndesc);
	q->head = Q_READ(dev, q, dma_idx);
	q->tail = q->head;
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	if (!q || !q->ndesc)
		return;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	Q_WRITE(dev, q, cpu_idx, 0);
	Q_WRITE(dev, q, dma_idx, 0);
	mt76_dma_sync_idx(dev, q);
}

static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if ((q->flags & MT_QFLAG_WED) &&
		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
			struct mt76_txwi_cache *t = txwi;
			int rx_token;

			if (!t)
				return -ENOMEM;

			rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
							 buf[0].addr);
			buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
			ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) |
			       MT_DMA_CTL_TO_HOST;
		} else {
			if (txwi) {
				q->entry[q->head].txwi = DMA_DUMMY_DATA;
				q->entry[q->head].skip_buf0 = true;
			}

			if (buf[0].skip_unmap)
				entry->skip_buf0 = true;
			entry->skip_buf1 = i == nbufs - 1;

			entry->dma_addr[0] = buf[0].addr;
			entry->dma_len[0] = buf[0].len;

			ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
			if (i < nbufs - 1) {
				entry->dma_addr[1] = buf[1].addr;
				entry->dma_len[1] = buf[1].len;
				buf1 = buf[1].addr;
				ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
				if (buf[1].skip_unmap)
					entry->skip_buf1 = true;
			}

			if (i == nbufs - 1)
				ctrl |= MT_DMA_CTL_LAST_SEC0;
			else if (i == nbufs - 2)
				ctrl |= MT_DMA_CTL_LAST_SEC1;
		}

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

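	/* DMA_DUMMY_DATA marks placeholder txwi/skb pointers; clear them so
	 * completion handling does not treat them as real buffers.
	 */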
	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(dev, q, cpu_idx, q->head);
}

static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(dev, q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(dev, q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	void *buf;

	if (len) {
		u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	if ((q->flags & MT_QFLAG_WED) &&
	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
				      le32_to_cpu(desc->buf1));
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_unmap_single(dev->dma_dev, t->dma_addr,
				 SKB_WITH_OVERHEAD(q->buf_size),
				 DMA_FROM_DEVICE);

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);

		if (drop) {
			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));

			*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
					   MT_DMA_CTL_DROP));
		}
	} else {
		buf = e->buf;
		e->buf = NULL;
		dma_unmap_single(dev->dma_dev, e->dma_addr[0],
				 SKB_WITH_OVERHEAD(q->buf_size),
				 DMA_FROM_DEVICE);
	}

	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
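	/* publish the new CPU index so the hardware starts fetching */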
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	ieee80211_tx_status_ext(hw, &status);

	return ret;
}

static struct page_frag_cache *
mt76_dma_rx_get_frag_cache(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page_frag_cache *rx_page = &q->rx_page;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	if ((q->flags & MT_QFLAG_WED) &&
	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX)
		rx_page = &dev->mmio.wed.rx_buf_ring.rx_page;
#endif
	return rx_page;
}

static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page_frag_cache *rx_page = mt76_dma_rx_get_frag_cache(dev, q);
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0, offset = q->buf_offset;
	dma_addr_t addr;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_txwi_cache *t = NULL;
		struct mt76_queue_buf qbuf;
		void *buf = NULL;

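		/* WED RX rings track buffers via rxwi tokens, so reserve a
		 * cache entry before allocating the page fragment.
		 */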
		if ((q->flags & MT_QFLAG_WED) &&
		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
			t = mt76_get_rxwi(dev);
			if (!t)
				break;
		}

		buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		qbuf.skip_unmap = false;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

static int
mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	struct mtk_wed_device *wed = &dev->mmio.wed;
	int ret, type, ring;
	u8 flags = q->flags;

	if (!mtk_wed_device_active(wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, false);
		if (!ret)
			q->wed_regs = wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q);
		q->flags = flags;

		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
		if (!ret)
			q->wed_regs = wed->txfree_ring.reg_base;
		break;
	case MT76_WED_Q_RX:
		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, false);
		if (!ret)
			q->wed_regs = wed->rx_ring[ring].reg_base;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
#else
	return 0;
#endif
}

static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_dma_wed_setup(dev, q);
	if (ret)
		return ret;

	if (q->flags != MT_WED_Q_TXFREE)
		mt76_dma_queue_reset(dev, q);

	return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

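/* Reset an RX ring: mark every descriptor as done, drop pending buffers,
 * resynchronize the hardware indices and refill the ring.
 */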
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	if (!q->ndesc)
		return;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);

	if (!q->rx_head)
		return;

	dev_kfree_skb(q->rx_head);
	q->rx_head = NULL;
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		skb_free_frag(data);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}

static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    q->flags == MT_WED_Q_TXFREE) {
		dma_idx = Q_READ(dev, q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(dev, q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		skb_free_frag(data);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}

int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;

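	/* register one NAPI context per RX queue, pre-fill the rings and
	 * enable polling
	 */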
	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		netif_napi_del(&dev->napi[i]);
		if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags))
			mt76_dma_rx_cleanup(dev, q);
	}

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
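/*
 * Usage sketch (illustrative assumption, not part of this file): bus glue
 * typically attaches these queue ops once during driver init and then drives
 * the rings through dev->queue_ops, e.g.:
 *
 *	mt76_dma_attach(&mdev->mt76);
 *	mdev->mt76.queue_ops->init(&mdev->mt76, mt76_dma_rx_poll);
 *
 * where "mdev" stands for a hypothetical driver-private structure that
 * embeds struct mt76_dev.
 */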