// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

#define Q_READ(_dev, _q, _field) ({					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed,	\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_dev, _q, _field, _val)	do {				\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write(&(_dev)->mmio.wed,		\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_dev, _q, _field)	readl(&(_q)->regs->_field)
#define Q_WRITE(_dev, _q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
		kfree(txwi);
		return NULL;
	}

	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->ptr = NULL;
	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

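/* Release every txwi still held in the cache and unmap its DMA buffer;
 * called from mt76_dma_cleanup() when the device is torn down.
 */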
static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			mt76_put_page_pool_buf(t->ptr, false);
		kfree(t);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(dev, q, desc_base, q->desc_dma);
	Q_WRITE(dev, q, ring_size, q->ndesc);
	q->head = Q_READ(dev, q, dma_idx);
	q->tail = q->head;
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	if (!q || !q->ndesc)
		return;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	Q_WRITE(dev, q, cpu_idx, 0);
	Q_WRITE(dev, q, dma_idx, 0);
	mt76_dma_sync_idx(dev, q);
}

static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
{
	struct mt76_desc *desc = &q->desc[q->head];
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
	u32 buf1 = 0, ctrl;
	int idx = q->head;
	int rx_token;

	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);

	if (mt76_queue_is_wed_rx(q)) {
		txwi = mt76_get_rxwi(dev);
		if (!txwi)
			return -ENOMEM;

		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
		if (rx_token < 0) {
			mt76_put_rxwi(dev, txwi);
			return -ENOMEM;
		}

		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
		ctrl |= MT_DMA_CTL_TO_HOST;
	}

	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);

	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
	entry->buf = data;
	entry->wcid = 0xffff;
	entry->skip_buf1 = true;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	int i, idx = -1;
	u32 ctrl, next;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		next = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

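		/* flag the buffer slot holding the frame's last segment */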
		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->head = next;
		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(dev, q, cpu_idx, q->head);
}

static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(dev, q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(dev, q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	void *buf;

	if (len) {
		u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	if (mt76_queue_is_wed_rx(q)) {
		u32 buf1 = le32_to_cpu(desc->buf1);
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);

		if (drop) {
			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));

			*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
					   MT_DMA_CTL_DROP));

			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
		}
	} else {
		buf = e->buf;
		e->buf = NULL;
		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));
	}

	return buf;
}

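/* Pop the entry at q->tail off the ring; with flush set, entries are
 * consumed even if the hardware has not marked them DMA_DONE yet.
 */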
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		goto error;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	if (test_bit(MT76_RESET, &dev->phy.state))
		goto free_skb;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	spin_lock_bh(&dev->rx_lock);
	ieee80211_tx_status_ext(hw, &status);
	spin_unlock_bh(&dev->rx_lock);

	return ret;
}

static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		 bool allow_direct)
{
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		enum dma_data_direction dir;
		struct mt76_queue_buf qbuf;
		dma_addr_t addr;
		int offset;
		void *buf;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			break;

		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		dir = page_pool_get_dma_dir(q->page_pool);
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

		qbuf.addr = addr + q->buf_offset;
		qbuf.len = len - q->buf_offset;
		qbuf.skip_unmap = false;
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
			mt76_put_page_pool_buf(buf, allow_direct);
			break;
		}
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	struct mtk_wed_device *wed = &dev->mmio.wed;
	int ret, type, ring;
	u8 flags;

	if (!q || !q->ndesc)
		return -EINVAL;

	flags = q->flags;
	if (!mtk_wed_device_active(wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
		if (!ret)
			q->wed_regs = wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q, false);
		q->flags = flags;

		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
		if (!ret)
			q->wed_regs = wed->txfree_ring.reg_base;
		break;
	case MT76_WED_Q_RX:
		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
		if (!ret)
			q->wed_regs = wed->rx_ring[ring].reg_base;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
#else
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);

static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_create_page_pool(dev, q);
	if (ret)
		return ret;

	ret = mt76_dma_wed_setup(dev, q, false);
	if (ret)
		return ret;

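	/* the WED txfree ring was already reset in mt76_dma_wed_setup() */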
	if (q->flags != MT_WED_Q_TXFREE)
		mt76_dma_queue_reset(dev, q);

	return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	do {
		spin_lock_bh(&q->lock);
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		spin_unlock_bh(&q->lock);

		if (!buf)
			break;

		mt76_put_page_pool_buf(buf, false);
	} while (1);

	spin_lock_bh(&q->lock);
	if (q->rx_head) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
	}

	spin_unlock_bh(&q->lock);
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	if (!q->ndesc)
		return;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);

	/* reset WED rx queues */
	mt76_dma_wed_setup(dev, q, true);
	if (q->flags != MT_WED_Q_TXFREE) {
		mt76_dma_sync_idx(dev, q);
		mt76_dma_rx_fill(dev, q, false);
	}
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		mt76_put_page_pool_buf(data, true);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}

static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    q->flags == MT_WED_Q_TXFREE) {
		dma_idx = Q_READ(dev, q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(dev, q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = napi_build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);
		skb_mark_for_recycle(skb);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		mt76_put_page_pool_buf(data, true);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

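/* NAPI poll handler: process up to budget rx frames, then complete NAPI
 * and notify the driver through rx_poll_complete().
 */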
int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;
	init_completion(&dev->mmio.wed_reset);
	init_completion(&dev->mmio.wed_reset_complete);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, q);

		page_pool_destroy(q->page_pool);
	}

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);