// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#if defined(__FreeBSD__)
#include <linux/cache.h>
#include <net/page_pool.h>
#endif
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

#define Q_READ(_q, _field) ({						\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read((_q)->wed,		\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_q, _field, _val) do {					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write((_q)->wed,			\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_q, _field)		readl(&(_q)->regs->_field)
#define Q_WRITE(_q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
		kfree(txwi);
		return NULL;
	}

	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->ptr = NULL;
	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);
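
/* Drain the txwi cache: unmap and free every cached txwi buffer. */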
static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			mt76_put_page_pool_buf(t->ptr, false);
		kfree(t);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(q, desc_base, q->desc_dma);
	if (q->flags & MT_QFLAG_WED_RRO_EN)
		Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
	else
		Q_WRITE(q, ring_size, q->ndesc);
	q->head = Q_READ(q, dma_idx);
	q->tail = q->head;
}

void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
			    bool reset_idx)
{
	if (!q || !q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		/* clear descriptors */
		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	if (reset_idx) {
		Q_WRITE(q, cpu_idx, 0);
		Q_WRITE(q, dma_idx, 0);
	}
	mt76_dma_sync_idx(dev, q);
}

void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	__mt76_dma_queue_reset(dev, q, true);
}

static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
{
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
	struct mt76_desc *desc;
	int idx = q->head;
	u32 buf1 = 0, ctrl;
	int rx_token;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		data = &rro_desc[q->head];
		goto done;
	}

	desc = &q->desc[q->head];
	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
#endif

	if (mt76_queue_is_wed_rx(q)) {
		txwi = mt76_get_rxwi(dev);
		if (!txwi)
			return -ENOMEM;

		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
		if (rx_token < 0) {
			mt76_put_rxwi(dev, txwi);
			return -ENOMEM;
		}

		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
		ctrl |= MT_DMA_CTL_TO_HOST;
	}

	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);

done:
	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
	entry->buf = data;
	entry->wcid = 0xffff;
	entry->skip_buf1 = true;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}
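
/*
 * Fill TX descriptors, packing up to two buffers per descriptor, and advance
 * the queue head. Returns the index of the last descriptor written.
 */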
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	int i, idx = -1;
	u32 ctrl, next;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		next = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		info |= FIELD_PREP(MT_DMA_CTL_SDP0_H, buf[0].addr >> 32);
#endif
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			info |= FIELD_PREP(MT_DMA_CTL_SDP1_H,
					   buf[1].addr >> 32);
#endif
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->head = next;
		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(q, cpu_idx, q->head);
}

static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}
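
/*
 * Retrieve the buffer attached to a completed descriptor. For WED RX queues
 * the buffer is looked up through its RX token and the rxwi entry is
 * returned to the cache.
 */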
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	u32 ctrl, desc_info, buf1;
	void *buf = e->buf;

	if (mt76_queue_is_wed_rro_ind(q))
		goto done;

	ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
	if (len) {
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	desc_info = le32_to_cpu(desc->info);
	if (info)
		*info = desc_info;

	buf1 = le32_to_cpu(desc->buf1);
	mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);

	if (mt76_queue_is_wed_rx(q)) {
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);
		if (drop)
			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
	} else {
		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));
	}

done:
	e->buf = NULL;
	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (mt76_queue_is_wed_rro_data(q))
		return NULL;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		if (flush)
			q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
		else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
			return NULL;
	}

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		goto error;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}
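
/*
 * Map an skb (txwi, head and fragments) for DMA, let the driver fill the
 * txwi, then queue the resulting buffer chain. On error the skb status is
 * reported back to mac80211.
 */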
static int
mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	if (test_bit(MT76_RESET, &phy->state))
		goto free_skb;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	spin_lock_bh(&dev->rx_lock);
	ieee80211_tx_status_ext(hw, &status);
	spin_unlock_bh(&dev->rx_lock);

	return ret;
}

static int
mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct)
{
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0;

	if (!q->ndesc)
		return 0;

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf = {};
		enum dma_data_direction dir;
		dma_addr_t addr;
		int offset;
		void *buf = NULL;

		if (mt76_queue_is_wed_rro_ind(q))
			goto done;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			break;

		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		dir = page_pool_get_dma_dir(q->page_pool);
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

		qbuf.addr = addr + q->buf_offset;
done:
		qbuf.len = len - q->buf_offset;
		qbuf.skip_unmap = false;
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
			mt76_put_page_pool_buf(buf, allow_direct);
			break;
		}
		frames++;
	}

	if (frames || mt76_queue_is_wed_rx(q))
		mt76_dma_kick_queue(dev, q);

	return frames;
}

int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct)
{
	int frames;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);
	frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
	spin_unlock_bh(&q->lock);

	return frames;
}
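
/*
 * Allocate the descriptor ring, queue entries and page pool for a queue,
 * then perform WED setup and reset the hardware indices (skipped for
 * WED-managed RRO / TX-free rings).
 */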
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

#if defined(__linux__)
	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
#elif defined(__FreeBSD__)
	q->regs = (void *)((u8 *)dev->mmio.regs + ring_base + idx * MT_RING_SIZE);
#endif
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
					    : sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;
		int i;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		for (i = 0; i < q->ndesc; i++) {
			struct mt76_wed_rro_ind *cmd;

			cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
			cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
		}
	}

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_create_page_pool(dev, q);
	if (ret)
		return ret;

	ret = mt76_wed_dma_setup(dev, q, false);
	if (ret)
		return ret;

	if (mtk_wed_device_active(&dev->mmio.wed)) {
		if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
		    mt76_queue_is_wed_tx_free(q))
			return 0;
	}

	mt76_dma_queue_reset(dev, q);

	return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	do {
		spin_lock_bh(&q->lock);
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		spin_unlock_bh(&q->lock);

		if (!buf)
			break;

		if (!mt76_queue_is_wed_rro(q))
			mt76_put_page_pool_buf(buf, false);
	} while (1);

	spin_lock_bh(&q->lock);
	if (q->rx_head) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
	}

	spin_unlock_bh(&q->lock);
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	if (!q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	mt76_dma_rx_cleanup(dev, q);

	/* reset WED rx queues */
	mt76_wed_dma_setup(dev, q, true);

	if (mt76_queue_is_wed_tx_free(q))
		return;

	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    mt76_queue_is_wed_rro(q))
		return;

	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill_buf(dev, q, false);
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info, bool allow_direct)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
#if defined(__linux__)
		int offset = data - page_address(page) + q->buf_offset;
#elif defined(__FreeBSD__)
		int offset = (u8 *)data - (u8 *)page_address(page) + q->buf_offset;
#endif

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		mt76_put_page_pool_buf(data, allow_direct);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}
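
/*
 * NAPI RX processing: dequeue completed buffers, reassemble fragmented
 * frames via q->rx_head, hand complete skbs to the driver and refill the
 * ring.
 */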
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool allow_direct = !mt76_queue_is_wed_rx(q);
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    mt76_queue_is_wed_tx_free(q)) {
		dma_idx = Q_READ(q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info,
					  allow_direct);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = napi_build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);
		skb_mark_for_recycle(skb);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		mt76_put_page_pool_buf(data, allow_direct);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = mt76_priv(napi->dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);
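
/*
 * Allocate dummy netdevs for NAPI, register the RX poll handler for every
 * RX queue and pre-fill the RX rings.
 */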
static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	struct mt76_dev **priv;
	int i;

	dev->napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
	if (!dev->napi_dev)
		return -ENOMEM;

	/* napi_dev private data points to the mt76_dev parent, so mt76_dev
	 * can be retrieved given napi_dev
	 */
	priv = netdev_priv(dev->napi_dev);
	*priv = dev;

	dev->tx_napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
	if (!dev->tx_napi_dev) {
		free_netdev(dev->napi_dev);
		return -ENOMEM;
	}
	priv = netdev_priv(dev->tx_napi_dev);
	*priv = dev;

	snprintf(dev->napi_dev->name, sizeof(dev->napi_dev->name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev->threaded = 1;
	init_completion(&dev->mmio.wed_reset);
	init_completion(&dev->mmio.wed_reset_complete);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    mt76_queue_is_wed_rro(q))
			continue;

		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, q);

		page_pool_destroy(q->page_pool);
	}

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);

	if (mtk_wed_device_active(&dev->mmio.wed_hif2))
		mtk_wed_device_detach(&dev->mmio.wed_hif2);

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);
	free_netdev(dev->napi_dev);
	free_netdev(dev->tx_napi_dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);