Lines matching refs:q — each match shows the source line number, the code, and the enclosing mt76 DMA function.
188 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_sync_idx() argument
190 Q_WRITE(dev, q, desc_base, q->desc_dma); in mt76_dma_sync_idx()
191 Q_WRITE(dev, q, ring_size, q->ndesc); in mt76_dma_sync_idx()
192 q->head = Q_READ(dev, q, dma_idx); in mt76_dma_sync_idx()
193 q->tail = q->head; in mt76_dma_sync_idx()
197 mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_queue_reset() argument
201 if (!q || !q->ndesc) in mt76_dma_queue_reset()
205 for (i = 0; i < q->ndesc; i++) in mt76_dma_queue_reset()
206 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_queue_reset()
208 Q_WRITE(dev, q, cpu_idx, 0); in mt76_dma_queue_reset()
209 Q_WRITE(dev, q, dma_idx, 0); in mt76_dma_queue_reset()
210 mt76_dma_sync_idx(dev, q); in mt76_dma_queue_reset()
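
The sync_idx/queue_reset pair above is the ring-reset idiom used throughout this file: mark every descriptor as already completed, zero the cpu_idx and dma_idx registers, then adopt the hardware index as the software head/tail. Below is a minimal user-space sketch of that sequence, with plain struct fields standing in for the Q_READ/Q_WRITE register accessors and hypothetical type names (struct ring, struct desc) in place of the real mt76 structures; the real mt76_dma_sync_idx() also reprograms desc_base and ring_size, which the sketch omits.

#include <stdint.h>
#include <stdio.h>

#define NDESC 8
#define DMA_CTL_DMA_DONE (1u << 31)   /* stands in for MT_DMA_CTL_DMA_DONE */

struct desc { uint32_t ctrl; };

struct ring {
    struct desc desc[NDESC];
    unsigned int ndesc;
    unsigned int head, tail;
    /* stand-ins for the cpu_idx/dma_idx registers behind Q_READ/Q_WRITE */
    uint32_t hw_cpu_idx, hw_dma_idx;
};

/* model of mt76_dma_sync_idx(): adopt the hardware DMA index as head/tail */
static void ring_sync_idx(struct ring *q)
{
    q->head = q->hw_dma_idx;
    q->tail = q->head;
}

/* model of mt76_dma_queue_reset(): mark every slot as already completed,
 * zero both hardware indices, then resynchronise the software view */
static void ring_reset(struct ring *q)
{
    unsigned int i;

    if (!q || !q->ndesc)
        return;

    for (i = 0; i < q->ndesc; i++)
        q->desc[i].ctrl = DMA_CTL_DMA_DONE;

    q->hw_cpu_idx = 0;
    q->hw_dma_idx = 0;
    ring_sync_idx(q);
}

int main(void)
{
    struct ring q = { .ndesc = NDESC, .head = 5, .tail = 2, .hw_dma_idx = 5 };

    ring_reset(&q);
    printf("head=%u tail=%u\n", q.head, q.tail);   /* both 0 after reset */
    return 0;
}
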
214 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_rx_buf() argument
217 struct mt76_desc *desc = &q->desc[q->head]; in mt76_dma_add_rx_buf()
218 struct mt76_queue_entry *entry = &q->entry[q->head]; in mt76_dma_add_rx_buf()
221 int idx = q->head; in mt76_dma_add_rx_buf()
226 if (mt76_queue_is_wed_rx(q)) { in mt76_dma_add_rx_buf()
252 q->head = (q->head + 1) % q->ndesc; in mt76_dma_add_rx_buf()
253 q->queued++; in mt76_dma_add_rx_buf()
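
The tail end of mt76_dma_add_rx_buf() shows the producer-side ring arithmetic: write into the slot at q->head, then advance head modulo q->ndesc and bump q->queued. A compilable sketch of just that wrap-around bookkeeping, with hypothetical names and the descriptor/DMA-address programming left out:

#include <stdio.h>

#define NDESC 8

struct rx_ring {
    void *buf[NDESC];          /* stands in for the per-slot buffer pointer */
    unsigned int head, queued;
};

/* model of the slot-claiming step in mt76_dma_add_rx_buf(): fill the slot at
 * head, then advance head with wrap-around and account for the new buffer.
 * (The real caller guarantees there is room; the check here is only for the
 * standalone demo.) */
static int rx_ring_add(struct rx_ring *q, void *data)
{
    int idx = (int)q->head;

    if (q->queued == NDESC)
        return -1;

    q->buf[idx] = data;
    q->head = (q->head + 1) % NDESC;
    q->queued++;
    return idx;                /* the real function also returns the slot index */
}

int main(void)
{
    struct rx_ring q = { .head = 0 };
    char a, b;

    printf("slot %d\n", rx_ring_add(&q, &a));   /* slot 0 */
    printf("slot %d\n", rx_ring_add(&q, &b));   /* slot 1 */
    return 0;
}
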
259 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_buf() argument
269 q->entry[q->head].txwi = DMA_DUMMY_DATA; in mt76_dma_add_buf()
270 q->entry[q->head].skip_buf0 = true; in mt76_dma_add_buf()
276 idx = q->head; in mt76_dma_add_buf()
277 next = (q->head + 1) % q->ndesc; in mt76_dma_add_buf()
279 desc = &q->desc[idx]; in mt76_dma_add_buf()
280 entry = &q->entry[idx]; in mt76_dma_add_buf()
309 q->head = next; in mt76_dma_add_buf()
310 q->queued++; in mt76_dma_add_buf()
313 q->entry[idx].txwi = txwi; in mt76_dma_add_buf()
314 q->entry[idx].skb = skb; in mt76_dma_add_buf()
315 q->entry[idx].wcid = 0xffff; in mt76_dma_add_buf()
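
mt76_dma_add_buf() walks the caller's buffer list two entries at a time, packing each pair into one descriptor (buf0/buf1), and records the txwi/skb/wcid metadata in the entry slot of the last descriptor it used; the DMA_DUMMY_DATA/skip_buf0 lines cover the case where no txwi is prepended. A hedged sketch of that pairing loop, with illustrative types and without the ctrl-word encoding or the skip_buf0 path:

#include <stdint.h>
#include <stddef.h>

#define NDESC 16

struct qbuf { uint64_t addr; uint16_t len; };

struct txd { uint64_t buf0, buf1; uint32_t ctrl; };      /* stand-in for struct mt76_desc */
struct tx_entry { void *txwi, *skb; uint16_t wcid; };    /* stand-in for struct mt76_queue_entry */

struct tx_ring {
    struct txd desc[NDESC];
    struct tx_entry entry[NDESC];   /* metadata array parallel to desc[], like q->entry[] */
    unsigned int head, queued;
};

/* model of mt76_dma_add_buf(): consume the buffer list two entries at a time,
 * one descriptor per pair, and tag the last used slot with the skb metadata */
static int tx_ring_add(struct tx_ring *q, const struct qbuf *buf, int nbufs,
                       void *txwi, void *skb)
{
    int i, idx = -1;

    if (nbufs <= 0)
        return -1;

    for (i = 0; i < nbufs; i += 2) {
        unsigned int next = (q->head + 1) % NDESC;

        idx = (int)q->head;
        q->desc[idx].buf0 = buf[i].addr;
        q->desc[idx].buf1 = (i + 1 < nbufs) ? buf[i + 1].addr : 0;
        /* the real code also packs the buffer lengths and flags into desc->ctrl */

        q->head = next;
        q->queued++;
    }

    q->entry[idx].txwi = txwi;
    q->entry[idx].skb = skb;
    q->entry[idx].wcid = 0xffff;   /* "no wcid yet", as in the matched line */
    return idx;
}

int main(void)
{
    struct tx_ring q = { .head = 0 };
    struct qbuf bufs[3] = { { 0x1000, 64 }, { 0x2000, 128 }, { 0x3000, 32 } };
    char skb_stub;

    return tx_ring_add(&q, bufs, 3, NULL, &skb_stub) < 0;
}
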
321 mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_tx_cleanup_idx() argument
324 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_tx_cleanup_idx()
345 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_kick_queue() argument
348 Q_WRITE(dev, q, cpu_idx, q->head); in mt76_dma_kick_queue()
352 mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) in mt76_dma_tx_cleanup() argument
357 if (!q || !q->ndesc) in mt76_dma_tx_cleanup()
360 spin_lock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
364 last = Q_READ(dev, q, dma_idx); in mt76_dma_tx_cleanup()
366 while (q->queued > 0 && q->tail != last) { in mt76_dma_tx_cleanup()
367 mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); in mt76_dma_tx_cleanup()
368 mt76_queue_tx_complete(dev, q, &entry); in mt76_dma_tx_cleanup()
375 if (!flush && q->tail == last) in mt76_dma_tx_cleanup()
376 last = Q_READ(dev, q, dma_idx); in mt76_dma_tx_cleanup()
378 spin_unlock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
381 spin_lock_bh(&q->lock); in mt76_dma_tx_cleanup()
382 mt76_dma_sync_idx(dev, q); in mt76_dma_tx_cleanup()
383 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_cleanup()
384 spin_unlock_bh(&q->lock); in mt76_dma_tx_cleanup()
387 if (!q->queued) in mt76_dma_tx_cleanup()
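
mt76_dma_tx_cleanup() is the consumer side: it drains completed entries from q->tail up to the dma_idx the hardware last reported, and when it catches up it re-reads dma_idx once more before stopping (a flush drains everything). The sketch below models that loop only; the locking, the final sync/kick on flush, and the wake-up when the queue empties are omitted.

#include <stdio.h>

#define NDESC 8

struct tx_ring {
    unsigned int head, tail, queued;
    unsigned int hw_dma_idx;   /* stand-in for the Q_READ(dma_idx) register */
};

static unsigned int read_hw_dma_idx(const struct tx_ring *q)
{
    return q->hw_dma_idx;      /* a register read in the real driver */
}

static void complete_entry(struct tx_ring *q, unsigned int idx)
{
    printf("completed slot %u\n", idx);   /* frees skb/txwi in the driver */
    (void)q;
}

/* model of mt76_dma_tx_cleanup(): walk from tail towards the index the DMA
 * engine reports as done, re-reading that index once more when we catch up;
 * flush uses an impossible index so the loop drains until queued hits zero */
static void tx_ring_cleanup(struct tx_ring *q, int flush)
{
    unsigned int last = flush ? (unsigned int)-1 : read_hw_dma_idx(q);

    while (q->queued > 0 && q->tail != last) {
        complete_entry(q, q->tail);
        q->tail = (q->tail + 1) % NDESC;
        q->queued--;

        if (!flush && q->tail == last)
            last = read_hw_dma_idx(q);
    }
}

int main(void)
{
    struct tx_ring q = { .head = 5, .tail = 0, .queued = 5, .hw_dma_idx = 3 };

    tx_ring_cleanup(&q, 0);   /* completes slots 0, 1, 2 */
    return 0;
}

The extra re-read of dma_idx lets the loop pick up entries that the hardware finished while earlier ones were being completed, instead of leaving them for the next interrupt.
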
392 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_get_buf() argument
395 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_get_buf()
396 struct mt76_desc *desc = &q->desc[idx]; in mt76_dma_get_buf()
408 if (mt76_queue_is_wed_rx(q)) { in mt76_dma_get_buf()
417 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
418 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
438 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
439 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
446 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, in mt76_dma_dequeue() argument
449 int idx = q->tail; in mt76_dma_dequeue()
452 if (!q->queued) in mt76_dma_dequeue()
456 q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_dequeue()
457 else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) in mt76_dma_dequeue()
460 q->tail = (q->tail + 1) % q->ndesc; in mt76_dma_dequeue()
461 q->queued--; in mt76_dma_dequeue()
463 return mt76_dma_get_buf(dev, q, idx, len, info, more, drop); in mt76_dma_dequeue()
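
mt76_dma_dequeue() only consumes the slot at q->tail once the hardware has set MT_DMA_CTL_DMA_DONE in its descriptor, and a flush forces the bit so the ring can always be drained. A self-contained model of that check (illustrative bit value, no DMA unmapping):

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#define NDESC 8
#define DMA_CTL_DMA_DONE (1u << 31)   /* models MT_DMA_CTL_DMA_DONE */

struct desc { uint32_t ctrl; };

struct rx_ring {
    struct desc desc[NDESC];
    void *buf[NDESC];
    unsigned int tail, queued;
};

/* model of mt76_dma_dequeue(): only consume the slot at tail once the DMA
 * engine has set the DONE bit, unless the caller is flushing the ring */
static void *rx_ring_dequeue(struct rx_ring *q, bool flush)
{
    unsigned int idx = q->tail;

    if (!q->queued)
        return NULL;

    if (flush)
        q->desc[idx].ctrl |= DMA_CTL_DMA_DONE;
    else if (!(q->desc[idx].ctrl & DMA_CTL_DMA_DONE))
        return NULL;   /* hardware has not filled this slot yet */

    q->tail = (q->tail + 1) % NDESC;
    q->queued--;
    return q->buf[idx];
}

int main(void)
{
    struct rx_ring q = { .tail = 0, .queued = 1 };
    static char payload;

    q.buf[0] = &payload;
    q.desc[0].ctrl = DMA_CTL_DMA_DONE;
    return rx_ring_dequeue(&q, false) != &payload;
}
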
467 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_tx_queue_skb_raw() argument
476 if (q->queued + 1 >= q->ndesc - 1) in mt76_dma_tx_queue_skb_raw()
487 spin_lock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
488 mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); in mt76_dma_tx_queue_skb_raw()
489 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_queue_skb_raw()
490 spin_unlock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
500 mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_tx_queue_skb() argument
554 if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) { in mt76_dma_tx_queue_skb()
567 return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, in mt76_dma_tx_queue_skb()
599 mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_rx_fill() argument
602 int len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_fill()
605 if (!q->ndesc) in mt76_dma_rx_fill()
608 spin_lock_bh(&q->lock); in mt76_dma_rx_fill()
610 while (q->queued < q->ndesc - 1) { in mt76_dma_rx_fill()
617 buf = mt76_get_page_pool_buf(q, &offset, q->buf_size); in mt76_dma_rx_fill()
622 dir = page_pool_get_dma_dir(q->page_pool); in mt76_dma_rx_fill()
625 qbuf.addr = addr + q->buf_offset; in mt76_dma_rx_fill()
626 qbuf.len = len - q->buf_offset; in mt76_dma_rx_fill()
628 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) { in mt76_dma_rx_fill()
636 mt76_dma_kick_queue(dev, q); in mt76_dma_rx_fill()
638 spin_unlock_bh(&q->lock); in mt76_dma_rx_fill()
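
mt76_dma_rx_fill() keeps the RX ring topped up to ndesc - 1 buffers, carving each buffer out of the queue's page pool and offsetting it by q->buf_offset of headroom. The sketch below keeps only that refill loop, with malloc standing in for mt76_get_page_pool_buf() and no DMA syncing:

#include <stdio.h>
#include <stdlib.h>

#define NDESC 8
#define BUF_SIZE 2048
#define BUF_OFFSET 64   /* headroom in front of each frame, like q->buf_offset */

struct rx_ring {
    void *buf[NDESC];
    unsigned int head, queued;
};

/* model of mt76_dma_rx_fill(): keep topping the ring up until it holds
 * ndesc - 1 buffers; each slot gets the buffer address shifted past the
 * headroom, and the usable length shrinks by the same amount */
static int rx_ring_fill(struct rx_ring *q)
{
    int frames = 0;

    while (q->queued < NDESC - 1) {
        /* a page-pool fragment in the driver; plain malloc in this model
         * (not freed here, since the ring keeps owning the buffers) */
        char *raw = malloc(BUF_SIZE);

        if (!raw)
            break;

        q->buf[q->head] = raw + BUF_OFFSET;
        q->head = (q->head + 1) % NDESC;
        q->queued++;
        frames++;
    }

    return frames;   /* the real function also kicks the queue afterwards */
}

int main(void)
{
    struct rx_ring q = { .head = 0 };

    printf("filled %d buffers\n", rx_ring_fill(&q));   /* prints 7 */
    return 0;
}

Stopping at ndesc - 1 is the usual way to keep a full ring distinguishable from an empty one when only the head and tail indices are compared.
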
643 int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset) in mt76_dma_wed_setup() argument
650 if (!q || !q->ndesc) in mt76_dma_wed_setup()
653 flags = q->flags; in mt76_dma_wed_setup()
655 q->flags &= ~MT_QFLAG_WED; in mt76_dma_wed_setup()
657 if (!(q->flags & MT_QFLAG_WED)) in mt76_dma_wed_setup()
660 type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags); in mt76_dma_wed_setup()
661 ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags); in mt76_dma_wed_setup()
665 ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset); in mt76_dma_wed_setup()
667 q->wed_regs = wed->tx_ring[ring].reg_base; in mt76_dma_wed_setup()
671 q->flags = 0; in mt76_dma_wed_setup()
672 mt76_dma_queue_reset(dev, q); in mt76_dma_wed_setup()
673 mt76_dma_rx_fill(dev, q, false); in mt76_dma_wed_setup()
674 q->flags = flags; in mt76_dma_wed_setup()
676 ret = mtk_wed_device_txfree_ring_setup(wed, q->regs); in mt76_dma_wed_setup()
678 q->wed_regs = wed->txfree_ring.reg_base; in mt76_dma_wed_setup()
681 ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset); in mt76_dma_wed_setup()
683 q->wed_regs = wed->rx_ring[ring].reg_base; in mt76_dma_wed_setup()
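
mt76_dma_wed_setup() decodes a queue type and ring number out of the packed q->flags word with FIELD_GET() and then dispatches to the matching mtk_wed_device_*_ring_setup() call. The sketch below shows only that decode-and-dispatch shape; the mask layout and type values are made up for the example and are not the real MT_QFLAG_* definitions.

#include <stdio.h>
#include <stdint.h>

/* illustrative layout only; the real MT_QFLAG_* masks live in mt76.h */
#define QFLAG_WED_RING  0x0000000f   /* ring number          */
#define QFLAG_WED_TYPE  0x000000f0   /* queue type           */
#define QFLAG_WED       0x00000100   /* queue is WED-managed */

enum wed_type { WED_TYPE_TX = 1, WED_TYPE_TXFREE = 2, WED_TYPE_RX = 3 };

/* poor man's FIELD_GET(): shift the masked value down by the mask's lowest bit */
static uint32_t field_get(uint32_t mask, uint32_t val)
{
    return (val & mask) / (mask & -mask);
}

static void wed_setup(uint32_t flags)
{
    if (!(flags & QFLAG_WED)) {
        puts("not a WED queue, nothing to do");
        return;
    }

    switch (field_get(QFLAG_WED_TYPE, flags)) {
    case WED_TYPE_TX:
        printf("tx ring %u\n", field_get(QFLAG_WED_RING, flags));
        break;
    case WED_TYPE_TXFREE:
        puts("txfree ring");
        break;
    case WED_TYPE_RX:
        printf("rx ring %u\n", field_get(QFLAG_WED_RING, flags));
        break;
    }
}

int main(void)
{
    wed_setup(QFLAG_WED | (WED_TYPE_RX << 4) | 1);   /* prints "rx ring 1" */
    return 0;
}
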
697 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_alloc_queue() argument
703 spin_lock_init(&q->lock); in mt76_dma_alloc_queue()
704 spin_lock_init(&q->cleanup_lock); in mt76_dma_alloc_queue()
707 q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; in mt76_dma_alloc_queue()
709 q->regs = (void *)((u8 *)dev->mmio.regs + ring_base + idx * MT_RING_SIZE); in mt76_dma_alloc_queue()
711 q->ndesc = n_desc; in mt76_dma_alloc_queue()
712 q->buf_size = bufsize; in mt76_dma_alloc_queue()
713 q->hw_idx = idx; in mt76_dma_alloc_queue()
715 size = q->ndesc * sizeof(struct mt76_desc); in mt76_dma_alloc_queue()
716 q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL); in mt76_dma_alloc_queue()
717 if (!q->desc) in mt76_dma_alloc_queue()
720 size = q->ndesc * sizeof(*q->entry); in mt76_dma_alloc_queue()
721 q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); in mt76_dma_alloc_queue()
722 if (!q->entry) in mt76_dma_alloc_queue()
725 ret = mt76_create_page_pool(dev, q); in mt76_dma_alloc_queue()
729 ret = mt76_dma_wed_setup(dev, q, false); in mt76_dma_alloc_queue()
733 if (q->flags != MT_WED_Q_TXFREE) in mt76_dma_alloc_queue()
734 mt76_dma_queue_reset(dev, q); in mt76_dma_alloc_queue()
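
mt76_dma_alloc_queue() locates the queue's register window at ring_base + idx * MT_RING_SIZE inside the MMIO region, then sizes the coherent descriptor array and the entry metadata array from ndesc. A user-space approximation of that sizing and setup, with calloc in place of dmam_alloc_coherent()/devm_kzalloc() and an illustrative register stride:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define RING_STRIDE 0x10   /* per-queue register block stride (illustrative) */

struct desc { uint32_t buf0, ctrl, buf1, info; };   /* 16-byte stand-in for mt76_desc */
struct entry { void *skb, *txwi; };

struct ring {
    struct desc *desc;
    struct entry *entry;
    unsigned int ndesc;
    uintptr_t regs;       /* base of this queue's register block */
};

/* model of mt76_dma_alloc_queue(): derive the register window from the ring
 * base plus a per-index stride, then size both arrays from ndesc */
static int ring_alloc(struct ring *q, uintptr_t mmio_base, uintptr_t ring_base,
                      int idx, int n_desc)
{
    q->regs = mmio_base + ring_base + (uintptr_t)idx * RING_STRIDE;
    q->ndesc = n_desc;

    /* dmam_alloc_coherent()/devm_kzalloc() in the driver; plain calloc here */
    q->desc = calloc(n_desc, sizeof(*q->desc));
    q->entry = calloc(n_desc, sizeof(*q->entry));
    if (!q->desc || !q->entry) {
        free(q->desc);
        free(q->entry);
        return -1;
    }
    return 0;
}

int main(void)
{
    struct ring q;

    if (ring_alloc(&q, 0x10000, 0x4000, 2, 256))
        return 1;
    printf("regs at %#lx, %u descriptors\n", (unsigned long)q.regs, q.ndesc);
    free(q.desc);
    free(q.entry);
    return 0;
}
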
740 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_rx_cleanup() argument
745 if (!q->ndesc) in mt76_dma_rx_cleanup()
748 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
751 buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL); in mt76_dma_rx_cleanup()
758 if (q->rx_head) { in mt76_dma_rx_cleanup()
759 dev_kfree_skb(q->rx_head); in mt76_dma_rx_cleanup()
760 q->rx_head = NULL; in mt76_dma_rx_cleanup()
763 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
769 struct mt76_queue *q = &dev->q_rx[qid]; in mt76_dma_rx_reset() local
772 if (!q->ndesc) in mt76_dma_rx_reset()
775 for (i = 0; i < q->ndesc; i++) in mt76_dma_rx_reset()
776 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_rx_reset()
778 mt76_dma_rx_cleanup(dev, q); in mt76_dma_rx_reset()
781 mt76_dma_wed_setup(dev, q, true); in mt76_dma_rx_reset()
782 if (q->flags != MT_WED_Q_TXFREE) { in mt76_dma_rx_reset()
783 mt76_dma_sync_idx(dev, q); in mt76_dma_rx_reset()
784 mt76_dma_rx_fill(dev, q, false); in mt76_dma_rx_reset()
789 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, in mt76_add_fragment() argument
792 struct sk_buff *skb = q->rx_head; in mt76_add_fragment()
799 int offset = data - page_address(page) + q->buf_offset; in mt76_add_fragment()
801 int offset = (u8 *)data - (u8 *)page_address(page) + q->buf_offset; in mt76_add_fragment()
804 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); in mt76_add_fragment()
812 q->rx_head = NULL; in mt76_add_fragment()
814 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_add_fragment()
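
mt76_add_fragment() attaches a receive buffer to the pending skb with skb_add_rx_frag(), and the offset it passes is the buffer's distance from the start of its backing page plus q->buf_offset of headroom. The sketch below reproduces just that offset arithmetic; it masks the address to find the page start, which only models the simple single-page case (the kernel uses virt_to_head_page()):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ 4096
#define BUF_OFFSET 64   /* headroom, like q->buf_offset */

/* model of the offset math in mt76_add_fragment(): a receive buffer is a
 * fragment of a larger page, so its offset for skb_add_rx_frag() is the
 * distance from the page start plus the headroom in front of the frame */
static unsigned int frag_offset(const void *data)
{
    uintptr_t page_start = (uintptr_t)data & ~((uintptr_t)PAGE_SZ - 1);

    return (unsigned int)((uintptr_t)data - page_start) + BUF_OFFSET;
}

int main(void)
{
    static _Alignas(PAGE_SZ) unsigned char page[PAGE_SZ];

    /* a buffer carved out of the second half of the page */
    printf("offset %u\n", frag_offset(page + 2048));   /* prints 2112 */
    return 0;
}
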
820 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) in mt76_dma_rx_process() argument
829 q->flags == MT_WED_Q_TXFREE) { in mt76_dma_rx_process()
830 dma_idx = Q_READ(dev, q, dma_idx); in mt76_dma_rx_process()
839 if (q->tail == dma_idx) in mt76_dma_rx_process()
840 dma_idx = Q_READ(dev, q, dma_idx); in mt76_dma_rx_process()
842 if (q->tail == dma_idx) in mt76_dma_rx_process()
846 data = mt76_dma_dequeue(dev, q, false, &len, &info, &more, in mt76_dma_rx_process()
854 if (q->rx_head) in mt76_dma_rx_process()
855 data_len = q->buf_size; in mt76_dma_rx_process()
857 data_len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_process()
859 if (data_len < len + q->buf_offset) { in mt76_dma_rx_process()
860 dev_kfree_skb(q->rx_head); in mt76_dma_rx_process()
861 q->rx_head = NULL; in mt76_dma_rx_process()
865 if (q->rx_head) { in mt76_dma_rx_process()
866 mt76_add_fragment(dev, q, data, len, more, info); in mt76_dma_rx_process()
874 skb = napi_build_skb(data, q->buf_size); in mt76_dma_rx_process()
878 skb_reserve(skb, q->buf_offset); in mt76_dma_rx_process()
887 q->rx_head = skb; in mt76_dma_rx_process()
891 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_dma_rx_process()
898 mt76_dma_rx_fill(dev, q, true); in mt76_dma_rx_process()
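
mt76_dma_rx_process() reassembles frames that span several RX buffers: while the "more" flag is set the current skb is parked in q->rx_head and later buffers are appended to it, and oversized frames are dropped. The sketch below models only that control flow, copying bytes into a flat buffer instead of chaining page fragments onto an skb:

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

#define MAX_FRAME 8192

/* model of the q->rx_head reassembly: buffers flagged "more" accumulate
 * until the final buffer completes the frame, which is then delivered */
struct reassembly {
    unsigned char frame[MAX_FRAME];
    size_t len;
    bool in_progress;   /* corresponds to q->rx_head != NULL */
};

static void rx_deliver(const unsigned char *frame, size_t len)
{
    printf("delivered %zu-byte frame\n", len);   /* dev->drv->rx_skb() in the driver */
    (void)frame;
}

static void rx_buffer(struct reassembly *r, const void *data, size_t len, bool more)
{
    if (r->len + len > MAX_FRAME) {   /* oversized frame: drop and restart */
        r->len = 0;
        r->in_progress = false;
        return;
    }

    memcpy(r->frame + r->len, data, len);
    r->len += len;
    r->in_progress = more;

    if (!more) {
        rx_deliver(r->frame, r->len);
        r->len = 0;
    }
}

int main(void)
{
    struct reassembly r = { .len = 0 };
    unsigned char part[1500] = { 0 };

    rx_buffer(&r, part, sizeof(part), true);   /* first fragment, more to come */
    rx_buffer(&r, part, 500, false);           /* last fragment: 2000 bytes total */
    return 0;
}
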
990 struct mt76_queue *q = &dev->q_rx[i]; in mt76_dma_cleanup() local
993 mt76_dma_rx_cleanup(dev, q); in mt76_dma_cleanup()
995 page_pool_destroy(q->page_pool); in mt76_dma_cleanup()