Lines Matching full:q (drivers/net/wireless/mediatek/mt76/dma.c)
193 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_sync_idx() argument
195 Q_WRITE(q, desc_base, q->desc_dma); in mt76_dma_sync_idx()
196 if (q->flags & MT_QFLAG_WED_RRO_EN) in mt76_dma_sync_idx()
197 Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc); in mt76_dma_sync_idx()
199 Q_WRITE(q, ring_size, q->ndesc); in mt76_dma_sync_idx()
200 q->head = Q_READ(q, dma_idx); in mt76_dma_sync_idx()
201 q->tail = q->head; in mt76_dma_sync_idx()
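
The mt76_dma_sync_idx() lines above reprogram the descriptor base and ring-size registers and then restart the software head/tail from the DMA index the hardware reports. Below is a minimal userspace model of that resynchronisation step; ring_model, ring_sync_idx() and hw_dma_idx are illustrative names, not mt76 API.

#include <stdio.h>

/* Illustrative stand-in for the software ring state kept in struct mt76_queue. */
struct ring_model {
	unsigned int head;   /* next slot software will fill */
	unsigned int tail;   /* next slot software will reap */
	unsigned int ndesc;  /* ring size in descriptors */
};

/* Model of the sync step: after the descriptor base and ring size have been
 * (re)programmed, the software indices restart from the index the hardware
 * reports, just as q->head/q->tail are reloaded from dma_idx above. */
static void ring_sync_idx(struct ring_model *r, unsigned int hw_dma_idx)
{
	r->head = hw_dma_idx;
	r->tail = r->head;
}

int main(void)
{
	struct ring_model r = { .ndesc = 256 };

	ring_sync_idx(&r, 17); /* pretend the hardware index reads back as 17 */
	printf("head=%u tail=%u\n", r.head, r.tail);
	return 0;
}
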
204 void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, in __mt76_dma_queue_reset() argument
207 if (!q || !q->ndesc) in __mt76_dma_queue_reset()
210 if (!mt76_queue_is_wed_rro_ind(q)) { in __mt76_dma_queue_reset()
214 for (i = 0; i < q->ndesc; i++) in __mt76_dma_queue_reset()
215 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in __mt76_dma_queue_reset()
219 Q_WRITE(q, cpu_idx, 0); in __mt76_dma_queue_reset()
220 Q_WRITE(q, dma_idx, 0); in __mt76_dma_queue_reset()
222 mt76_dma_sync_idx(dev, q); in __mt76_dma_queue_reset()
225 void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_queue_reset() argument
227 __mt76_dma_queue_reset(dev, q, true); in mt76_dma_queue_reset()
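
__mt76_dma_queue_reset() above resets every descriptor to the "done" state (in the non-RRO case), zeroes both index registers, and then calls mt76_dma_sync_idx(). A standalone sketch of that reset pattern follows; MODEL_CTL_DMA_DONE and model_queue are stand-ins for MT_DMA_CTL_DMA_DONE and struct mt76_queue, not the driver's types.

#include <stdint.h>

#define MODEL_CTL_DMA_DONE  (1u << 31)  /* stand-in for MT_DMA_CTL_DMA_DONE */

struct model_desc {
	uint32_t ctrl;
};

struct model_queue {
	struct model_desc *desc;
	unsigned int ndesc;
	unsigned int head, tail, queued;
	unsigned int cpu_idx, dma_idx;  /* stand-ins for the ring index registers */
};

/* Model of the reset: every descriptor is put back in the "done" state (as the
 * non-RRO branch above does), both index registers are cleared, and the
 * software bookkeeping restarts from the now-zero hardware index. */
static void model_queue_reset(struct model_queue *q)
{
	unsigned int i;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = MODEL_CTL_DMA_DONE;

	q->cpu_idx = 0;
	q->dma_idx = 0;
	q->head = q->tail = q->dma_idx;
	q->queued = 0;
}
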
231 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_rx_buf() argument
234 struct mt76_queue_entry *entry = &q->entry[q->head]; in mt76_dma_add_rx_buf()
237 int idx = q->head; in mt76_dma_add_rx_buf()
241 if (mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_add_rx_buf()
244 rro_desc = (struct mt76_wed_rro_desc *)q->desc; in mt76_dma_add_rx_buf()
245 data = &rro_desc[q->head]; in mt76_dma_add_rx_buf()
249 desc = &q->desc[q->head]; in mt76_dma_add_rx_buf()
255 if (mt76_queue_is_wed_rx(q)) { in mt76_dma_add_rx_buf()
282 q->head = (q->head + 1) % q->ndesc; in mt76_dma_add_rx_buf()
283 q->queued++; in mt76_dma_add_rx_buf()
289 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_buf() argument
299 q->entry[q->head].txwi = DMA_DUMMY_DATA; in mt76_dma_add_buf()
300 q->entry[q->head].skip_buf0 = true; in mt76_dma_add_buf()
306 idx = q->head; in mt76_dma_add_buf()
307 next = (q->head + 1) % q->ndesc; in mt76_dma_add_buf()
309 desc = &q->desc[idx]; in mt76_dma_add_buf()
310 entry = &q->entry[idx]; in mt76_dma_add_buf()
346 q->head = next; in mt76_dma_add_buf()
347 q->queued++; in mt76_dma_add_buf()
350 q->entry[idx].txwi = txwi; in mt76_dma_add_buf()
351 q->entry[idx].skb = skb; in mt76_dma_add_buf()
352 q->entry[idx].wcid = 0xffff; in mt76_dma_add_buf()
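
Both enqueue paths above, mt76_dma_add_rx_buf() and mt76_dma_add_buf(), share the same ring discipline: fill the descriptor at q->head, record the owning buffer (and, on the tx side, the txwi/skb/wcid) in q->entry[idx], then advance head modulo ndesc and bump queued. A compact model of that step; model_ring and model_entry are illustrative, not the mt76 structures.

#include <stddef.h>
#include <stdint.h>

/* Illustrative per-slot bookkeeping, loosely modelled on struct mt76_queue_entry. */
struct model_entry {
	void *buf;        /* buffer handed to the "hardware" */
	uint16_t wcid;    /* the tx path above initialises this to 0xffff */
};

struct model_ring {
	struct model_entry *entry;
	unsigned int ndesc;
	unsigned int head, tail, queued;
};

/* Enqueue one buffer at q->head and return the slot index used.  This mirrors
 * the shared bookkeeping in the lines above: record the buffer in entry[idx],
 * then head = (head + 1) % ndesc and queued++.  Capacity checks are left to
 * the callers, as in the driver. */
static int model_ring_add(struct model_ring *q, void *buf)
{
	unsigned int idx = q->head;

	q->entry[idx].buf = buf;
	q->entry[idx].wcid = 0xffff;

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return (int)idx;
}
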
358 mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_tx_cleanup_idx() argument
361 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_tx_cleanup_idx()
379 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_kick_queue() argument
382 Q_WRITE(q, cpu_idx, q->head); in mt76_dma_kick_queue()
386 mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) in mt76_dma_tx_cleanup() argument
391 if (!q || !q->ndesc) in mt76_dma_tx_cleanup()
394 spin_lock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
398 last = Q_READ(q, dma_idx); in mt76_dma_tx_cleanup()
400 while (q->queued > 0 && q->tail != last) { in mt76_dma_tx_cleanup()
401 mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); in mt76_dma_tx_cleanup()
402 mt76_queue_tx_complete(dev, q, &entry); in mt76_dma_tx_cleanup()
409 if (!flush && q->tail == last) in mt76_dma_tx_cleanup()
410 last = Q_READ(q, dma_idx); in mt76_dma_tx_cleanup()
412 spin_unlock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
415 spin_lock_bh(&q->lock); in mt76_dma_tx_cleanup()
416 mt76_dma_sync_idx(dev, q); in mt76_dma_tx_cleanup()
417 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_cleanup()
418 spin_unlock_bh(&q->lock); in mt76_dma_tx_cleanup()
421 if (!q->queued) in mt76_dma_tx_cleanup()
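
mt76_dma_tx_cleanup() above reaps completed slots by walking q->tail toward the index the hardware last reported through dma_idx, re-reading that index when the tail catches up (unless flushing), while mt76_dma_kick_queue() publishes q->head back through cpu_idx. A userspace model of that reap-and-kick pattern; read_hw_dma_idx() and cpu_idx_reg are hypothetical stand-ins for the Q_READ()/Q_WRITE() register accesses.

/* Minimal model of the software ring state; not the mt76 types. */
struct model_ring {
	unsigned int head, tail, queued, ndesc;
};

/* Model of the completion loop: pop slots until the software tail reaches the
 * last index the hardware reported, re-reading that index once the tail
 * catches up (the driver skips the re-read when flushing).  Returns how many
 * slots were reaped. */
static unsigned int model_tx_cleanup(struct model_ring *q,
				     unsigned int (*read_hw_dma_idx)(void))
{
	unsigned int done = 0;
	unsigned int last = read_hw_dma_idx();

	while (q->queued > 0 && q->tail != last) {
		/* completion handling for the entry at q->tail would go here */
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
		done++;

		if (q->tail == last)
			last = read_hw_dma_idx();
	}

	return done;
}

/* Model of the kick: publish how far software has filled the ring. */
static void model_kick(struct model_ring *q, unsigned int *cpu_idx_reg)
{
	*cpu_idx_reg = q->head;
}
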
426 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_get_buf() argument
429 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_get_buf()
430 struct mt76_desc *desc = &q->desc[idx]; in mt76_dma_get_buf()
434 if (mt76_queue_is_wed_rro_ind(q)) in mt76_dma_get_buf()
450 if (mt76_queue_is_wed_rx(q)) { in mt76_dma_get_buf()
458 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
459 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
470 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
471 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
480 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, in mt76_dma_dequeue() argument
483 int idx = q->tail; in mt76_dma_dequeue()
486 if (!q->queued) in mt76_dma_dequeue()
489 if (mt76_queue_is_wed_rro_data(q)) in mt76_dma_dequeue()
492 if (!mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_dequeue()
494 q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_dequeue()
495 else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) in mt76_dma_dequeue()
499 q->tail = (q->tail + 1) % q->ndesc; in mt76_dma_dequeue()
500 q->queued--; in mt76_dma_dequeue()
502 return mt76_dma_get_buf(dev, q, idx, len, info, more, drop); in mt76_dma_dequeue()
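
mt76_dma_dequeue() above only releases the slot at q->tail once its descriptor carries the DMA-done bit, or unconditionally when flushing, and then advances the tail. A small self-contained model of that ownership check; MODEL_CTL_DMA_DONE is a stand-in flag and the little-endian handling of the real ctrl word is ignored.

#include <stdbool.h>
#include <stdint.h>

#define MODEL_CTL_DMA_DONE  (1u << 31)  /* stand-in for MT_DMA_CTL_DMA_DONE */

struct model_desc {
	uint32_t ctrl;
};

struct model_ring {
	struct model_desc *desc;
	unsigned int head, tail, queued, ndesc;
};

/* Model of the dequeue step: return the completed slot index, or -1 if the
 * slot at tail is still owned by hardware.  When flushing, the slot is
 * force-marked done first, as the non-RRO branch above does. */
static int model_dequeue(struct model_ring *q, bool flush)
{
	unsigned int idx = q->tail;

	if (!q->queued)
		return -1;

	if (flush)
		q->desc[idx].ctrl |= MODEL_CTL_DMA_DONE;
	else if (!(q->desc[idx].ctrl & MODEL_CTL_DMA_DONE))
		return -1;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return (int)idx;
}
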
506 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_tx_queue_skb_raw() argument
515 if (q->queued + 1 >= q->ndesc - 1) in mt76_dma_tx_queue_skb_raw()
526 spin_lock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
527 mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); in mt76_dma_tx_queue_skb_raw()
528 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_queue_skb_raw()
529 spin_unlock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
539 mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q, in mt76_dma_tx_queue_skb() argument
594 if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) { in mt76_dma_tx_queue_skb()
607 return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, in mt76_dma_tx_queue_skb()
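
The two admission checks above encode the tx ring's capacity rule: each descriptor carries up to two DMA buffers, so a frame scattered over nbuf buffers needs (nbuf + 1) / 2 slots, and one slot is always kept free so head never wraps onto tail. A sketch of that arithmetic; model_tx_ring_has_room() is an illustrative helper, not an mt76 function.

#include <stdbool.h>

/* Model of the tx admission checks above: 'queued' descriptors are currently
 * owned by hardware, the ring holds 'ndesc' descriptors, and each descriptor
 * carries up to two DMA buffers, so nbuf buffers need (nbuf + 1) / 2 slots.
 * One descriptor is always left unused. */
static bool model_tx_ring_has_room(unsigned int queued, unsigned int ndesc,
				   unsigned int nbuf)
{
	unsigned int needed = (nbuf + 1) / 2;

	return queued + needed < ndesc - 1;
}
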
639 mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_rx_fill_buf() argument
642 int len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_fill_buf()
645 if (!q->ndesc) in mt76_dma_rx_fill_buf()
648 while (q->queued < q->ndesc - 1) { in mt76_dma_rx_fill_buf()
653 if (mt76_queue_is_wed_rro_ind(q)) in mt76_dma_rx_fill_buf()
656 buf = mt76_get_page_pool_buf(q, &offset, q->buf_size); in mt76_dma_rx_fill_buf()
661 offset + q->buf_offset; in mt76_dma_rx_fill_buf()
663 qbuf.len = len - q->buf_offset; in mt76_dma_rx_fill_buf()
665 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) { in mt76_dma_rx_fill_buf()
672 if (frames || mt76_queue_is_wed_rx(q)) in mt76_dma_rx_fill_buf()
673 mt76_dma_kick_queue(dev, q); in mt76_dma_rx_fill_buf()
678 int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_rx_fill() argument
683 if (!q->ndesc) in mt76_dma_rx_fill()
686 spin_lock_bh(&q->lock); in mt76_dma_rx_fill()
687 frames = mt76_dma_rx_fill_buf(dev, q, allow_direct); in mt76_dma_rx_fill()
688 spin_unlock_bh(&q->lock); in mt76_dma_rx_fill()
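
mt76_dma_rx_fill_buf() above keeps topping the ring up while queued < ndesc - 1, reserving q->buf_offset at the front of each page-pool buffer, and the queue is kicked once anything was added. A self-contained model of that refill loop; model_alloc_buf() is a hypothetical stand-in for mt76_get_page_pool_buf().

#include <stdlib.h>

struct model_ring {
	unsigned int head, tail, queued, ndesc;
};

/* Hypothetical buffer source standing in for mt76_get_page_pool_buf(). */
static void *model_alloc_buf(size_t size)
{
	return malloc(size);
}

/* Model of the refill loop: add buffers until only one free slot remains and
 * report how many were queued, so the caller can decide whether to kick the
 * hardware afterwards. */
static int model_rx_fill(struct model_ring *q, size_t buf_size)
{
	int frames = 0;

	while (q->queued < q->ndesc - 1) {
		void *buf = model_alloc_buf(buf_size);

		if (!buf)
			break;

		/* descriptor and entry setup for slot q->head would go here */
		q->head = (q->head + 1) % q->ndesc;
		q->queued++;
		frames++;
	}

	return frames;
}
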
694 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_alloc_queue() argument
700 spin_lock_init(&q->lock); in mt76_dma_alloc_queue()
701 spin_lock_init(&q->cleanup_lock); in mt76_dma_alloc_queue()
704 q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; in mt76_dma_alloc_queue()
706 q->regs = (void *)((u8 *)dev->mmio.regs + ring_base + idx * MT_RING_SIZE); in mt76_dma_alloc_queue()
708 q->ndesc = n_desc; in mt76_dma_alloc_queue()
709 q->buf_size = bufsize; in mt76_dma_alloc_queue()
710 q->hw_idx = idx; in mt76_dma_alloc_queue()
712 size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc) in mt76_dma_alloc_queue()
714 q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size, in mt76_dma_alloc_queue()
715 &q->desc_dma, GFP_KERNEL); in mt76_dma_alloc_queue()
716 if (!q->desc) in mt76_dma_alloc_queue()
719 if (mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_alloc_queue()
723 rro_desc = (struct mt76_wed_rro_desc *)q->desc; in mt76_dma_alloc_queue()
724 for (i = 0; i < q->ndesc; i++) { in mt76_dma_alloc_queue()
732 size = q->ndesc * sizeof(*q->entry); in mt76_dma_alloc_queue()
733 q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); in mt76_dma_alloc_queue()
734 if (!q->entry) in mt76_dma_alloc_queue()
737 ret = mt76_create_page_pool(dev, q); in mt76_dma_alloc_queue()
741 ret = mt76_wed_dma_setup(dev, q, false); in mt76_dma_alloc_queue()
746 if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) || in mt76_dma_alloc_queue()
747 mt76_queue_is_wed_tx_free(q)) in mt76_dma_alloc_queue()
751 mt76_dma_queue_reset(dev, q); in mt76_dma_alloc_queue()
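
mt76_dma_alloc_queue() above sizes two per-ring arrays: ndesc descriptors from DMA-coherent memory (a different descriptor size is used for WED RRO indication rings) plus ndesc bookkeeping entries from normal memory. A sketch of just that sizing logic, using calloc() as a stand-in for dmam_alloc_coherent()/devm_kzalloc() and illustrative struct definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-ins for struct mt76_desc, struct mt76_wed_rro_desc and
 * struct mt76_queue_entry; field layout is not meant to match the driver. */
struct model_desc     { uint32_t buf0, ctrl, buf1, info; };
struct model_rro_desc { uint32_t data[4]; };
struct model_entry    { void *buf; };

struct model_ring {
	void *desc;
	struct model_entry *entry;
	unsigned int ndesc;
};

/* Model of the allocation step: ndesc descriptors (whose size depends on the
 * ring flavour) plus ndesc bookkeeping entries.  Returns 0 on success. */
static int model_alloc_queue(struct model_ring *q, unsigned int n_desc,
			     bool is_rro_ind)
{
	size_t desc_size = is_rro_ind ? sizeof(struct model_rro_desc)
				      : sizeof(struct model_desc);

	q->ndesc = n_desc;
	q->desc = calloc(n_desc, desc_size);          /* stands in for dmam_alloc_coherent() */
	if (!q->desc)
		return -1;

	q->entry = calloc(n_desc, sizeof(*q->entry)); /* stands in for devm_kzalloc() */
	if (!q->entry) {
		free(q->desc);
		return -1;
	}

	return 0;
}
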
757 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_rx_cleanup() argument
762 if (!q->ndesc) in mt76_dma_rx_cleanup()
766 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
767 buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL); in mt76_dma_rx_cleanup()
768 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
773 if (!mt76_queue_is_wed_rro(q)) in mt76_dma_rx_cleanup()
777 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
778 if (q->rx_head) { in mt76_dma_rx_cleanup()
779 dev_kfree_skb(q->rx_head); in mt76_dma_rx_cleanup()
780 q->rx_head = NULL; in mt76_dma_rx_cleanup()
783 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
789 struct mt76_queue *q = &dev->q_rx[qid]; in mt76_dma_rx_reset() local
791 if (!q->ndesc) in mt76_dma_rx_reset()
794 if (!mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_rx_reset()
797 for (i = 0; i < q->ndesc; i++) in mt76_dma_rx_reset()
798 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_rx_reset()
801 mt76_dma_rx_cleanup(dev, q); in mt76_dma_rx_reset()
804 mt76_wed_dma_setup(dev, q, true); in mt76_dma_rx_reset()
806 if (mt76_queue_is_wed_tx_free(q)) in mt76_dma_rx_reset()
810 mt76_queue_is_wed_rro(q)) in mt76_dma_rx_reset()
813 mt76_dma_sync_idx(dev, q); in mt76_dma_rx_reset()
814 mt76_dma_rx_fill_buf(dev, q, false); in mt76_dma_rx_reset()
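
mt76_dma_rx_cleanup() above drains the ring with flush-mode dequeues and drops any half-reassembled q->rx_head, and mt76_dma_rx_reset() then re-marks every descriptor as done, resyncs the indices and refills. A condensed model of that drain-and-reset sequence; model_free_buf() and the bufs[] array are illustrative, not mt76 structures.

#include <stdint.h>
#include <stdlib.h>

#define MODEL_CTL_DMA_DONE  (1u << 31)  /* stand-in for MT_DMA_CTL_DMA_DONE */

struct model_desc { uint32_t ctrl; };

struct model_ring {
	struct model_desc *desc;
	void **bufs;              /* buffer attached to each slot (illustrative) */
	unsigned int head, tail, queued, ndesc;
	void *rx_head;            /* partially reassembled frame, if any */
};

/* Hypothetical release hook standing in for the page-pool/skb free calls. */
static void model_free_buf(void *buf)
{
	free(buf);
}

/* Model of the rx drain and reset: pop every queued slot (flush semantics),
 * release its buffer, drop a partial reassembly, then mark all descriptors
 * done again so the ring can be refilled from a clean state. */
static void model_rx_cleanup_and_reset(struct model_ring *q)
{
	unsigned int i;

	while (q->queued) {
		model_free_buf(q->bufs[q->tail]);
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}

	if (q->rx_head) {
		model_free_buf(q->rx_head);
		q->rx_head = NULL;
	}

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = MODEL_CTL_DMA_DONE;

	q->head = q->tail = 0;
}
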
818 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, in mt76_add_fragment() argument
821 struct sk_buff *skb = q->rx_head; in mt76_add_fragment()
828 int offset = data - page_address(page) + q->buf_offset; in mt76_add_fragment()
830 int offset = (u8 *)data - (u8 *)page_address(page) + q->buf_offset; in mt76_add_fragment()
833 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); in mt76_add_fragment()
841 q->rx_head = NULL; in mt76_add_fragment()
843 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_add_fragment()
849 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) in mt76_dma_rx_process() argument
855 bool allow_direct = !mt76_queue_is_wed_rx(q); in mt76_dma_rx_process()
859 mt76_queue_is_wed_tx_free(q)) { in mt76_dma_rx_process()
860 dma_idx = Q_READ(q, dma_idx); in mt76_dma_rx_process()
869 if (q->tail == dma_idx) in mt76_dma_rx_process()
870 dma_idx = Q_READ(q, dma_idx); in mt76_dma_rx_process()
872 if (q->tail == dma_idx) in mt76_dma_rx_process()
876 data = mt76_dma_dequeue(dev, q, false, &len, &info, &more, in mt76_dma_rx_process()
884 if (q->rx_head) in mt76_dma_rx_process()
885 data_len = q->buf_size; in mt76_dma_rx_process()
887 data_len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_process()
889 if (data_len < len + q->buf_offset) { in mt76_dma_rx_process()
890 dev_kfree_skb(q->rx_head); in mt76_dma_rx_process()
891 q->rx_head = NULL; in mt76_dma_rx_process()
895 if (q->rx_head) { in mt76_dma_rx_process()
896 mt76_add_fragment(dev, q, data, len, more, info, in mt76_dma_rx_process()
905 skb = napi_build_skb(data, q->buf_size); in mt76_dma_rx_process()
909 skb_reserve(skb, q->buf_offset); in mt76_dma_rx_process()
918 q->rx_head = skb; in mt76_dma_rx_process()
922 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_dma_rx_process()
929 mt76_dma_rx_fill(dev, q, true); in mt76_dma_rx_process()
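
The receive loop above sizes each buffer differently depending on whether a frame is being started or q->rx_head is being extended: a head buffer only offers SKB_WITH_OVERHEAD(q->buf_size) bytes, a continuation fragment can use the full q->buf_size, and anything that does not fit after q->buf_offset is dropped. A small model of that length check; the parameter names and 'overhead' are illustrative.

#include <stdbool.h>
#include <stddef.h>

/* Model of the per-buffer length check in the rx loop above.  'have_rx_head'
 * mirrors q->rx_head being non-NULL (a multi-buffer frame is in progress) and
 * 'overhead' stands in for what SKB_WITH_OVERHEAD() subtracts from buf_size. */
static bool model_rx_len_ok(bool have_rx_head, size_t buf_size, size_t overhead,
			    size_t buf_offset, size_t len)
{
	size_t data_len = have_rx_head ? buf_size : buf_size - overhead;

	/* the driver drops the frame (and any partial q->rx_head) when the
	 * received length plus the reserved headroom does not fit */
	return data_len >= len + buf_offset;
}
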
1039 struct mt76_queue *q = &dev->q_rx[i]; in mt76_dma_cleanup() local
1042 mt76_queue_is_wed_rro(q)) in mt76_dma_cleanup()
1046 mt76_dma_rx_cleanup(dev, q); in mt76_dma_cleanup()
1048 page_pool_destroy(q->page_pool); in mt76_dma_cleanup()