Lines matching full identifier: q
320 mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb, in mt76u_fill_rx_sg() argument
329 data = mt76_get_page_pool_buf(q, &offset, q->buf_size); in mt76u_fill_rx_sg()
333 sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size, in mt76u_fill_rx_sg()
346 urb->transfer_buffer_length = urb->num_sgs * q->buf_size; in mt76u_fill_rx_sg()
353 mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q, in mt76u_refill_rx() argument
356 enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN]; in mt76u_refill_rx()
360 return mt76u_fill_rx_sg(dev, q, urb, nsgs); in mt76u_refill_rx()
362 urb->transfer_buffer_length = q->buf_size; in mt76u_refill_rx()
363 urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size); in mt76u_refill_rx()
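The matches above come from mt76's USB transport helpers (the mt76u_* functions). In mt76u_refill_rx() there are two sizing paths: when scatter-gather is used, mt76u_fill_rx_sg() attaches nsgs page-pool buffers and the URB length becomes num_sgs * q->buf_size; otherwise a single page-pool buffer of q->buf_size is attached directly. The qid/scatter-gather check that picks the path is not shown in full above, so the sketch below reduces it to a plain use_sg flag and uses simplified stand-in types rather than the driver's struct mt76_queue and struct urb.

#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-ins; only the fields visible in the listing are kept. */
struct rxq_model {
        size_t buf_size;                /* per-buffer size (PAGE_SIZE in mt76u_alloc_rx_queue()) */
};

struct urb_model {
        unsigned int num_sgs;           /* scatter-gather entries attached */
        size_t transfer_buffer_length;
};

/* Mirror the sizing decision: an SG URB covers nsgs buffers, a linear URB
 * covers exactly one. Assumes all nsgs buffers were obtained successfully. */
static void model_refill_len(struct rxq_model *q, struct urb_model *urb,
                             unsigned int nsgs, bool use_sg)
{
        if (use_sg) {
                urb->num_sgs = nsgs;
                urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
        } else {
                urb->num_sgs = 0;
                urb->transfer_buffer_length = q->buf_size;
        }
}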
390 mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q, in mt76u_rx_urb_alloc() argument
393 enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN]; in mt76u_rx_urb_alloc()
401 return mt76u_refill_rx(dev, q, e->urb, sg_size); in mt76u_rx_urb_alloc()
438 mt76u_get_next_rx_entry(struct mt76_queue *q) in mt76u_get_next_rx_entry() argument
443 spin_lock_irqsave(&q->lock, flags); in mt76u_get_next_rx_entry()
444 if (q->queued > 0) { in mt76u_get_next_rx_entry()
445 urb = q->entry[q->tail].urb; in mt76u_get_next_rx_entry()
446 q->tail = (q->tail + 1) % q->ndesc; in mt76u_get_next_rx_entry()
447 q->queued--; in mt76u_get_next_rx_entry()
449 spin_unlock_irqrestore(&q->lock, flags); in mt76u_get_next_rx_entry()
558 struct mt76_queue *q = urb->context; in mt76u_complete_rx() local
577 spin_lock_irqsave(&q->lock, flags); in mt76u_complete_rx()
578 if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch")) in mt76u_complete_rx()
581 q->head = (q->head + 1) % q->ndesc; in mt76u_complete_rx()
582 q->queued++; in mt76u_complete_rx()
585 spin_unlock_irqrestore(&q->lock, flags); in mt76u_complete_rx()
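Taken together, mt76u_complete_rx() and mt76u_get_next_rx_entry() implement a single-producer/single-consumer ring over q->entry[]: the URB completion handler checks that the finished URB sits at q->head, then advances head and bumps q->queued under q->lock, while the RX worker consumes from q->tail and drops q->queued. A compact user-space model of that discipline follows; a pthread mutex stands in for the driver's spinlock and the types are simplified, not the driver's.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct rx_entry_model {
        void *urb;
};

struct ring_model {
        pthread_mutex_t lock;           /* stands in for q->lock */
        struct rx_entry_model *entry;
        int head, tail, queued, ndesc;
};

/* Producer side, as in mt76u_complete_rx(): the completed URB must be the
 * one at q->head ("rx urb mismatch" otherwise); head then advances modulo
 * ndesc and the queued count grows. */
static bool ring_complete(struct ring_model *q, void *urb)
{
        bool ok;

        pthread_mutex_lock(&q->lock);
        ok = q->entry[q->head].urb == urb;
        if (ok) {
                q->head = (q->head + 1) % q->ndesc;
                q->queued++;
        }
        pthread_mutex_unlock(&q->lock);

        return ok;
}

/* Consumer side, as in mt76u_get_next_rx_entry(): take the URB at q->tail
 * if anything is queued, NULL otherwise. */
static void *ring_get_next(struct ring_model *q)
{
        void *urb = NULL;

        pthread_mutex_lock(&q->lock);
        if (q->queued > 0) {
                urb = q->entry[q->tail].urb;
                q->tail = (q->tail + 1) % q->ndesc;
                q->queued--;
        }
        pthread_mutex_unlock(&q->lock);

        return urb;
}

Because both indices only move forward modulo ndesc and queued bounds the distance between them, the worker never reads an entry the completion handler has not yet produced.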
602 mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76u_process_rx_queue() argument
604 int qid = q - &dev->q_rx[MT_RXQ_MAIN]; in mt76u_process_rx_queue()
609 urb = mt76u_get_next_rx_entry(q); in mt76u_process_rx_queue()
613 count = mt76u_process_rx_entry(dev, urb, q->buf_size); in mt76u_process_rx_queue()
615 err = mt76u_refill_rx(dev, q, urb, count); in mt76u_process_rx_queue()
643 struct mt76_queue *q = &dev->q_rx[qid]; in mt76u_submit_rx_buffers() local
647 spin_lock_irqsave(&q->lock, flags); in mt76u_submit_rx_buffers()
648 for (i = 0; i < q->ndesc; i++) { in mt76u_submit_rx_buffers()
649 err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb); in mt76u_submit_rx_buffers()
653 q->head = q->tail = 0; in mt76u_submit_rx_buffers()
654 q->queued = 0; in mt76u_submit_rx_buffers()
655 spin_unlock_irqrestore(&q->lock, flags); in mt76u_submit_rx_buffers()
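mt76u_submit_rx_buffers() arms the ring: it submits every pre-allocated URB and only then resets head, tail and queued to zero, all while holding q->lock, so completions that start firing immediately still see consistent indices. A hedged sketch of that ordering, reusing ring_model from the previous sketch; the submit callback stands in for mt76u_submit_rx_buf(), whose body is not shown above.

/* Hand all ndesc URBs to the host controller, then reset the ring indices
 * while still holding the lock; a failed submit stops the loop but the
 * reset still happens, matching the listing above. */
static int ring_arm(struct ring_model *q, int (*submit)(struct rx_entry_model *))
{
        int i, err = 0;

        pthread_mutex_lock(&q->lock);
        for (i = 0; i < q->ndesc; i++) {
                err = submit(&q->entry[i]);
                if (err < 0)
                        break;
        }
        q->head = q->tail = 0;
        q->queued = 0;
        pthread_mutex_unlock(&q->lock);

        return err;
}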
663 struct mt76_queue *q = &dev->q_rx[qid]; in mt76u_alloc_rx_queue() local
666 err = mt76_create_page_pool(dev, q); in mt76u_alloc_rx_queue()
670 spin_lock_init(&q->lock); in mt76u_alloc_rx_queue()
671 q->entry = devm_kcalloc(dev->dev, in mt76u_alloc_rx_queue()
672 MT_NUM_RX_ENTRIES, sizeof(*q->entry), in mt76u_alloc_rx_queue()
674 if (!q->entry) in mt76u_alloc_rx_queue()
677 q->ndesc = MT_NUM_RX_ENTRIES; in mt76u_alloc_rx_queue()
678 q->buf_size = PAGE_SIZE; in mt76u_alloc_rx_queue()
680 for (i = 0; i < q->ndesc; i++) { in mt76u_alloc_rx_queue()
681 err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]); in mt76u_alloc_rx_queue()
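mt76u_alloc_rx_queue() sets everything up front: a page pool, the lock, an entry array of MT_NUM_RX_ENTRIES, q->buf_size = PAGE_SIZE, and one URB per entry via mt76u_rx_urb_alloc(). The sketch below keeps the same shape in user space, again reusing ring_model; calloc replaces devm_kcalloc, the entry count is a placeholder, and page-pool/URB setup is reduced to a per-entry init callback.

#include <stdlib.h>

#define MODEL_NUM_RX_ENTRIES 128        /* placeholder for MT_NUM_RX_ENTRIES */

/* Allocate the entry array, size the ring and initialise each slot,
 * mirroring the shape of mt76u_alloc_rx_queue(). */
static int ring_alloc(struct ring_model *q,
                      int (*init_entry)(struct rx_entry_model *))
{
        int i, err;

        pthread_mutex_init(&q->lock, NULL);
        q->entry = calloc(MODEL_NUM_RX_ENTRIES, sizeof(*q->entry));
        if (!q->entry)
                return -1;

        q->ndesc = MODEL_NUM_RX_ENTRIES;

        for (i = 0; i < q->ndesc; i++) {
                err = init_entry(&q->entry[i]);
                if (err < 0)
                        return err;
        }

        return 0;
}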
696 mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76u_free_rx_queue() argument
700 for (i = 0; i < q->ndesc; i++) { in mt76u_free_rx_queue()
701 if (!q->entry[i].urb) in mt76u_free_rx_queue()
704 mt76u_urb_free(q->entry[i].urb); in mt76u_free_rx_queue()
705 q->entry[i].urb = NULL; in mt76u_free_rx_queue()
707 page_pool_destroy(q->page_pool); in mt76u_free_rx_queue()
708 q->page_pool = NULL; in mt76u_free_rx_queue()
728 struct mt76_queue *q = &dev->q_rx[i]; in mt76u_stop_rx() local
731 for (j = 0; j < q->ndesc; j++) in mt76u_stop_rx()
732 usb_poison_urb(q->entry[j].urb); in mt76u_stop_rx()
742 struct mt76_queue *q = &dev->q_rx[i]; in mt76u_resume_rx() local
745 for (j = 0; j < q->ndesc; j++) in mt76u_resume_rx()
746 usb_unpoison_urb(q->entry[j].urb); in mt76u_resume_rx()
764 struct mt76_queue *q; in mt76u_status_worker() local
771 q = dev->phy.q_tx[i]; in mt76u_status_worker()
772 if (!q) in mt76u_status_worker()
775 while (q->queued > 0) { in mt76u_status_worker()
776 if (!q->entry[q->tail].done) in mt76u_status_worker()
779 entry = q->entry[q->tail]; in mt76u_status_worker()
780 q->entry[q->tail].done = false; in mt76u_status_worker()
782 mt76_queue_tx_complete(dev, q, &entry); in mt76u_status_worker()
785 if (!q->queued) in mt76u_status_worker()
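The mt76u_status_worker() lines show how TX completions are reaped in order: entries are consumed from q->tail only while their done flag is set, the flag is cleared before mt76_queue_tx_complete() runs, and the loop stops at the first entry whose URB has not completed yet. A small model of that reap loop; done is assumed to be set elsewhere by the URB completion callback, and the tail/queued bookkeeping that the driver hides inside mt76_queue_tx_complete() is written out explicitly here.

#include <stdbool.h>

struct tx_entry_model {
        bool done;                      /* set by the completion callback */
        void *skb;
};

struct tx_ring_model {
        struct tx_entry_model *entry;
        int head, tail, first, queued, ndesc;
};

/* Reap in-order completions from the tail, as in mt76u_status_worker(). */
static void tx_reap(struct tx_ring_model *q,
                    void (*complete)(struct tx_entry_model *))
{
        while (q->queued > 0) {
                struct tx_entry_model e;

                if (!q->entry[q->tail].done)
                        break;

                e = q->entry[q->tail];
                q->entry[q->tail].done = false;

                complete(&e);
                q->tail = (q->tail + 1) % q->ndesc;
                q->queued--;
        }
}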
853 mt76u_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q, in mt76u_tx_queue_skb() argument
861 u16 idx = q->head; in mt76u_tx_queue_skb()
864 if (q->queued == q->ndesc) in mt76u_tx_queue_skb()
872 err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb); in mt76u_tx_queue_skb()
876 mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb, in mt76u_tx_queue_skb()
877 mt76u_complete_tx, &q->entry[idx]); in mt76u_tx_queue_skb()
879 q->head = (q->head + 1) % q->ndesc; in mt76u_tx_queue_skb()
880 q->entry[idx].skb = tx_info.skb; in mt76u_tx_queue_skb()
881 q->entry[idx].wcid = 0xffff; in mt76u_tx_queue_skb()
882 q->queued++; in mt76u_tx_queue_skb()
887 static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) in mt76u_tx_kick() argument
892 while (q->first != q->head) { in mt76u_tx_kick()
893 urb = q->entry[q->first].urb; in mt76u_tx_kick()
905 q->first = (q->first + 1) % q->ndesc; in mt76u_tx_kick()
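mt76u_tx_queue_skb() and mt76u_tx_kick() show a three-index scheme on the TX ring: head is where new frames are staged, first..head is the window of staged-but-not-yet-submitted URBs that the kick walks, and tail is where completions are reaped by the status worker above. A sketch of both steps, reusing tx_ring_model from the previous sketch; URB setup (mt76u_tx_setup_buffers(), mt76u_fill_bulk_urb()) and the wcid bookkeeping are omitted, and the submit callback stands in for usb_submit_urb().

/* Stage a frame at 'head': refuse when the ring is full (the driver
 * returns -ENOSPC), record the frame, then advance head and queued. */
static int tx_enqueue(struct tx_ring_model *q, void *skb)
{
        int idx = q->head;

        if (q->queued == q->ndesc)
                return -1;

        q->head = (q->head + 1) % q->ndesc;
        q->entry[idx].skb = skb;
        q->queued++;

        return idx;
}

/* Submit everything staged between 'first' and 'head', advancing 'first'
 * as each URB is handed over; the driver breaks out on a submit error. */
static void tx_kick(struct tx_ring_model *q,
                    int (*submit)(struct tx_entry_model *))
{
        while (q->first != q->head) {
                if (submit(&q->entry[q->first]) < 0)
                        break;

                q->first = (q->first + 1) % q->ndesc;
        }
}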
910 mt76u_ac_to_hwq(struct mt76_dev *dev, struct mt76_queue *q, u8 qid) in mt76u_ac_to_hwq() argument
924 q->hw_idx = lmac_queue_map[ac]; in mt76u_ac_to_hwq()
925 q->ep = q->hw_idx + 1; in mt76u_ac_to_hwq()
930 q->hw_idx = mt76_ac_to_hwq(ac); in mt76u_ac_to_hwq()
931 q->ep = qid == MT_TXQ_PSD ? MT_EP_OUT_HCCA : q->hw_idx + 1; in mt76u_ac_to_hwq()
934 q->hw_idx = mt76_ac_to_hwq(ac); in mt76u_ac_to_hwq()
935 q->ep = q->hw_idx + 1; in mt76u_ac_to_hwq()
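mt76u_ac_to_hwq() ties the access category to a hardware queue index and a USB OUT endpoint: on every path shown q->ep is q->hw_idx + 1, except that one path routes the MT_TXQ_PSD queue out through MT_EP_OUT_HCCA. The actual AC-to-queue tables (lmac_queue_map[], mt76_ac_to_hwq()) are chip-specific and not listed above, so the sketch below only illustrates the endpoint derivation; model_ac_to_hwq() is a hypothetical identity-style mapping, not the driver's.

/* Hypothetical stand-in for the chip-specific AC -> hardware queue table. */
static int model_ac_to_hwq(unsigned int ac)
{
        return (int)(ac & 3);
}

/* Derive the endpoint from the hardware queue index as the listing shows:
 * ep = hw_idx + 1, with a dedicated endpoint for the PSD queue on the path
 * that uses it. 'is_psd' and 'hcca_ep' are illustrative parameters. */
static void model_txq_setup(int *hw_idx, int *ep, unsigned int ac,
                            int is_psd, int hcca_ep)
{
        *hw_idx = model_ac_to_hwq(ac);
        *ep = is_psd ? hcca_ep : *hw_idx + 1;
}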
945 struct mt76_queue *q; in mt76u_alloc_tx() local
948 q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL); in mt76u_alloc_tx()
949 if (!q) in mt76u_alloc_tx()
952 spin_lock_init(&q->lock); in mt76u_alloc_tx()
953 mt76u_ac_to_hwq(dev, q, i); in mt76u_alloc_tx()
954 dev->phy.q_tx[i] = q; in mt76u_alloc_tx()
956 q->entry = devm_kcalloc(dev->dev, in mt76u_alloc_tx()
957 MT_NUM_TX_ENTRIES, sizeof(*q->entry), in mt76u_alloc_tx()
959 if (!q->entry) in mt76u_alloc_tx()
962 q->ndesc = MT_NUM_TX_ENTRIES; in mt76u_alloc_tx()
963 for (j = 0; j < q->ndesc; j++) { in mt76u_alloc_tx()
964 err = mt76u_urb_alloc(dev, &q->entry[j], in mt76u_alloc_tx()
980 struct mt76_queue *q; in mt76u_free_tx() local
983 q = dev->phy.q_tx[i]; in mt76u_free_tx()
984 if (!q) in mt76u_free_tx()
987 for (j = 0; j < q->ndesc; j++) { in mt76u_free_tx()
988 usb_free_urb(q->entry[j].urb); in mt76u_free_tx()
989 q->entry[j].urb = NULL; in mt76u_free_tx()
1004 struct mt76_queue *q; in mt76u_stop_tx() local
1010 q = dev->phy.q_tx[i]; in mt76u_stop_tx()
1011 if (!q) in mt76u_stop_tx()
1014 for (j = 0; j < q->ndesc; j++) in mt76u_stop_tx()
1015 usb_kill_urb(q->entry[j].urb); in mt76u_stop_tx()
1024 q = dev->phy.q_tx[i]; in mt76u_stop_tx()
1025 if (!q) in mt76u_stop_tx()
1028 while (q->queued > 0) { in mt76u_stop_tx()
1029 entry = q->entry[q->tail]; in mt76u_stop_tx()
1030 q->entry[q->tail].done = false; in mt76u_stop_tx()
1031 mt76_queue_tx_complete(dev, q, &entry); in mt76u_stop_tx()
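mt76u_stop_tx() first kills every URB with usb_kill_urb() and then flushes whatever is still accounted for in the ring, clearing each entry's done flag and completing it until q->queued reaches zero, so no frame is left dangling. A short drain sketch, again reusing tx_ring_model; killing the URBs has no user-space analogue here and is left out.

/* Flush all outstanding entries after the URBs have been killed: every
 * queued entry is completed unconditionally, as in mt76u_stop_tx(). */
static void tx_drain(struct tx_ring_model *q,
                     void (*complete)(struct tx_entry_model *))
{
        while (q->queued > 0) {
                struct tx_entry_model e = q->entry[q->tail];

                q->entry[q->tail].done = false;
                complete(&e);
                q->tail = (q->tail + 1) % q->ndesc;
                q->queued--;
        }
}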