Lines matching refs:urb (mt76 USB driver, drivers/net/wireless/mediatek/mt76/usb.c), grouped by function

mt76u_fill_rx_sg(), lines 320-347:
  320  mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
  333          sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
  340          for (j = nsgs; j < urb->num_sgs; j++)
  341              mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
  342          urb->num_sgs = i;
  345      urb->num_sgs = max_t(int, i, urb->num_sgs);
  346      urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
  347      sg_init_marker(urb->sg, urb->num_sgs);
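
Taken together, lines 320-347 show mt76u_fill_rx_sg() filling the URB's scatterlist with page-pool buffers and trimming the list when allocation falls short. A minimal reconstruction follows; the allocation loop body and the return convention are assumptions, only the lines listed above are confirmed:

    static int
    mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
                     int nsgs)
    {
            int i;

            for (i = 0; i < nsgs; i++) {
                    void *data;
                    int offset;

                    /* assumed: buffers come from the queue's page pool */
                    data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
                    if (!data)
                            break;

                    sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
                                offset);
            }

            if (i < nsgs) {
                    int j;

                    /* allocation fell short: drop entries filled on an earlier
                     * round that now sit past the shortened list
                     */
                    for (j = nsgs; j < urb->num_sgs; j++)
                            mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
                    urb->num_sgs = i;
            }

            urb->num_sgs = max_t(int, i, urb->num_sgs);
            urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
            sg_init_marker(urb->sg, urb->num_sgs);

            return i ? 0 : -ENOMEM; /* assumed error convention */
    }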

mt76u_refill_rx(), lines 354-365:
  354                  struct urb *urb, int nsgs)
  360          return mt76u_fill_rx_sg(dev, q, urb, nsgs);
  362      urb->transfer_buffer_length = q->buf_size;
  363      urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
  365      return urb->transfer_buffer ? 0 : -ENOMEM;
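
Lines 354-365 show two refill paths: a scatter-gather path that delegates to mt76u_fill_rx_sg(), and a contiguous path that attaches a single page-pool buffer. A minimal sketch, assuming the SG path is gated on a dev->usb.sg_en flag (the gate itself is not in the matches):

    static int
    mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
                    struct urb *urb, int nsgs)
    {
            int offset;

            if (dev->usb.sg_en) /* assumed gate for the scatterlist path */
                    return mt76u_fill_rx_sg(dev, q, urb, nsgs);

            /* contiguous fallback: one page-pool buffer per URB */
            urb->transfer_buffer_length = q->buf_size;
            urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);

            return urb->transfer_buffer ? 0 : -ENOMEM;
    }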

mt76u_urb_alloc(), lines 372-384:
  372      unsigned int size = sizeof(struct urb);
  377      e->urb = kzalloc(size, GFP_KERNEL);
  378      if (!e->urb)
  381      usb_init_urb(e->urb);
  384          e->urb->sg = (struct scatterlist *)(e->urb + 1);
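
Lines 372-384 show the allocation trick: one kzalloc() sized for the struct urb plus a trailing scatterlist array, initialized with usb_init_urb() rather than usb_alloc_urb(). A sketch, with the sg_max_size parameter and its gating assumed:

    static int
    mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
                    int sg_max_size)
    {
            unsigned int size = sizeof(struct urb);

            if (sg_max_size > 0) /* assumed: only SG-capable queues add the array */
                    size += sg_max_size * sizeof(struct scatterlist);

            e->urb = kzalloc(size, GFP_KERNEL);
            if (!e->urb)
                    return -ENOMEM;

            usb_init_urb(e->urb);

            if (sg_max_size > 0)
                    /* scatterlist array lives right behind the urb itself */
                    e->urb->sg = (struct scatterlist *)(e->urb + 1);

            return 0;
    }

Because the urb and its scatterlist share one allocation, the final usb_free_urb() (which kfrees the urb) releases both in one step.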

mt76u_rx_urb_alloc(), line 401:
  401      return mt76u_refill_rx(dev, q, e->urb, sg_size);

mt76u_urb_free(), lines 404-414:
  404  static void mt76u_urb_free(struct urb *urb)
  408      for (i = 0; i < urb->num_sgs; i++)
  409          mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
  411      if (urb->transfer_buffer)
  412          mt76_put_page_pool_buf(urb->transfer_buffer, false);
  414      usb_free_urb(urb);
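
mt76u_urb_free() is almost fully visible above; completed here for readability (only the loop-variable declaration is added):

    static void mt76u_urb_free(struct urb *urb)
    {
            int i;

            /* return every scatterlist page to the page pool */
            for (i = 0; i < urb->num_sgs; i++)
                    mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);

            /* contiguous (non-SG) buffer, if one was attached */
            if (urb->transfer_buffer)
                    mt76_put_page_pool_buf(urb->transfer_buffer, false);

            usb_free_urb(urb);
    }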

mt76u_fill_bulk_urb(), lines 419-434:
  419                  struct urb *urb, usb_complete_t complete_fn,
  431      urb->dev = udev;
  432      urb->pipe = pipe;
  433      urb->complete = complete_fn;
  434      urb->context = context;
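
Lines 419-434 show mt76u_fill_bulk_urb() binding a URB to a device, pipe, completion handler, and context. The pipe computation below is an assumption, kept consistent with mt76u_submit_rx_buf() passing USB_DIR_IN plus an endpoint index at line 594:

    static void
    mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
                        struct urb *urb, usb_complete_t complete_fn,
                        void *context)
    {
            struct usb_device *udev = to_usb_device(dev->dev);
            unsigned int pipe;

            if (dir == USB_DIR_IN) /* assumed in_ep/out_ep endpoint tables */
                    pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
            else
                    pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

            urb->dev = udev;
            urb->pipe = pipe;
            urb->complete = complete_fn;
            urb->context = context;
    }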

mt76u_get_next_rx_entry(), lines 437-451:
  437  static struct urb *
  440      struct urb *urb = NULL;
  445          urb = q->entry[q->tail].urb;
  451      return urb;
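
Lines 437-451 show a consumer popping the URB at q->tail. The locking and ring arithmetic below are assumptions; only the entry lookup and the return are confirmed by the matches:

    static struct urb *
    mt76u_get_next_rx_entry(struct mt76_queue *q)
    {
            struct urb *urb = NULL;
            unsigned long flags;

            spin_lock_irqsave(&q->lock, flags);
            if (q->queued > 0) {
                    urb = q->entry[q->tail].urb;
                    q->tail = (q->tail + 1) % q->ndesc;
                    q->queued--;
            }
            spin_unlock_irqrestore(&q->lock, flags);

            return urb;
    }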

mt76u_process_rx_entry(), lines 512-543:
  512  mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
  515      u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
  516      int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
  523      len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
  539      while (len > 0 && nsgs < urb->num_sgs) {
  540          data_len = min_t(int, len, urb->sg[nsgs].length);
  542                          sg_page(&urb->sg[nsgs]),
  543                          urb->sg[nsgs].offset, data_len,
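
Lines 515-543 show how a frame spanning several scatterlist segments is assembled without copying: the first segment seeds the skb, and the remainder is attached as page fragments. A sketch of the loop, where skb, len, and buf_size are assumed to come from the surrounding (unmatched) code:

    int nsgs = 1, data_len;

    while (len > 0 && nsgs < urb->num_sgs) {
            data_len = min_t(int, len, urb->sg[nsgs].length);
            /* attach the segment's page as an skb fragment, no memcpy */
            skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                            sg_page(&urb->sg[nsgs]),
                            urb->sg[nsgs].offset, data_len,
                            buf_size);
            len -= data_len;
            nsgs++;
    }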

mt76u_complete_rx(), lines 555-578:
  555  static void mt76u_complete_rx(struct urb *urb)
  557      struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
  558      struct mt76_queue *q = urb->context;
  561      trace_rx_urb(dev, urb);
  563      switch (urb->status) {
  571                  urb->status);
  578      if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
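
Lines 555-578 show the RX completion handler checking urb->status and then verifying that the completed URB is the one expected at q->head. The status cases below are assumptions in the usual style of USB completion handlers (ignore unlink and shutdown, log the rest):

    switch (urb->status) {
    case -ECONNRESET: /* urb was unlinked */
    case -ESHUTDOWN:  /* device is going away */
    case -ENOENT:
    case -EPROTO:
            return;
    default:
            dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                                urb->status);
            fallthrough;
    case 0:
            break;
    }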

mt76u_submit_rx_buf(), lines 590-598:
  590              struct urb *urb)
  594      mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
  596      trace_submit_urb(dev, urb);
  598      return usb_submit_urb(urb, GFP_ATOMIC);

mt76u_process_rx_queue(), lines 605-619:
  605      struct urb *urb;
  609          urb = mt76u_get_next_rx_entry(q);
  610          if (!urb)
  613          count = mt76u_process_rx_entry(dev, urb, q->buf_size);
  615          err = mt76u_refill_rx(dev, q, urb, count);
  619          mt76u_submit_rx_buf(dev, qid, urb);
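
Lines 605-619 outline the RX worker loop: drain each completed URB, refill its buffers, and resubmit it. A sketch of the loop, with the declarations of qid, count, and err and the error handling assumed:

    while (true) {
            urb = mt76u_get_next_rx_entry(q);
            if (!urb)
                    break;

            count = mt76u_process_rx_entry(dev, urb, q->buf_size);
            if (count > 0) {
                    err = mt76u_refill_rx(dev, q, urb, count);
                    if (err < 0) /* assumed: stop on refill failure */
                            break;
            }
            mt76u_submit_rx_buf(dev, qid, urb);
    }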

mt76u_submit_rx_buffers(), line 649:
  649          err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);

mt76u_free_rx_queue(), lines 701-705:
  701          if (!q->entry[i].urb)
  704          mt76u_urb_free(q->entry[i].urb);
  705          q->entry[i].urb = NULL;

mt76u_stop_rx(), line 732:
  732              usb_poison_urb(q->entry[j].urb);

mt76u_resume_rx(), line 746:
  746              usb_unpoison_urb(q->entry[j].urb);

mt76u_complete_tx(), lines 821-827:
  821  static void mt76u_complete_tx(struct urb *urb)
  823      struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
  824      struct mt76_queue_entry *e = urb->context;
  826      if (mt76u_urb_error(urb))
  827          dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
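
Lines 821-827 show the TX completion handler logging failed URBs. The completion bookkeeping below (marking the entry done and waking a status worker) is an assumption beyond the matched lines:

    static void mt76u_complete_tx(struct urb *urb)
    {
            struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
            struct mt76_queue_entry *e = urb->context;

            if (mt76u_urb_error(urb))
                    dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
            e->done = true; /* assumed completion flag */

            mt76_worker_schedule(&dev->usb.status_worker); /* assumed */
    }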

mt76u_tx_setup_buffers(), lines 835-849:
  835              struct urb *urb)
  837      urb->transfer_buffer_length = skb->len;
  840          urb->transfer_buffer = skb->data;
  844      sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
  845      urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
  846      if (!urb->num_sgs)
  849      return urb->num_sgs;
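
Lines 835-849 show both TX buffer setups: a contiguous path that points the URB straight at skb->data, and an SG path that maps the skb into urb->sg with skb_to_sgvec(). A sketch, assuming the same dev->usb.sg_en gate as on the RX side:

    static int
    mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
                           struct urb *urb)
    {
            urb->transfer_buffer_length = skb->len;

            if (!dev->usb.sg_en) { /* assumed gate */
                    urb->transfer_buffer = skb->data;
                    return 0;
            }

            sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
            urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
            if (!urb->num_sgs)
                    return -ENOMEM;

            /* a negative skb_to_sgvec() result propagates to the caller as-is */
            return urb->num_sgs;
    }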

mt76u_tx_queue_skb(), lines 872-876:
  872      err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
  876      mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb,

mt76u_tx_kick(), lines 889-896:
  889      struct urb *urb;
  893          urb = q->entry[q->first].urb;
  895          trace_submit_urb(dev, urb);
  896          err = usb_submit_urb(urb, GFP_ATOMIC);
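
Lines 889-896 show the TX kick submitting the URB at q->first. The loop bounds and error handling below are assumptions; only the lookup, trace, and submit are matched:

    while (q->first != q->head) {
            urb = q->entry[q->first].urb;

            trace_submit_urb(dev, urb);
            err = usb_submit_urb(urb, GFP_ATOMIC);
            if (err < 0)
                    break;

            q->first = (q->first + 1) % q->ndesc;
    }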

mt76u_free_tx(), lines 988-989:
  988          usb_free_urb(q->entry[j].urb);
  989          q->entry[j].urb = NULL;

mt76u_stop_tx(), line 1015:
  1015         usb_kill_urb(q->entry[j].urb);