Lines matching full:q

Search hits for the bare identifier q in the mt76 SDIO queue code (the mt76s_* helpers; in mainline Linux these live in drivers/net/wireless/mediatek/mt76/sdio.c). Each hit shows the source line number, the matching line, and the enclosing function; lines that do not contain the token q are omitted by the search, which is why the numbering below jumps.
306 struct mt76_queue *q = &dev->q_rx[qid]; in mt76s_alloc_rx_queue() local
308 spin_lock_init(&q->lock); in mt76s_alloc_rx_queue()
309 q->entry = devm_kcalloc(dev->dev, in mt76s_alloc_rx_queue()
310 MT76S_NUM_RX_ENTRIES, sizeof(*q->entry), in mt76s_alloc_rx_queue()
312 if (!q->entry) in mt76s_alloc_rx_queue()
315 q->ndesc = MT76S_NUM_RX_ENTRIES; in mt76s_alloc_rx_queue()
316 q->head = q->tail = 0; in mt76s_alloc_rx_queue()
317 q->queued = 0; in mt76s_alloc_rx_queue()
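
Those hits cover almost the whole of mt76s_alloc_rx_queue(); only lines that never mention the bare token q (the signature, the GFP flag, the error return, blanks) are filtered out. A hedged reconstruction, with the filtered-out lines inferred from the devm_kcalloc() idiom rather than quoted:

	static int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
	{
		struct mt76_queue *q = &dev->q_rx[qid];

		spin_lock_init(&q->lock);
		q->entry = devm_kcalloc(dev->dev,
					MT76S_NUM_RX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);		/* inferred, not a search hit */
		if (!q->entry)
			return -ENOMEM;			/* inferred error path */

		q->ndesc = MT76S_NUM_RX_ENTRIES;
		q->head = q->tail = 0;
		q->queued = 0;

		return 0;
	}

The devm_* allocator ties the ring's lifetime to the device, so there is no matching free in the teardown path later in this listing.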
325 struct mt76_queue *q; in mt76s_alloc_tx_queue() local
327 q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL); in mt76s_alloc_tx_queue()
328 if (!q) in mt76s_alloc_tx_queue()
331 spin_lock_init(&q->lock); in mt76s_alloc_tx_queue()
332 q->entry = devm_kcalloc(dev->dev, in mt76s_alloc_tx_queue()
333 MT76S_NUM_TX_ENTRIES, sizeof(*q->entry), in mt76s_alloc_tx_queue()
335 if (!q->entry) in mt76s_alloc_tx_queue()
338 q->ndesc = MT76S_NUM_TX_ENTRIES; in mt76s_alloc_tx_queue()
340 return q; in mt76s_alloc_tx_queue()
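
mt76s_alloc_tx_queue() allocates the queue structure itself as well as its entry ring. The ERR_PTR() returns are inferred from the IS_ERR()/PTR_ERR() checks its callers make in mt76s_alloc_tx() below:

	static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
	{
		struct mt76_queue *q;

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return ERR_PTR(-ENOMEM);	/* inferred: callers use IS_ERR() */

		spin_lock_init(&q->lock);
		q->entry = devm_kcalloc(dev->dev,
					MT76S_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);		/* inferred */
		if (!q->entry)
			return ERR_PTR(-ENOMEM);	/* inferred */

		q->ndesc = MT76S_NUM_TX_ENTRIES;

		return q;
	}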
345 struct mt76_queue *q; in mt76s_alloc_tx() local
349 q = mt76s_alloc_tx_queue(dev); in mt76s_alloc_tx()
350 if (IS_ERR(q)) in mt76s_alloc_tx()
351 return PTR_ERR(q); in mt76s_alloc_tx()
353 dev->phy.q_tx[i] = q; in mt76s_alloc_tx()
356 q = mt76s_alloc_tx_queue(dev); in mt76s_alloc_tx()
357 if (IS_ERR(q)) in mt76s_alloc_tx()
358 return PTR_ERR(q); in mt76s_alloc_tx()
360 dev->q_mcu[MT_MCUQ_WM] = q; in mt76s_alloc_tx()
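
The two allocation sites in mt76s_alloc_tx() correspond to the per-AC data queues and the single MCU (firmware command) queue. The loop and its MT_TXQ_PSD bound are assumptions based on mainline mt76; only the q lines appear in the hits:

	static int mt76s_alloc_tx(struct mt76_dev *dev)
	{
		struct mt76_queue *q;
		int i;

		/* assumed loop: one tx queue per data access category */
		for (i = 0; i <= MT_TXQ_PSD; i++) {
			q = mt76s_alloc_tx_queue(dev);
			if (IS_ERR(q))
				return PTR_ERR(q);

			dev->phy.q_tx[i] = q;
		}

		/* one more queue for MCU (firmware) messages */
		q = mt76s_alloc_tx_queue(dev);
		if (IS_ERR(q))
			return PTR_ERR(q);

		dev->q_mcu[MT_MCUQ_WM] = q;

		return 0;
	}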
367 mt76s_get_next_rx_entry(struct mt76_queue *q) in mt76s_get_next_rx_entry() argument
371 spin_lock_bh(&q->lock); in mt76s_get_next_rx_entry()
372 if (q->queued > 0) { in mt76s_get_next_rx_entry()
373 e = &q->entry[q->tail]; in mt76s_get_next_rx_entry()
374 q->tail = (q->tail + 1) % q->ndesc; in mt76s_get_next_rx_entry()
375 q->queued--; in mt76s_get_next_rx_entry()
377 spin_unlock_bh(&q->lock); in mt76s_get_next_rx_entry()
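
mt76s_get_next_rx_entry() is the consumer side of a classic locked ring buffer: under q->lock it takes the entry at tail, advances tail modulo the ring size, and drops the fill count. The declaration and return are inferred; everything between the lock and unlock is quoted verbatim above:

	static struct mt76_queue_entry *
	mt76s_get_next_rx_entry(struct mt76_queue *q)
	{
		struct mt76_queue_entry *e = NULL;	/* inferred: NULL means ring empty */

		spin_lock_bh(&q->lock);
		if (q->queued > 0) {
			e = &q->entry[q->tail];
			q->tail = (q->tail + 1) % q->ndesc;
			q->queued--;
		}
		spin_unlock_bh(&q->lock);

		return e;
	}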
383 mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76s_process_rx_queue() argument
385 int qid = q - &dev->q_rx[MT_RXQ_MAIN]; in mt76s_process_rx_queue()
394 e = mt76s_get_next_rx_entry(q); in mt76s_process_rx_queue()
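
Only two hits land in mt76s_process_rx_queue(), the qid pointer arithmetic at its top and the dequeue call, so most of this function is reconstruction. A sketch of the likely shape, based on how mainline mt76 dispatches rx frames (the rx_skb call, the loop condition, and the poll-complete step are all assumptions):

	static int
	mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
	{
		int qid = q - &dev->q_rx[MT_RXQ_MAIN];	/* queue index via pointer arithmetic */
		int nframes = 0;

		while (true) {
			struct mt76_queue_entry *e;

			e = mt76s_get_next_rx_entry(q);
			if (!e || !e->skb)
				break;

			/* assumption: hand the frame up through the driver hook */
			dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
			e->skb = NULL;
			nframes++;
		}
		if (qid == MT_RXQ_MAIN)
			mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);	/* assumption */

		return nframes;
	}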
429 static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76s_process_tx_queue() argument
435 if (!q) in mt76s_process_tx_queue()
438 mcu = q == dev->q_mcu[MT_MCUQ_WM]; in mt76s_process_tx_queue()
439 while (q->queued > 0) { in mt76s_process_tx_queue()
440 if (!q->entry[q->tail].done) in mt76s_process_tx_queue()
443 entry = q->entry[q->tail]; in mt76s_process_tx_queue()
444 q->entry[q->tail].done = false; in mt76s_process_tx_queue()
451 mt76_queue_tx_complete(dev, q, &entry); in mt76s_process_tx_queue()
455 if (!q->queued) in mt76s_process_tx_queue()
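
mt76s_process_tx_queue() reaps completed transmissions in ring order: it stops at the first entry whose done flag the bus code has not set, and treats MCU frames specially since they have no mac80211 status to report. The entry copy at 443 matters because mt76_queue_tx_complete() recycles the slot. A hedged reconstruction (the mcu free and the wake_up() target are assumptions from mainline mt76):

	static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
	{
		struct mt76_queue_entry entry;
		int nframes = 0;
		bool mcu;

		if (!q)
			return 0;

		mcu = q == dev->q_mcu[MT_MCUQ_WM];
		while (q->queued > 0) {
			/* stop at the first frame the bus has not finished */
			if (!q->entry[q->tail].done)
				break;

			entry = q->entry[q->tail];
			q->entry[q->tail].done = false;

			if (mcu) {
				/* assumption: MCU frames are freed here, not via tx status */
				dev_kfree_skb(entry.skb);
				entry.skb = NULL;
			}

			mt76_queue_tx_complete(dev, q, &entry);
			nframes++;
		}

		if (!q->queued)
			wake_up(&dev->tx_wait);		/* assumption: wake tx flushers */

		return nframes;
	}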
517 mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, in mt76s_tx_queue_skb() argument
525 u16 idx = q->head; in mt76s_tx_queue_skb()
527 if (q->queued == q->ndesc) in mt76s_tx_queue_skb()
535 q->entry[q->head].skb = tx_info.skb; in mt76s_tx_queue_skb()
536 q->entry[q->head].buf_sz = len; in mt76s_tx_queue_skb()
537 q->entry[q->head].wcid = 0xffff; in mt76s_tx_queue_skb()
541 q->head = (q->head + 1) % q->ndesc; in mt76s_tx_queue_skb()
542 q->queued++; in mt76s_tx_queue_skb()
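
mt76s_tx_queue_skb() is the producer: reject when the ring is full, let the chip driver build the frame descriptor, publish the entry at head, then advance head. The wcid sentinel 0xffff marks the entry as not tied to a station table slot. The tx_prepare_skb call and the smp_wmb() are assumptions from mainline mt76; the hit lines are quoted as-is:

	static int
	mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
			   enum mt76_txq_id qid, struct sk_buff *skb,
			   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
	{
		struct mt76_tx_info tx_info = {
			.skb = skb,
		};
		int err, len = skb->len;
		u16 idx = q->head;

		if (q->queued == q->ndesc)
			return -ENOSPC;			/* inferred: ring full */

		/* assumption: per-chip hook fills in the hardware tx descriptor */
		err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
		if (err < 0)
			return err;

		q->entry[q->head].skb = tx_info.skb;
		q->entry[q->head].buf_sz = len;
		q->entry[q->head].wcid = 0xffff;

		smp_wmb();	/* assumption: publish the entry before moving head */

		q->head = (q->head + 1) % q->ndesc;
		q->queued++;

		return idx;
	}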
548 mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, in mt76s_tx_queue_skb_raw() argument
553 if (q->queued == q->ndesc) in mt76s_tx_queue_skb_raw()
561 spin_lock_bh(&q->lock); in mt76s_tx_queue_skb_raw()
563 q->entry[q->head].buf_sz = len; in mt76s_tx_queue_skb_raw()
564 q->entry[q->head].skb = skb; in mt76s_tx_queue_skb_raw()
569 q->head = (q->head + 1) % q->ndesc; in mt76s_tx_queue_skb_raw()
570 q->queued++; in mt76s_tx_queue_skb_raw()
572 spin_unlock_bh(&q->lock); in mt76s_tx_queue_skb_raw()
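
mt76s_tx_queue_skb_raw() is the MCU-side twin. Notably it takes q->lock around the ring update while mt76s_tx_queue_skb() above does not, presumably because the data path is already serialized under the queue lock by the mt76 tx scheduler, whereas raw MCU sends arrive from arbitrary contexts. The padding and error path below are assumptions (SDIO transfers generally want aligned lengths), and mt76_skb_adjust_pad()'s exact signature varies across kernel versions:

	static int
	mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			       struct sk_buff *skb, u32 tx_info)
	{
		int ret = -ENOSPC, len = skb->len, pad;

		if (q->queued == q->ndesc)
			goto error;		/* inferred error path */

		/* assumption: pad the frame to a 4-byte boundary for the bus */
		pad = round_up(skb->len, 4) - skb->len;
		ret = mt76_skb_adjust_pad(skb, pad);
		if (ret)
			goto error;

		spin_lock_bh(&q->lock);

		q->entry[q->head].buf_sz = len;
		q->entry[q->head].skb = skb;

		smp_wmb();	/* assumption, mirroring the data path */

		q->head = (q->head + 1) % q->ndesc;
		q->queued++;

		spin_unlock_bh(&q->lock);

		return 0;

	error:
		dev_kfree_skb(skb);
		return ret;
	}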
582 static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) in mt76s_tx_kick() argument
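
Only the signature of mt76s_tx_kick() matches, so its body never touches q directly. That fits an SDIO bus: there is no per-queue doorbell register to ring, so kicking plausibly just schedules the worker that drains every tx queue over the bus. The field names below (txrx_wq, tx.xmit_work) are assumptions from mainline mt76:

	static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
	{
		struct mt76_sdio *sdio = &dev->sdio;

		queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);	/* assumed worker */
	}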
614 struct mt76_queue *q = &dev->q_rx[i]; in mt76s_deinit() local
617 for (j = 0; j < q->ndesc; j++) { in mt76s_deinit()
618 struct mt76_queue_entry *e = &q->entry[j]; in mt76s_deinit()
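
The final hits sit in mt76s_deinit()'s teardown loop, which walks every rx ring and frees any skb still parked in an entry (buffers the hardware filled but the host never processed). A sketch of that loop; the mt76_for_each_q_rx() iterator and the inner free are assumptions from mainline mt76:

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++) {
			struct mt76_queue_entry *e = &q->entry[j];

			if (!e->skb)
				continue;

			dev_kfree_skb(e->skb);	/* assumed: drop leftover rx frames */
			e->skb = NULL;
		}
	}

The entry array itself needs no explicit free here because it was allocated with devm_kcalloc() in mt76s_alloc_rx_queue().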