Lines Matching refs: txq
266 struct mana_txq *txq; in mana_start_xmit() local
278 txq = &apc->tx_qp[txq_idx].txq; in mana_start_xmit()
279 gdma_sq = txq->gdma_sq; in mana_start_xmit()
281 tx_stats = &txq->stats; in mana_start_xmit()
284 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; in mana_start_xmit()
286 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) { in mana_start_xmit()
287 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset; in mana_start_xmit()
290 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset; in mana_start_xmit()
417 skb_queue_tail(&txq->pending_skbs, skb); in mana_start_xmit()
431 (void)skb_dequeue_tail(&txq->pending_skbs); in mana_start_xmit()
438 atomic_inc(&txq->pending_sends); in mana_start_xmit()
448 tx_stats = &txq->stats; in mana_start_xmit()
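
Taken together, the mana_start_xmit() hits above trace the submit-side bookkeeping: the skb is put on txq->pending_skbs before the work request is posted, pulled back off with skb_dequeue_tail() if the post fails, and txq->pending_sends is bumped only once the post has succeeded. Below is a minimal userspace sketch of that queue-then-roll-back pattern; the names (fake_txq, post_to_hw, the array standing in for pending_skbs) are made up for illustration and are not the driver's API.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PENDING_MAX 64

    /* Stands in for the real txq: pending_skbs becomes a small array,
     * pending_sends stays an atomic counter. */
    struct fake_txq {
        int pending[PENDING_MAX];
        int qlen;
        atomic_int pending_sends;
    };

    /* Pretend hardware post; every 4th packet fails so the rollback path runs. */
    static bool post_to_hw(int id)
    {
        return id % 4 != 3;
    }

    static int xmit(struct fake_txq *q, int id)
    {
        q->pending[q->qlen++] = id;             /* queue first, like skb_queue_tail() */
        if (!post_to_hw(id)) {
            q->qlen--;                          /* undo it, like skb_dequeue_tail() */
            return -1;
        }
        atomic_fetch_add(&q->pending_sends, 1); /* outstanding only after a good post */
        return 0;
    }

    int main(void)
    {
        struct fake_txq q = { .qlen = 0 };

        for (int id = 0; id < 8; id++)
            printf("pkt %d -> %s\n", id, xmit(&q, id) ? "dropped" : "posted");
        printf("pending_sends = %d, still queued = %d\n",
               atomic_load(&q.pending_sends), q.qlen);
        return 0;
    }
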
502 tx_stats = &apc->tx_qp[q].txq.stats; in mana_get_stats64()
521 int txq; in mana_get_tx_queue() local
523 txq = apc->indir_table[hash & (apc->indir_table_sz - 1)]; in mana_get_tx_queue()
525 if (txq != old_q && sk && sk_fullsock(sk) && in mana_get_tx_queue()
527 sk_tx_queue_set(sk, txq); in mana_get_tx_queue()
529 return txq; in mana_get_tx_queue()
535 int txq; in mana_select_queue() local
540 txq = sk_tx_queue_get(skb->sk); in mana_select_queue()
542 if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) { in mana_select_queue()
544 txq = skb_get_rx_queue(skb); in mana_select_queue()
546 txq = mana_get_tx_queue(ndev, skb, txq); in mana_select_queue()
549 return txq; in mana_select_queue()
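
The mana_get_tx_queue()/mana_select_queue() hits show the queue-selection policy: reuse the TX queue cached on the socket when it is still in range, otherwise hash into the power-of-two indirection table with hash & (apc->indir_table_sz - 1), and cache the result on full sockets via sk_tx_queue_set(). A small userspace sketch of that masked lookup plus caching follows; pick_txq(), fake_sock and the table contents are illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    #define INDIR_TABLE_SZ 64   /* power of two, so (hash & (size - 1)) is a cheap modulo */

    static int indir_table[INDIR_TABLE_SZ];

    struct fake_sock {
        int cached_txq;          /* stands in for sk_tx_queue_get()/sk_tx_queue_set() */
    };

    /* Pick a TX queue the way the listing shows: trust a valid cached value,
     * otherwise index the indirection table with the masked hash and cache it. */
    static int pick_txq(struct fake_sock *sk, uint32_t hash, int num_txq)
    {
        int txq;

        if (sk && sk->cached_txq >= 0 && sk->cached_txq < num_txq)
            return sk->cached_txq;

        txq = indir_table[hash & (INDIR_TABLE_SZ - 1)] % num_txq;
        if (sk)
            sk->cached_txq = txq;
        return txq;
    }

    int main(void)
    {
        struct fake_sock sk = { .cached_txq = -1 };

        for (int i = 0; i < INDIR_TABLE_SZ; i++)
            indir_table[i] = i;                 /* identity spread, just for the demo */

        printf("first lookup:  txq %d\n", pick_txq(&sk, 0x9e3779b9u, 16));
        printf("cached lookup: txq %d\n", pick_txq(&sk, 0xdeadbeefu, 16));
        return 0;
    }
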
1605 struct mana_txq *txq = cq->txq; in mana_poll_tx_cq() local
1616 ndev = txq->ndev; in mana_poll_tx_cq()
1668 if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num)) in mana_poll_tx_cq()
1671 skb = skb_dequeue(&txq->pending_skbs); in mana_poll_tx_cq()
1688 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt); in mana_poll_tx_cq()
1690 gdma_wq = txq->gdma_sq; in mana_poll_tx_cq()
1696 net_txq = txq->net_txq; in mana_poll_tx_cq()
1707 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) in mana_poll_tx_cq()
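
The mana_poll_tx_cq() hits are the completion-side mirror of the submit path: each completion dequeues one skb from txq->pending_skbs (after checking that the completion's wq_num matches txq->gdma_txq_id), the send queue tail is advanced by the consumed WQE units, and pending_sends is reduced by the number of packets retired, with a warning if the count would go negative. A userspace model of that batch drain, under made-up names (complete_batch(), the fake ring):

    #include <stdatomic.h>
    #include <stdio.h>

    struct fake_txq {
        int pending[64];
        int head, tail;            /* stands in for the pending_skbs list */
        atomic_int pending_sends;
        unsigned int wq_tail;      /* stands in for the GDMA work queue tail */
    };

    /* Retire n completions: pop one pending packet per completion, advance the
     * work queue tail by the consumed WQE units, then drop pending_sends by the
     * batch size, in the same order as skb_dequeue(), mana_move_wq_tail() and
     * atomic_sub_return() in the listing above. */
    static void complete_batch(struct fake_txq *q, int n, unsigned int wqe_units)
    {
        int done = 0;

        for (int i = 0; i < n && q->head != q->tail; i++) {
            printf("completed pkt %d\n", q->pending[q->head++ % 64]);
            done++;
        }
        q->wq_tail += wqe_units;
        if (atomic_fetch_sub(&q->pending_sends, done) - done < 0)
            fprintf(stderr, "pending_sends went negative\n");  /* the driver WARNs here */
    }

    int main(void)
    {
        struct fake_txq q = { .head = 0, .tail = 0 };

        for (int id = 0; id < 4; id++) {        /* pretend four packets were posted */
            q.pending[q.tail++ % 64] = id;
            atomic_fetch_add(&q.pending_sends, 1);
        }
        complete_batch(&q, 3, 6);               /* three completions, six WQE units */
        printf("still pending: %d\n", atomic_load(&q.pending_sends));
        return 0;
    }
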
2078 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq) in mana_deinit_txq() argument
2082 if (!txq->gdma_sq) in mana_deinit_txq()
2085 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq); in mana_deinit_txq()
2101 if (apc->tx_qp[i].txq.napi_initialized) { in mana_destroy_txq()
2107 apc->tx_qp[i].txq.napi_initialized = false; in mana_destroy_txq()
2113 mana_deinit_txq(apc, &apc->tx_qp[i].txq); in mana_destroy_txq()
2128 &tx_qp->txq.gdma_sq->head); in mana_create_txq_debugfs()
2130 &tx_qp->txq.gdma_sq->tail); in mana_create_txq_debugfs()
2132 &tx_qp->txq.pending_skbs.qlen); in mana_create_txq_debugfs()
2140 tx_qp->txq.gdma_sq, &mana_dbg_q_fops); in mana_create_txq_debugfs()
2154 struct mana_txq *txq; in mana_create_txq() local
2185 txq = &apc->tx_qp[i].txq; in mana_create_txq()
2187 u64_stats_init(&txq->stats.syncp); in mana_create_txq()
2188 txq->ndev = net; in mana_create_txq()
2189 txq->net_txq = netdev_get_tx_queue(net, i); in mana_create_txq()
2190 txq->vp_offset = apc->tx_vp_offset; in mana_create_txq()
2191 txq->napi_initialized = false; in mana_create_txq()
2192 skb_queue_head_init(&txq->pending_skbs); in mana_create_txq()
2198 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq); in mana_create_txq()
2206 cq->txq = txq; in mana_create_txq()
2222 wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle; in mana_create_txq()
2223 wq_spec.queue_size = txq->gdma_sq->queue_size; in mana_create_txq()
2237 txq->gdma_sq->id = wq_spec.queue_index; in mana_create_txq()
2240 txq->gdma_sq->mem_info.dma_region_handle = in mana_create_txq()
2245 txq->gdma_txq_id = txq->gdma_sq->id; in mana_create_txq()
2263 txq->napi_initialized = true; in mana_create_txq()
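
mana_create_txq() sets up the software state first (stats syncp, pending_skbs list, vp_offset, napi_initialized = false), then creates the GDMA send queue, copies the hardware-assigned queue index back into txq->gdma_sq->id and txq->gdma_txq_id, and only marks the queue live at the very end. A compact sketch of that "initialize everything, publish last" ordering under made-up names (setup_txq(), create_hw_queue()):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_txq {
        int hw_queue_id;        /* stands in for txq->gdma_txq_id / gdma_sq->id */
        int pending_count;      /* stands in for the pending_skbs/pending_sends state */
        atomic_bool ready;      /* stands in for txq->napi_initialized */
    };

    /* Pretend hardware queue creation; the returned index plays the role of
     * wq_spec.queue_index in the listing. */
    static int create_hw_queue(int i)
    {
        return 100 + i;
    }

    /* Same ordering as in mana_create_txq(): software bookkeeping first, the
     * hardware queue next, and the "live" flag only once everything is wired
     * up, so nothing can observe a half-built queue. */
    static int setup_txq(struct fake_txq *txq, int i)
    {
        txq->pending_count = 0;
        atomic_store(&txq->ready, false);

        txq->hw_queue_id = create_hw_queue(i);
        if (txq->hw_queue_id < 0)
            return -1;

        atomic_store(&txq->ready, true);    /* publish last */
        return 0;
    }

    int main(void)
    {
        struct fake_txq txq;

        if (setup_txq(&txq, 3) == 0)
            printf("txq ready, hw queue id %d\n", txq.hw_queue_id);
        return 0;
    }
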
3030 struct mana_txq *txq; in mana_dealloc_queues() local
3057 txq = &apc->tx_qp[i].txq; in mana_dealloc_queues()
3059 while (atomic_read(&txq->pending_sends) > 0 && in mana_dealloc_queues()
3064 if (atomic_read(&txq->pending_sends)) { in mana_dealloc_queues()
3068 err, atomic_read(&txq->pending_sends), in mana_dealloc_queues()
3069 txq->gdma_txq_id); in mana_dealloc_queues()
3076 txq = &apc->tx_qp[i].txq; in mana_dealloc_queues()
3077 while ((skb = skb_dequeue(&txq->pending_skbs))) { in mana_dealloc_queues()
3081 atomic_set(&txq->pending_sends, 0); in mana_dealloc_queues()
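
mana_dealloc_queues() drains each queue on teardown: wait, with a bound, for pending_sends to drop to zero, warn if it never does, then free whatever is still sitting on pending_skbs and reset the counter so a later re-attach starts clean. A userspace sketch of that bounded-wait-then-flush pattern; drain_txq() and the 100 ms budget are invented for the example.

    #define _POSIX_C_SOURCE 200809L
    #include <stdatomic.h>
    #include <stdio.h>
    #include <time.h>

    struct fake_txq {
        int pending[64];
        int head, tail;            /* stands in for the pending_skbs list */
        atomic_int pending_sends;
    };

    /* Bounded wait for outstanding sends, then flush leftovers: the same shape
     * as the while(pending_sends)/timeout loop plus the skb_dequeue() and
     * atomic_set(0) cleanup in mana_dealloc_queues(). */
    static void drain_txq(struct fake_txq *q, int qid)
    {
        struct timespec delay = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };
        int waited_ms = 0;

        while (atomic_load(&q->pending_sends) > 0 && waited_ms < 100) {
            nanosleep(&delay, NULL);            /* give completions a chance to land */
            waited_ms += 10;
        }
        if (atomic_load(&q->pending_sends))
            fprintf(stderr, "txq %d: %d sends never completed\n",
                    qid, atomic_load(&q->pending_sends));

        while (q->head != q->tail)              /* drop whatever is still queued */
            printf("txq %d: freeing pkt %d\n", qid, q->pending[q->head++ % 64]);
        atomic_store(&q->pending_sends, 0);     /* reset so a re-attach starts clean */
    }

    int main(void)
    {
        struct fake_txq q = { .head = 0, .tail = 0 };

        q.pending[q.tail++ % 64] = 7;           /* one packet whose completion never arrives */
        atomic_fetch_add(&q.pending_sends, 1);
        drain_txq(&q, 0);
        return 0;
    }
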