Lines Matching full:tx

101 	 * The tx queue len can be adjusted upward while the interface is  in hfi1_ipoib_check_queue_stopped()
103 * The tx queue len can be large enough to overflow the txreq_ring. in hfi1_ipoib_check_queue_stopped()
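The two comment hits above carry the reasoning for the restart check: the txreq ring is sized once at init, while tx_queue_len can be raised at runtime, which suggests the check has to clamp against the fixed ring size. A minimal illustration of that clamp follows; the helper name is invented and this is not the driver's code.

/* Invented helper, illustration only: bound the usable queue depth by
 * the fixed txreq ring size so a user-enlarged tx_queue_len cannot
 * overflow the ring.
 */
static unsigned int txq_effective_depth(unsigned int tx_queue_len,
					unsigned int ring_max_items)
{
	return tx_queue_len < ring_max_items ? tx_queue_len : ring_max_items;
}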
114 static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget) in hfi1_ipoib_free_tx() argument
116 struct hfi1_ipoib_dev_priv *priv = tx->txq->priv; in hfi1_ipoib_free_tx()
118 if (likely(!tx->sdma_status)) { in hfi1_ipoib_free_tx()
119 dev_sw_netstats_tx_add(priv->netdev, 1, tx->skb->len); in hfi1_ipoib_free_tx()
124 __func__, tx->sdma_status, in hfi1_ipoib_free_tx()
125 le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx, in hfi1_ipoib_free_tx()
126 tx->txq->sde->this_idx); in hfi1_ipoib_free_tx()
129 napi_consume_skb(tx->skb, budget); in hfi1_ipoib_free_tx()
130 tx->skb = NULL; in hfi1_ipoib_free_tx()
131 sdma_txclean(priv->dd, &tx->txreq); in hfi1_ipoib_free_tx()
138 struct ipoib_txreq *tx; in hfi1_ipoib_drain_tx_ring() local
141 tx = hfi1_txreq_from_idx(tx_ring, i); in hfi1_ipoib_drain_tx_ring()
142 tx->complete = 0; in hfi1_ipoib_drain_tx_ring()
143 dev_kfree_skb_any(tx->skb); in hfi1_ipoib_drain_tx_ring()
144 tx->skb = NULL; in hfi1_ipoib_drain_tx_ring()
145 sdma_txclean(txq->priv->dd, &tx->txreq); in hfi1_ipoib_drain_tx_ring()
162 struct ipoib_txreq *tx = hfi1_txreq_from_idx(tx_ring, head); in hfi1_ipoib_poll_tx_ring() local
167 if (!smp_load_acquire(&tx->complete)) in hfi1_ipoib_poll_tx_ring()
169 tx->complete = 0; in hfi1_ipoib_poll_tx_ring()
170 trace_hfi1_tx_produce(tx, head); in hfi1_ipoib_poll_tx_ring()
171 hfi1_ipoib_free_tx(tx, budget); in hfi1_ipoib_poll_tx_ring()
173 tx = hfi1_txreq_from_idx(tx_ring, head); in hfi1_ipoib_poll_tx_ring()
177 /* Finished freeing tx items so store the head value. */ in hfi1_ipoib_poll_tx_ring()
190 struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq); in hfi1_ipoib_sdma_complete() local
192 trace_hfi1_txq_complete(tx->txq); in hfi1_ipoib_sdma_complete()
193 tx->sdma_status = status; in hfi1_ipoib_sdma_complete()
195 smp_store_release(&tx->complete, 1); in hfi1_ipoib_sdma_complete()
196 napi_schedule_irqoff(&tx->txq->napi); in hfi1_ipoib_sdma_complete()
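Lines 162-196 above are the two halves of the completion handshake: hfi1_ipoib_sdma_complete() release-stores tx->complete and schedules NAPI, while hfi1_ipoib_poll_tx_ring() acquire-loads the flag before freeing the skb and advancing the head. The standalone userspace model below sketches that acquire/release pairing with C11 atomics; every name in it is invented, and unlike the driver (which relies on the ring's head/tail accounting for slot reuse) the model reuses a slot only after the poller release-stores the flag back to zero. Build with something like cc -pthread model.c.

/* Standalone model of the complete-flag handshake, not the hfi1 code.
 * One thread plays the SDMA completion callback (release store), the
 * main thread plays the NAPI poll loop (acquire load).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 8
#define NPKTS     32

struct model_txreq {
	atomic_int complete;	/* stands in for tx->complete */
	int pkt;		/* stands in for tx->skb / tx->sdma_status */
};

static struct model_txreq ring[RING_SIZE];

/* analogue of hfi1_ipoib_sdma_complete(): publish the result, then the flag */
static void *completer(void *arg)
{
	(void)arg;
	for (int i = 0; i < NPKTS; i++) {
		struct model_txreq *tx = &ring[i % RING_SIZE];

		/* wait until the poller has released this slot (model only) */
		while (atomic_load_explicit(&tx->complete, memory_order_acquire))
			;
		tx->pkt = i;	/* everything written before... */
		atomic_store_explicit(&tx->complete, 1, memory_order_release);
	}
	return NULL;
}

int main(void)
{
	pthread_t thr;
	unsigned int head = 0;
	int freed = 0;

	pthread_create(&thr, NULL, completer, NULL);

	/* analogue of hfi1_ipoib_poll_tx_ring(): only touch the entry once
	 * the acquire load observes complete == 1.
	 */
	while (freed < NPKTS) {
		struct model_txreq *tx = &ring[head];

		if (!atomic_load_explicit(&tx->complete, memory_order_acquire))
			continue;
		printf("freed pkt %d\n", tx->pkt);
		atomic_store_explicit(&tx->complete, 0, memory_order_release);
		head = (head + 1) % RING_SIZE;
		freed++;
	}
	pthread_join(thr, NULL);
	return 0;
}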
199 static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx, in hfi1_ipoib_build_ulp_payload() argument
203 struct sdma_txreq *txreq = &tx->txreq; in hfi1_ipoib_build_ulp_payload()
204 struct sk_buff *skb = tx->skb; in hfi1_ipoib_build_ulp_payload()
230 static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx, in hfi1_ipoib_build_tx_desc() argument
234 struct sdma_txreq *txreq = &tx->txreq; in hfi1_ipoib_build_tx_desc()
235 struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr; in hfi1_ipoib_build_tx_desc()
237 sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len; in hfi1_ipoib_build_tx_desc()
253 return hfi1_ipoib_build_ulp_payload(tx, txp); in hfi1_ipoib_build_tx_desc()
256 static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx, in hfi1_ipoib_build_ib_tx_headers() argument
259 struct hfi1_ipoib_dev_priv *priv = tx->txq->priv; in hfi1_ipoib_build_ib_tx_headers()
260 struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr; in hfi1_ipoib_build_ib_tx_headers()
261 struct sk_buff *skb = tx->skb; in hfi1_ipoib_build_ib_tx_headers()
354 struct ipoib_txreq *tx; in hfi1_ipoib_send_dma_common() local
373 tx = hfi1_txreq_from_idx(tx_ring, tail); in hfi1_ipoib_send_dma_common()
377 tx->txreq.num_desc = 0; in hfi1_ipoib_send_dma_common()
378 tx->txq = txq; in hfi1_ipoib_send_dma_common()
379 tx->skb = skb; in hfi1_ipoib_send_dma_common()
380 INIT_LIST_HEAD(&tx->txreq.list); in hfi1_ipoib_send_dma_common()
382 hfi1_ipoib_build_ib_tx_headers(tx, txp); in hfi1_ipoib_send_dma_common()
384 ret = hfi1_ipoib_build_tx_desc(tx, txp); in hfi1_ipoib_send_dma_common()
396 return tx; in hfi1_ipoib_send_dma_common()
399 sdma_txclean(priv->dd, &tx->txreq); in hfi1_ipoib_send_dma_common()
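Lines 354-399 show the common send path: take the entry at the ring tail, zero num_desc, attach the txq and skb, build the headers and descriptors, and on failure unwind with sdma_txclean(). Below is a rough, invented model of that "allocate at tail, unwind on error" shape; none of these names are the driver's, and the real function returns an ERR_PTR rather than an out-parameter.

/* Invented model, illustration only. */
#include <errno.h>
#include <stddef.h>

#define MODEL_RING_SIZE 16	/* power of two, like the txreq ring */

struct model_slot {
	int num_desc;
	int payload;
};

struct model_ring {
	struct model_slot slots[MODEL_RING_SIZE];
	unsigned int head;	/* advanced by the poll/free side */
	unsigned int tail;	/* advanced by the send side once the slot is committed */
};

static unsigned int model_used(const struct model_ring *r)
{
	return (r->tail - r->head) & (MODEL_RING_SIZE - 1);
}

/* rough analogue of hfi1_ipoib_send_dma_common(): NULL means the caller
 * must back-pressure (ring full) or drop (build failure).  The caller
 * advances tail afterward, the "consume tx" step in the fragments.
 */
static struct model_slot *model_alloc_tail(struct model_ring *r, int payload,
					   int *err)
{
	struct model_slot *slot;

	if (model_used(r) >= MODEL_RING_SIZE - 1) {
		*err = -ENOMEM;			/* ring full */
		return NULL;
	}

	slot = &r->slots[r->tail & (MODEL_RING_SIZE - 1)];
	slot->num_desc = 0;			/* like tx->txreq.num_desc = 0 */
	slot->payload = payload;

	if (payload < 0) {			/* stand-in for a descriptor build error */
		slot->payload = 0;		/* unwind, like sdma_txclean() */
		*err = -EINVAL;
		return NULL;
	}

	*err = 0;
	return slot;
}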
417 dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret); in hfi1_ipoib_submit_tx_list()
440 struct ipoib_txreq *tx) in hfi1_ipoib_submit_tx() argument
446 &tx->txreq, in hfi1_ipoib_submit_tx()
462 struct ipoib_txreq *tx; in hfi1_ipoib_send_dma_single() local
465 tx = hfi1_ipoib_send_dma_common(dev, skb, txp); in hfi1_ipoib_send_dma_single()
466 if (IS_ERR(tx)) { in hfi1_ipoib_send_dma_single()
467 int ret = PTR_ERR(tx); in hfi1_ipoib_send_dma_single()
480 trace_hfi1_tx_consume(tx, tx_ring->tail); in hfi1_ipoib_send_dma_single()
481 /* consume tx */ in hfi1_ipoib_send_dma_single()
483 ret = hfi1_ipoib_submit_tx(txq, tx); in hfi1_ipoib_send_dma_single()
487 &tx->sdma_hdr->hdr, in hfi1_ipoib_send_dma_single()
498 /* mark complete and kick napi tx */ in hfi1_ipoib_send_dma_single()
499 smp_store_release(&tx->complete, 1); in hfi1_ipoib_send_dma_single()
500 napi_schedule(&tx->txq->napi); in hfi1_ipoib_send_dma_single()
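Lines 462-500 show the single-packet path: take the entry, trace the consume, submit the txreq, and on a submit failure do not free in place; the comment "mark complete and kick napi tx" together with the release store and napi_schedule() suggests the entry is handed back to the poll loop so cleanup stays on one path. The sketch below continues the userspace model from the handshake example above (add #include <errno.h> there); submit_hw() and wake_poller() are made-up stubs, not driver calls.

/* Invented stubs so the sketch reads on its own; the real calls in the
 * driver are the sdma submit and napi_schedule(&tx->txq->napi).
 */
static int submit_hw(struct model_txreq *tx)
{
	return (tx->pkt & 1) ? -EBUSY : 0;	/* pretend odd packets fail */
}

static void wake_poller(void)
{
	/* in the driver: schedule the txq's NAPI instance */
}

/* rough analogue of the error branch in hfi1_ipoib_send_dma_single(): on
 * a submit failure the entry is marked complete so the normal poll loop
 * frees it, keeping a single cleanup path.
 */
static int model_send_one(struct model_txreq *tx)
{
	int ret = submit_hw(tx);

	if (ret) {
		atomic_store_explicit(&tx->complete, 1, memory_order_release);
		wake_poller();
	}
	return ret;
}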
513 struct ipoib_txreq *tx; in hfi1_ipoib_send_dma_list() local
528 tx = hfi1_ipoib_send_dma_common(dev, skb, txp); in hfi1_ipoib_send_dma_list()
529 if (IS_ERR(tx)) { in hfi1_ipoib_send_dma_list()
530 int ret = PTR_ERR(tx); in hfi1_ipoib_send_dma_list()
543 trace_hfi1_tx_consume(tx, tx_ring->tail); in hfi1_ipoib_send_dma_list()
544 /* consume tx */ in hfi1_ipoib_send_dma_list()
546 list_add_tail(&tx->txreq.list, &txq->tx_list); in hfi1_ipoib_send_dma_list()
551 &tx->sdma_hdr->hdr, in hfi1_ipoib_send_dma_list()
609 * sdma descriptors available to send the packet. It adds Tx queue's wait
655 * This function gets called when SDMA descriptors become available and Tx
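The two kernel-doc hits at 609 and 655 describe the descriptor back-pressure protocol: when no SDMA descriptors are available the Tx queue's wait structure is added to a wait list, and a later "descriptors available" callback wakes the queue. The driver's callbacks do not block; the small condition-variable model below, with invented names, only illustrates the park/wake idea.

/* Invented userspace model of the park/wake idea, not the driver code. */
#include <pthread.h>

struct model_engine {
	pthread_mutex_t lock;
	pthread_cond_t dmawait;		/* stands in for the engine's wait list */
	int free_descs;
};

static struct model_engine eng = {	/* example instance */
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.dmawait = PTHREAD_COND_INITIALIZER,
	.free_descs = 0,
};

/* sleep side: no descriptors, park until some are available */
static void model_wait_for_descs(struct model_engine *e, int needed)
{
	pthread_mutex_lock(&e->lock);
	while (e->free_descs < needed)
		pthread_cond_wait(&e->dmawait, &e->lock);
	e->free_descs -= needed;
	pthread_mutex_unlock(&e->lock);
}

/* wakeup side: descriptors completed, wake any parked sender */
static void model_descs_available(struct model_engine *e, int completed)
{
	pthread_mutex_lock(&e->lock);
	e->free_descs += completed;
	pthread_cond_broadcast(&e->dmawait);
	pthread_mutex_unlock(&e->lock);
}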
706 struct ipoib_txreq *tx; in hfi1_ipoib_txreq_init() local
743 kzalloc_node(sizeof(*tx->sdma_hdr), in hfi1_ipoib_txreq_init()
776 struct ipoib_txreq *tx = in hfi1_ipoib_drain_tx_list() local
780 sdma_txclean(txq->priv->dd, &tx->txreq); in hfi1_ipoib_drain_tx_list()
781 dev_kfree_skb_any(tx->skb); in hfi1_ipoib_drain_tx_list()
782 tx->skb = NULL; in hfi1_ipoib_drain_tx_list()