Lines matching defs:tx_ring (each entry is the source line number followed by the matched line)
39 struct ice_tx_ring *tx_ring;
48 tx_ring = vsi->tx_rings[0];
49 if (!tx_ring || !tx_ring->desc)
51 dev = tx_ring->dev;
54 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
67 i = tx_ring->next_to_use;
68 first = &tx_ring->tx_buf[i];
69 f_desc = ICE_TX_FDIRDESC(tx_ring, i);
73 i = (i < tx_ring->count) ? i : 0;
74 tx_desc = ICE_TX_DESC(tx_ring, i);
75 tx_buf = &tx_ring->tx_buf[i];
78 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
102 writel(tx_ring->next_to_use, tx_ring->tail);
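
The lines above (39-102) come from the flow-director programming path: it waits until at least two descriptors are free (the ICE_DESC_UNUSED() check at line 54), claims two consecutive slots starting at next_to_use, wraps the index back to 0 when it reaches count (lines 73 and 78), and finally publishes the work by writing next_to_use to the tail register (line 102). Below is a small standalone model of just that index arithmetic; struct ring, ring_unused() and ring_claim_pair() are illustrative stand-ins, not the driver's types, and ring_unused() only mirrors the usual keep-one-slot-empty convention.

/* Standalone model of the producer-side index handling in the lines
 * above; names and types are illustrative stand-ins, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>

struct ring {
    uint16_t count;         /* number of descriptor slots */
    uint16_t next_to_use;   /* producer index             */
    uint16_t next_to_clean; /* consumer index             */
};

/* Free slots, keeping one slot empty so "full" and "empty" differ
 * (the same idea as the ICE_DESC_UNUSED() check at line 54). */
static uint16_t ring_unused(const struct ring *r)
{
    return (r->next_to_clean > r->next_to_use ? 0 : r->count) +
           r->next_to_clean - r->next_to_use - 1;
}

/* Claim two consecutive slots, wrapping the index exactly like the
 * "(i < tx_ring->count) ? i : 0" pattern at lines 73 and 78. */
static int ring_claim_pair(struct ring *r, uint16_t *first, uint16_t *second)
{
    uint16_t i = r->next_to_use;

    if (ring_unused(r) < 2)
        return -1;                      /* caller would back off and retry */

    *first = i;
    i++;
    i = (i < r->count) ? i : 0;
    *second = i;
    i++;
    r->next_to_use = (i < r->count) ? i : 0;
    /* the driver now fills both descriptors and writes the tail register */
    return 0;
}

int main(void)
{
    struct ring r = { .count = 8, .next_to_use = 6, .next_to_clean = 2 };
    uint16_t a, b;

    if (!ring_claim_pair(&r, &a, &b))
        printf("claimed slots %u and %u, next_to_use now %u\n",
               (unsigned)a, (unsigned)b, (unsigned)r.next_to_use);
    return 0;
}

In the driver the first slot holds the filter programming descriptor (f_desc at line 69) and the second a data descriptor (line 74); the writel() of the tail is what hands both to hardware.
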
149 * @tx_ring: ring to be cleaned
151 void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
156 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
157 ice_xsk_clean_xdp_ring(tx_ring);
162 if (!tx_ring->tx_buf)
166 for (i = 0; i < tx_ring->count; i++)
167 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
170 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
172 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
175 memset(tx_ring->desc, 0, size);
177 tx_ring->next_to_use = 0;
178 tx_ring->next_to_clean = 0;
180 if (!tx_ring->netdev)
184 netdev_tx_reset_queue(txring_txq(tx_ring));
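
ice_clean_tx_ring() (lines 149-184) walks every slot, releases the attached buffer, zeroes both the software tx_buf array and the descriptor memory, and resets the two ring indices. Below is a condensed standalone model of that reset sequence; struct slot/desc and free() are stand-ins for the driver's tx_buf/descriptor types and its unmap-and-free helper.

/* Simplified model of the reset sequence in ice_clean_tx_ring():
 * release each slot's buffer, zero both arrays, reset the indices.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct slot { void *buf; size_t len; };
struct desc { uint64_t addr; uint64_t cmd; };

struct ring {
    struct slot *slots;     /* software bookkeeping, one per descriptor */
    struct desc *descs;     /* descriptor array                         */
    uint16_t count;
    uint16_t next_to_use;
    uint16_t next_to_clean;
};

static void ring_clean(struct ring *r)
{
    if (!r->slots)
        return;

    /* release whatever each slot still references */
    for (uint16_t i = 0; i < r->count; i++) {
        free(r->slots[i].buf);          /* driver: unmap DMA, free buffer */
        r->slots[i].buf = NULL;
    }

    /* zero both arrays so no stale state survives */
    memset(r->slots, 0, sizeof(*r->slots) * r->count);
    memset(r->descs, 0, sizeof(*r->descs) * r->count);

    /* ring is empty again: producer and consumer meet at slot 0 */
    r->next_to_use = 0;
    r->next_to_clean = 0;
}

int main(void)
{
    struct ring r = { .count = 4 };

    r.slots = calloc(r.count, sizeof(*r.slots));
    r.descs = calloc(r.count, sizeof(*r.descs));
    if (!r.slots || !r.descs)
        return 1;

    r.slots[1].buf = malloc(64);        /* pretend one frame is still queued */
    r.next_to_use = 2;
    r.next_to_clean = 1;

    ring_clean(&r);

    free(r.slots);
    free(r.descs);
    return 0;
}

The driver additionally resets the queue's byte-queue-limit accounting (netdev_tx_reset_queue() at line 184) whenever the ring has a netdev attached.
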
189 * @tx_ring: Tx descriptor ring for a specific queue
193 void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
197 ice_clean_tx_ring(tx_ring);
198 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
199 tx_ring->tx_buf = NULL;
201 if (tx_ring->desc) {
202 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
204 dmam_free_coherent(tx_ring->dev, size,
205 tx_ring->desc, tx_ring->dma);
206 tx_ring->desc = NULL;
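
ice_free_tx_ring() (lines 189-206) is strictly a teardown wrapper: clean the ring, release the software tx_buf array, then release the descriptor memory, NULLing each pointer so a later clean or free is harmless. A compact model of that ordering, with free() standing in for devm_kfree()/dmam_free_coherent() and an empty ring_clean() as a placeholder for the previous sketch:

/* Teardown order modeled on ice_free_tx_ring(); illustrative types. */
#include <stdlib.h>

struct ring { void *slots; void *descs; };

static void ring_clean(struct ring *r) { (void)r; /* see the previous sketch */ }

static void ring_free(struct ring *r)
{
    ring_clean(r);              /* driver: ice_clean_tx_ring()     */

    free(r->slots);             /* driver: devm_kfree(dev, tx_buf) */
    r->slots = NULL;

    if (r->descs) {             /* driver: dmam_free_coherent()    */
        free(r->descs);
        r->descs = NULL;
    }
}

int main(void)
{
    struct ring r = { .slots = malloc(16), .descs = malloc(16) };

    ring_free(&r);
    return 0;
}

The descriptor block is DMA-coherent memory, so its byte size is recomputed with the same ALIGN(count * sizeof(struct ice_tx_desc), ...) expression used at setup (lines 202 and 358) before it is handed back.
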
212 * @tx_ring: Tx ring to clean
217 static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
221 struct ice_vsi *vsi = tx_ring->vsi;
222 s16 i = tx_ring->next_to_clean;
227 netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
229 tx_buf = &tx_ring->tx_buf[i];
230 tx_desc = ICE_TX_DESC(tx_ring, i);
231 i -= tx_ring->count;
247 ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
264 dma_unmap_single(tx_ring->dev,
275 ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
280 i -= tx_ring->count;
281 tx_buf = tx_ring->tx_buf;
282 tx_desc = ICE_TX_DESC(tx_ring, 0);
287 dma_unmap_page(tx_ring->dev,
294 ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
301 i -= tx_ring->count;
302 tx_buf = tx_ring->tx_buf;
303 tx_desc = ICE_TX_DESC(tx_ring, 0);
312 i += tx_ring->count;
313 tx_ring->next_to_clean = i;
315 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
316 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
319 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
320 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
325 if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
327 netif_tx_wake_queue(txring_txq(tx_ring));
328 ++tx_ring->ring_stats->tx_stats.restart_q;
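
ice_clean_tx_irq() (lines 212-328) walks completed descriptors with a signed index biased by -count: next_to_clean is loaded, count is subtracted (line 231), the loop counts upward, and reaching zero means the walk has wrapped past the end of the ring, so the buffer and descriptor pointers are rebased to slot 0 (lines 280-282 and 301-303); the real index is recovered at the end with i += count (lines 312-313). A standalone demonstration of that biasing trick, with illustrative values and types:

/* Demonstrates the biased-index trick from ice_clean_tx_irq():
 * i starts at next_to_clean - count (negative), the loop counts up,
 * and i == 0 signals a wrap. Standalone model, not driver code.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint16_t count = 8;
    uint16_t next_to_clean = 6;        /* consumer starts near the end   */
    int16_t i = (int16_t)next_to_clean;

    i -= count;                        /* bias: i is now in [-count, -1] */

    for (int done = 0; done < 4; done++) {   /* pretend 4 slots completed */
        uint16_t slot = (uint16_t)(i + count);

        printf("cleaning slot %u\n", (unsigned)slot);

        i++;
        if (!i) {                      /* crossed the end of the ring     */
            i -= count;                /* rebase to slot 0                */
            /* driver also resets tx_buf/tx_desc to the ring start here */
        }
    }

    i += count;                        /* undo the bias */
    next_to_clean = (uint16_t)i;
    printf("next_to_clean is now %u\n", (unsigned)next_to_clean);
    return 0;
}

The same biased-index walk reappears in ice_clean_ctrl_tx_irq() (lines 2547-2617). After the walk, the cleaner updates the ring stats and BQL accounting (lines 315-316) and wakes a stopped queue once at least TX_WAKE_THRESHOLD descriptors are free again (lines 319-328).
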
337 * @tx_ring: the Tx ring to set up
341 int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
343 struct device *dev = tx_ring->dev;
350 WARN_ON(tx_ring->tx_buf);
351 tx_ring->tx_buf =
352 devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
354 if (!tx_ring->tx_buf)
358 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
360 tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
362 if (!tx_ring->desc) {
368 tx_ring->next_to_use = 0;
369 tx_ring->next_to_clean = 0;
370 tx_ring->ring_stats->tx_stats.prev_pkt = -1;
374 devm_kfree(dev, tx_ring->tx_buf);
375 tx_ring->tx_buf = NULL;
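
ice_setup_tx_ring() (lines 337-375) allocates two per-ring arrays sized from count: the software tx_buf array and the DMA-coherent descriptor block, whose byte size is the descriptor array rounded up by ALIGN(); if the second allocation fails, the first is unwound. A hedged userspace model of the sizing and unwind; calloc()/free() stand in for devm_kcalloc(), dmam_alloc_coherent() and devm_kfree(), and the 4096 rounding is an assumption for illustration (the listing truncates the ALIGN() argument).

/* Userspace model of the allocation shape in ice_setup_tx_ring();
 * RING_ALIGN is an assumed value for illustration only.
 */
#include <stdint.h>
#include <stdlib.h>

#define RING_ALIGN 4096u            /* assumed rounding granularity */

struct slot { void *buf; };
struct desc { uint64_t addr; uint64_t cmd; };

struct ring {
    struct slot *slots;
    struct desc *descs;
    size_t desc_bytes;
    uint16_t count, next_to_use, next_to_clean;
};

static size_t round_up(size_t n, size_t a)
{
    return (n + a - 1) / a * a;
}

static int ring_setup(struct ring *r)
{
    r->slots = calloc(r->count, sizeof(*r->slots));
    if (!r->slots)
        return -1;

    /* descriptor block: count * sizeof(desc), rounded up (line 358) */
    r->desc_bytes = round_up(r->count * sizeof(struct desc), RING_ALIGN);
    r->descs = calloc(1, r->desc_bytes);
    if (!r->descs)
        goto err;

    r->next_to_use = 0;
    r->next_to_clean = 0;
    return 0;

err:
    free(r->slots);                 /* unwind the first allocation (line 374) */
    r->slots = NULL;
    return -1;
}

int main(void)
{
    struct ring r = { .count = 256 };

    if (ring_setup(&r))
        return 1;
    free(r.descs);
    free(r.slots);
    return 0;
}

The driver also seeds ring_stats->tx_stats.prev_pkt to -1 (line 370), which appears to give the Tx-hang detection a "no packets seen yet" baseline.
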
1356 struct ice_tx_ring *tx_ring;
1358 ice_for_each_tx_ring(tx_ring, *rc) {
1361 ring_stats = tx_ring->ring_stats;
1531 struct ice_tx_ring *tx_ring;
1540 ice_for_each_tx_ring(tx_ring, q_vector->tx) {
1541 struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
1545 wd = ice_xmit_zc(tx_ring, xsk_pool);
1546 else if (ice_ring_is_xdp(tx_ring))
1549 wd = ice_clean_tx_irq(tx_ring, budget);
1611 * @tx_ring: the ring to be checked
1616 static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1618 netif_tx_stop_queue(txring_txq(tx_ring));
1623 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1627 netif_tx_start_queue(txring_txq(tx_ring));
1628 ++tx_ring->ring_stats->tx_stats.restart_q;
1634 * @tx_ring: the ring to be checked
1639 static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1641 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1644 return __ice_maybe_stop_tx(tx_ring, size);
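
__ice_maybe_stop_tx() and ice_maybe_stop_tx() (lines 1611-1644) implement the standard stop-then-recheck pattern: the fast path only compares free descriptors against the requested count; the slow path stops the queue first, re-reads the ring state, and restarts the queue if the cleanup path freed room in the meantime, counting a restart_q event. A thread-free control-flow sketch; ring_unused(), stop_queue() and start_queue() are stand-ins for ICE_DESC_UNUSED() and the netif_tx_*_queue() helpers.

/* Control-flow sketch of the maybe_stop pattern from the listing;
 * all helpers here are stand-ins, not the driver's functions.
 */
#include <stdbool.h>
#include <stdio.h>

struct ring { unsigned int unused; bool stopped; };

static unsigned int ring_unused(const struct ring *r) { return r->unused; }
static void stop_queue(struct ring *r)  { r->stopped = true;  }
static void start_queue(struct ring *r) { r->stopped = false; }

static int slow_maybe_stop(struct ring *r, unsigned int need)
{
    stop_queue(r);
    /* driver: re-check ICE_DESC_UNUSED() only after stopping the queue */
    if (ring_unused(r) < need)
        return -1;              /* still no room: stay stopped (-EBUSY) */

    start_queue(r);             /* room appeared in the meantime: wake up */
    return 0;
}

static int maybe_stop(struct ring *r, unsigned int need)
{
    if (ring_unused(r) >= need)
        return 0;               /* fast path: plenty of descriptors */
    return slow_maybe_stop(r, need);
}

int main(void)
{
    struct ring r = { .unused = 3 };

    printf("need 2 -> %d (stopped=%d)\n", maybe_stop(&r, 2), r.stopped);
    printf("need 8 -> %d (stopped=%d)\n", maybe_stop(&r, 8), r.stopped);
    return 0;
}

The cleanup side mirrors this: once enough descriptors are free (the TX_WAKE_THRESHOLD check at line 320) and the queue is stopped, ice_clean_tx_irq() calls netif_tx_wake_queue() and bumps restart_q (lines 325-328).
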
1649 * @tx_ring: ring to send buffer on
1658 ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
1662 u16 i = tx_ring->next_to_use;
1679 tx_desc = ICE_TX_DESC(tx_ring, i);
1686 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1693 if (dma_mapping_error(tx_ring->dev, dma))
1715 if (i == tx_ring->count) {
1716 tx_desc = ICE_TX_DESC(tx_ring, 0);
1736 if (i == tx_ring->count) {
1737 tx_desc = ICE_TX_DESC(tx_ring, 0);
1744 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1747 tx_buf = &tx_ring->tx_buf[i];
1755 if (i == tx_ring->count)
1774 tx_ring->next_to_use = i;
1776 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1779 kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
1783 writel(i, tx_ring->tail);
1790 tx_buf = &tx_ring->tx_buf[i];
1791 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1795 i = tx_ring->count;
1799 tx_ring->next_to_use = i;
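
ice_tx_map() (lines 1649-1799) maps the skb's linear part (dma_map_single at line 1686) and then each page fragment (skb_frag_dma_map at line 1744), writing descriptors and wrapping the index to 0 whenever it reaches count (lines 1715 and 1736); it then advances next_to_use, calls ice_maybe_stop_tx(), updates BQL, and writes the tail (lines 1774-1783). The sketch below models only the descriptor/index walk with fake addresses; it performs no DMA mapping and its names are stand-ins.

/* Index walk modeled on ice_tx_map(): one descriptor per segment,
 * wrapping to slot 0 when the index reaches count. Fake descriptors;
 * no real DMA mapping is performed.
 */
#include <stdint.h>
#include <stdio.h>

struct desc { uint64_t addr; uint32_t len; };

struct ring {
    struct desc descs[8];
    uint16_t count;
    uint16_t next_to_use;
};

static void ring_map_segments(struct ring *r, const uint32_t *seg_len,
                              unsigned int nsegs)
{
    uint16_t i = r->next_to_use;

    for (unsigned int s = 0; s < nsegs; s++) {
        r->descs[i].addr = 0x1000ull * (s + 1);    /* pretend DMA address */
        r->descs[i].len  = seg_len[s];

        i++;
        if (i == r->count)          /* same wrap check as lines 1715/1736 */
            i = 0;
    }

    r->next_to_use = i;
    /* driver: ice_maybe_stop_tx(), BQL accounting, then writel(i, tail) */
}

int main(void)
{
    struct ring r = { .count = 8, .next_to_use = 6 };
    const uint32_t segs[] = { 1500, 4096, 256 };

    ring_map_segments(&r, segs, 3);
    printf("next_to_use is now %u\n", (unsigned)r.next_to_use);  /* prints 1 */
    return 0;
}

On a DMA mapping error the driver walks backwards from the failing slot, unmapping and freeing each tx_buf, and only then resets next_to_use (lines 1790-1799).
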
1812 const struct ice_tx_ring *tx_ring = off->tx_ring;
1962 if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
2021 * @tx_ring: ring to send buffer on
2028 ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
2042 if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
2048 ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
2335 * @tx_ring: pointer to the Tx ring to send buffer on
2341 ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
2355 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
2357 tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
2370 * @tx_ring: ring to send buffer on
2375 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
2378 struct ice_vsi *vsi = tx_ring->vsi;
2384 ice_trace(xmit_frame_ring, tx_ring, skb);
2394 tx_ring->ring_stats->tx_stats.tx_linearize++;
2403 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2405 tx_ring->ring_stats->tx_stats.tx_busy++;
2410 netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));
2412 offload.tx_ring = tx_ring;
2415 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2423 ice_tx_prepare_vlan_flags(tx_ring, first);
2456 ice_tstamp(tx_ring, skb, first, &offload);
2460 u16 i = tx_ring->next_to_use;
2463 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2465 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2474 ice_tx_map(tx_ring, first, &offload);
2478 ice_trace(xmit_frame_ring_drop, tx_ring, skb);
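
ice_xmit_frame_ring() (lines 2370-2478) is the per-ring transmit path: it counts the descriptors the skb will need (linearizing and counting tx_linearize when there are too many fragments, line 2394), stops the queue and counts tx_busy if the ring cannot hold that many plus a cushion (lines 2403-2405), prepares the VLAN, checksum and timestamp offloads shown earlier in the listing, optionally consumes one context descriptor at next_to_use with the usual wrap (lines 2460-2465), and hands the rest to ice_tx_map(). A compact model of that estimate/reserve/consume ordering; the helpers are illustrative, and SAFETY_MARGIN is an assumption standing in for the extra terms added at line 2403.

/* Shape of the transmit fast path seen in ice_xmit_frame_ring():
 * estimate descriptor demand, bail out when the ring is too full,
 * otherwise optionally consume a context descriptor first.
 * Names and constants are stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SAFETY_MARGIN 5u        /* assumed cushion on top of the count */

struct ring { uint16_t count, next_to_use, next_to_clean; };

static uint16_t ring_unused(const struct ring *r)
{
    return (r->next_to_clean > r->next_to_use ? 0 : r->count) +
           r->next_to_clean - r->next_to_use - 1;
}

static bool xmit_frame(struct ring *r, unsigned int data_descs,
                       bool need_ctx_desc)
{
    if (ring_unused(r) < data_descs + SAFETY_MARGIN)
        return false;               /* driver: tx_busy++, NETDEV_TX_BUSY */

    if (need_ctx_desc) {            /* lines 2460-2465: context descriptor */
        uint16_t i = r->next_to_use + 1;

        r->next_to_use = (i < r->count) ? i : 0;
    }

    /* driver: ice_tx_map() now writes the data descriptors */
    return true;
}

int main(void)
{
    struct ring r = { .count = 64, .next_to_use = 10, .next_to_clean = 30 };

    printf("sent: %d\n", xmit_frame(&r, 3, true));
    printf("next_to_use: %u\n", (unsigned)r.next_to_use);
    return 0;
}
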
2494 struct ice_tx_ring *tx_ring;
2496 tx_ring = vsi->tx_rings[skb->queue_mapping];
2504 return ice_xmit_frame_ring(skb, tx_ring);
2542 * @tx_ring: tx_ring to clean
2544 void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
2546 struct ice_vsi *vsi = tx_ring->vsi;
2547 s16 i = tx_ring->next_to_clean;
2552 tx_buf = &tx_ring->tx_buf[i];
2553 tx_desc = ICE_TX_DESC(tx_ring, i);
2554 i -= tx_ring->count;
2581 i -= tx_ring->count;
2582 tx_buf = tx_ring->tx_buf;
2583 tx_desc = ICE_TX_DESC(tx_ring, 0);
2588 dma_unmap_single(tx_ring->dev,
2593 devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2608 i -= tx_ring->count;
2609 tx_buf = tx_ring->tx_buf;
2610 tx_desc = ICE_TX_DESC(tx_ring, 0);
2616 i += tx_ring->count;
2617 tx_ring->next_to_clean = i;