Lines Matching full:tx (full-word matches for "tx"; all hits below fall in the gve driver's GQI transmit path)

26 struct gve_tx_ring *tx = &priv->tx[tx_qid]; in gve_xdp_tx_flush() local
28 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xdp_tx_flush()
32 * We copy skb payloads into the registered segment before writing Tx
33 * descriptors and ringing the Tx doorbell.
74 /* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
135 /* gve_tx_free_fifo - Return space to Tx FIFO
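
The two FIFO helpers above appear in this listing only through their doc comments. For orientation, here is a minimal, self-contained sketch of the kind of wrap-around byte FIFO they manage over the registered segment; the struct and helper names are hypothetical, not the driver's actual gve_tx_fifo layout, and the real allocator also splits an allocation that crosses the wrap point into multiple iovecs, which is omitted here.

    /* Toy wrap-around byte FIFO over one contiguous buffer (illustrative only). */
    #include <stddef.h>

    struct toy_tx_fifo {        /* hypothetical stand-in for struct gve_tx_fifo */
        size_t size;            /* total bytes in the registered region  */
        size_t head;            /* next free offset, wraps at size       */
        size_t available;       /* bytes not currently owned by the NIC  */
    };

    /* Reserve @bytes; returns the starting offset, or -1 if the FIFO is full. */
    static long toy_fifo_alloc(struct toy_tx_fifo *f, size_t bytes)
    {
        size_t off;

        if (bytes > f->available)
            return -1;
        off = f->head;
        f->head = (f->head + bytes) % f->size;
        f->available -= bytes;
        return (long)off;
    }

    /* Hand @bytes back once the NIC has consumed the corresponding payload. */
    static void toy_fifo_free(struct toy_tx_fifo *f, size_t bytes)
    {
        f->available += bytes;
    }
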
157 static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_xdp_done() argument
168 idx = tx->done & tx->mask; in gve_clean_xdp_done()
169 info = &tx->info[idx]; in gve_clean_xdp_done()
170 tx->done++; in gve_clean_xdp_done()
187 gve_tx_free_fifo(&tx->tx_fifo, space_freed); in gve_clean_xdp_done()
188 if (xsk_complete > 0 && tx->xsk_pool) in gve_clean_xdp_done()
189 xsk_tx_completed(tx->xsk_pool, xsk_complete); in gve_clean_xdp_done()
190 u64_stats_update_begin(&tx->statss); in gve_clean_xdp_done()
191 tx->bytes_done += bytes; in gve_clean_xdp_done()
192 tx->pkt_done += pkts; in gve_clean_xdp_done()
193 u64_stats_update_end(&tx->statss); in gve_clean_xdp_done()
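
tx->done and tx->req above are free-running counters that are never masked when stored; the mask (ring size minus one) is applied only when a counter is used as an array index, which requires a power-of-two ring size. A standalone demonstration that the idiom stays correct across u32 wraparound:

    /* Standalone demo of the done/req counter + mask idiom (not driver code). */
    #include <stdio.h>

    int main(void)
    {
        unsigned int size = 8, mask = size - 1;   /* ring size must be 2^n */
        unsigned int done = 0xfffffffeu;          /* counter about to wrap */

        for (int i = 0; i < 4; i++, done++)
            printf("done=%u -> idx=%u\n", done, done & mask);
        /* prints indices 6, 7, 0, 1: masking keeps working across the wrap */
        return 0;
    }
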
197 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
203 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_stop_ring_gqi() local
209 gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false); in gve_tx_stop_ring_gqi()
210 netdev_tx_reset_queue(tx->netdev_txq); in gve_tx_stop_ring_gqi()
214 static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_free_ring_gqi() argument
218 int idx = tx->q_num; in gve_tx_free_ring_gqi()
223 slots = tx->mask + 1; in gve_tx_free_ring_gqi()
224 dma_free_coherent(hdev, sizeof(*tx->q_resources), in gve_tx_free_ring_gqi()
225 tx->q_resources, tx->q_resources_bus); in gve_tx_free_ring_gqi()
226 tx->q_resources = NULL; in gve_tx_free_ring_gqi()
228 if (tx->tx_fifo.qpl) { in gve_tx_free_ring_gqi()
229 if (tx->tx_fifo.base) in gve_tx_free_ring_gqi()
230 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_free_ring_gqi()
232 qpl_id = gve_tx_qpl_id(priv, tx->q_num); in gve_tx_free_ring_gqi()
233 gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id); in gve_tx_free_ring_gqi()
234 tx->tx_fifo.qpl = NULL; in gve_tx_free_ring_gqi()
237 bytes = sizeof(*tx->desc) * slots; in gve_tx_free_ring_gqi()
238 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_free_ring_gqi()
239 tx->desc = NULL; in gve_tx_free_ring_gqi()
241 vfree(tx->info); in gve_tx_free_ring_gqi()
242 tx->info = NULL; in gve_tx_free_ring_gqi()
244 netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx); in gve_tx_free_ring_gqi()
250 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_start_ring_gqi() local
254 tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); in gve_tx_start_ring_gqi()
260 struct gve_tx_ring *tx, in gve_tx_alloc_ring_gqi() argument
269 memset(tx, 0, sizeof(*tx)); in gve_tx_alloc_ring_gqi()
270 spin_lock_init(&tx->clean_lock); in gve_tx_alloc_ring_gqi()
271 spin_lock_init(&tx->xdp_lock); in gve_tx_alloc_ring_gqi()
272 tx->q_num = idx; in gve_tx_alloc_ring_gqi()
274 tx->mask = cfg->ring_size - 1; in gve_tx_alloc_ring_gqi()
277 tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info)); in gve_tx_alloc_ring_gqi()
278 if (!tx->info) in gve_tx_alloc_ring_gqi()
281 /* alloc tx queue */ in gve_tx_alloc_ring_gqi()
282 bytes = sizeof(*tx->desc) * cfg->ring_size; in gve_tx_alloc_ring_gqi()
283 tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); in gve_tx_alloc_ring_gqi()
284 if (!tx->desc) in gve_tx_alloc_ring_gqi()
287 tx->raw_addressing = cfg->raw_addressing; in gve_tx_alloc_ring_gqi()
288 tx->dev = hdev; in gve_tx_alloc_ring_gqi()
289 if (!tx->raw_addressing) { in gve_tx_alloc_ring_gqi()
290 qpl_id = gve_tx_qpl_id(priv, tx->q_num); in gve_tx_alloc_ring_gqi()
293 tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id, in gve_tx_alloc_ring_gqi()
295 if (!tx->tx_fifo.qpl) in gve_tx_alloc_ring_gqi()
298 /* map Tx FIFO */ in gve_tx_alloc_ring_gqi()
299 if (gve_tx_fifo_init(priv, &tx->tx_fifo)) in gve_tx_alloc_ring_gqi()
303 tx->q_resources = in gve_tx_alloc_ring_gqi()
305 sizeof(*tx->q_resources), in gve_tx_alloc_ring_gqi()
306 &tx->q_resources_bus, in gve_tx_alloc_ring_gqi()
308 if (!tx->q_resources) in gve_tx_alloc_ring_gqi()
314 if (!tx->raw_addressing) in gve_tx_alloc_ring_gqi()
315 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_alloc_ring_gqi()
317 if (!tx->raw_addressing) { in gve_tx_alloc_ring_gqi()
318 gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id); in gve_tx_alloc_ring_gqi()
319 tx->tx_fifo.qpl = NULL; in gve_tx_alloc_ring_gqi()
322 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_alloc_ring_gqi()
323 tx->desc = NULL; in gve_tx_alloc_ring_gqi()
325 vfree(tx->info); in gve_tx_alloc_ring_gqi()
326 tx->info = NULL; in gve_tx_alloc_ring_gqi()
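
gve_tx_alloc_ring_gqi() above layers several resources (the info array, the coherent descriptor ring, an optional QPL plus FIFO, and the queue resources page) and releases them in reverse order on failure, mirroring gve_tx_free_ring_gqi(). A reduced sketch of that goto-ladder pattern with the coherent DMA API; the QPL/FIFO steps are omitted, and while the field names follow the listing, this is not the exact driver code:

    /* Sketch: allocate in order, unwind in reverse on the first failure. */
    ring->info = vcalloc(ring_size, sizeof(*ring->info));
    if (!ring->info)
        return -ENOMEM;

    bytes = sizeof(*ring->desc) * ring_size;
    ring->desc = dma_alloc_coherent(dev, bytes, &ring->bus, GFP_KERNEL);
    if (!ring->desc)
        goto free_info;

    ring->q_resources = dma_alloc_coherent(dev, sizeof(*ring->q_resources),
                                           &ring->q_resources_bus, GFP_KERNEL);
    if (!ring->q_resources)
        goto free_desc;
    return 0;

    free_desc:
        dma_free_coherent(dev, bytes, ring->desc, ring->bus);
        ring->desc = NULL;
    free_info:
        vfree(ring->info);
        ring->info = NULL;
        return -ENOMEM;
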
333 struct gve_tx_ring *tx = cfg->tx; in gve_tx_alloc_rings_gqi() local
339 "Cannot alloc more than the max num of Tx rings\n"); in gve_tx_alloc_rings_gqi()
344 tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring), in gve_tx_alloc_rings_gqi()
346 if (!tx) in gve_tx_alloc_rings_gqi()
348 } else if (!tx) { in gve_tx_alloc_rings_gqi()
350 "Cannot alloc tx rings from a nonzero start idx without tx array\n"); in gve_tx_alloc_rings_gqi()
355 err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i); in gve_tx_alloc_rings_gqi()
358 "Failed to alloc tx ring=%d: err=%d\n", in gve_tx_alloc_rings_gqi()
364 cfg->tx = tx; in gve_tx_alloc_rings_gqi()
369 gve_tx_free_ring_gqi(priv, &tx[j], cfg); in gve_tx_alloc_rings_gqi()
371 kvfree(tx); in gve_tx_alloc_rings_gqi()
378 struct gve_tx_ring *tx = cfg->tx; in gve_tx_free_rings_gqi() local
381 if (!tx) in gve_tx_free_rings_gqi()
385 gve_tx_free_ring_gqi(priv, &tx[i], cfg); in gve_tx_free_rings_gqi()
388 kvfree(tx); in gve_tx_free_rings_gqi()
389 cfg->tx = NULL; in gve_tx_free_rings_gqi()
394 * @tx: tx ring to check
400 static inline u32 gve_tx_avail(struct gve_tx_ring *tx) in gve_tx_avail() argument
402 return tx->mask + 1 - (tx->req - tx->done); in gve_tx_avail()
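
The availability formula above relies on unsigned arithmetic: req and done are free-running u32 counters, so req - done is the number of outstanding descriptors even after either counter wraps. A standalone check of that property:

    /* gve_tx_avail() arithmetic, checked standalone (not driver code). */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t avail(uint32_t mask, uint32_t req, uint32_t done)
    {
        return mask + 1 - (req - done);   /* ring slots not yet consumed */
    }

    int main(void)
    {
        /* 256-entry ring with counters straddling the u32 wrap point:
         * 16 descriptors are outstanding, so 240 slots remain.
         */
        assert(avail(255, 0x00000005u, 0xfffffff5u) == 240);
        /* idle ring: req == done, every slot is available */
        assert(avail(255, 42, 42) == 256);
        return 0;
    }
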
405 static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, in gve_skb_fifo_bytes_required() argument
415 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, in gve_skb_fifo_bytes_required()
450 static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required) in gve_can_tx() argument
454 if (!tx->raw_addressing) in gve_can_tx()
455 can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required); in gve_can_tx()
457 return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc); in gve_can_tx()
463 static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_maybe_stop_tx() argument
471 if (!tx->raw_addressing) in gve_maybe_stop_tx()
472 bytes_required = gve_skb_fifo_bytes_required(tx, skb); in gve_maybe_stop_tx()
474 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
478 spin_lock(&tx->clean_lock); in gve_maybe_stop_tx()
479 nic_done = gve_tx_load_event_counter(priv, tx); in gve_maybe_stop_tx()
480 to_do = nic_done - tx->done; in gve_maybe_stop_tx()
482 /* Only try to clean if there is hope for TX */ in gve_maybe_stop_tx()
483 if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) { in gve_maybe_stop_tx()
486 gve_clean_tx_done(priv, tx, to_do, false); in gve_maybe_stop_tx()
488 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
493 tx->stop_queue++; in gve_maybe_stop_tx()
494 netif_tx_stop_queue(tx->netdev_txq); in gve_maybe_stop_tx()
496 spin_unlock(&tx->clean_lock); in gve_maybe_stop_tx()
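
The lines above are the slow path of the transmit stop check: when the cheap descriptor/FIFO test fails, completed descriptors are reclaimed opportunistically under clean_lock before the queue is stopped. A simplified reconstruction of that control flow from the matched lines; elided details and the exact return convention are assumptions:

    /* Sketch only; the real gve_maybe_stop_tx() has more bookkeeping. */
    static int maybe_stop_tx_sketch(struct gve_priv *priv,
                                    struct gve_tx_ring *tx, struct sk_buff *skb)
    {
        int bytes_required = 0;
        u32 nic_done, to_do;
        int ret = -EBUSY;                       /* assumed error convention */

        if (!tx->raw_addressing)                /* QPL mode also needs FIFO room */
            bytes_required = gve_skb_fifo_bytes_required(tx, skb);

        if (likely(gve_can_tx(tx, bytes_required)))
            return 0;                           /* fast path: enough space */

        spin_lock(&tx->clean_lock);             /* serialize with gve_tx_poll() */
        nic_done = gve_tx_load_event_counter(priv, tx);
        to_do = nic_done - tx->done;

        /* Only clean if completions could actually make enough room. */
        if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
            gve_clean_tx_done(priv, tx, to_do, false);
            if (likely(gve_can_tx(tx, bytes_required)))
                ret = 0;
        }
        if (ret) {
            tx->stop_queue++;                   /* stat: times the queue stopped */
            netif_tx_stop_queue(tx->netdev_txq);
        }
        spin_unlock(&tx->clean_lock);
        return ret;
    }
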
566 static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb) in gve_tx_add_skb_copy() argument
573 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_copy()
579 info = &tx->info[idx]; in gve_tx_add_skb_copy()
580 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_copy()
593 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen); in gve_tx_add_skb_copy()
594 hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes, in gve_tx_add_skb_copy()
597 payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen, in gve_tx_add_skb_copy()
606 tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset, in gve_tx_add_skb_copy()
608 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
614 next_idx = (tx->req + 1) & tx->mask; in gve_tx_add_skb_copy()
615 gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb); in gve_tx_add_skb_copy()
619 next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask; in gve_tx_add_skb_copy()
620 seg_desc = &tx->desc[next_idx]; in gve_tx_add_skb_copy()
629 tx->tx_fifo.base + info->iov[i].iov_offset, in gve_tx_add_skb_copy()
631 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
640 static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_add_skb_no_copy() argument
649 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_no_copy()
654 info = &tx->info[idx]; in gve_tx_add_skb_no_copy()
655 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
668 addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
669 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
670 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
687 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
688 mtd_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
698 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
699 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
708 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
709 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
711 addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
712 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
713 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
716 tx->info[idx].skb = NULL; in gve_tx_add_skb_no_copy()
717 dma_unmap_len_set(&tx->info[idx], len, len); in gve_tx_add_skb_no_copy()
718 dma_unmap_addr_set(&tx->info[idx], dma, addr); in gve_tx_add_skb_no_copy()
734 gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]); in gve_tx_add_skb_no_copy()
737 tx->dropped_pkt++; in gve_tx_add_skb_no_copy()
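
In the no-copy (raw addressing) path above, skb memory is mapped directly with the streaming DMA API, and a failed mapping forces every earlier mapping to be released before the packet is dropped (tx->dropped_pkt). A hedged, generic sketch of that map-and-unwind pattern; the struct and function names here are hypothetical, and the driver keeps this state in tx->info[] via dma_unmap_addr/dma_unmap_len instead:

    /* Generic sketch (kernel context assumed): map an skb head + frags for Tx,
     * unwinding all prior mappings if any single mapping fails.
     */
    struct toy_mapping { dma_addr_t addr; unsigned int len; };   /* hypothetical */

    static int toy_map_skb(struct device *dev, struct sk_buff *skb,
                           struct toy_mapping *m)   /* nr_frags + 1 entries */
    {
        int i, n = 0;

        m[n].len  = skb_headlen(skb);
        m[n].addr = dma_map_single(dev, skb->data, m[n].len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, m[n].addr))
            return -ENOMEM;
        n++;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
            const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

            m[n].len  = skb_frag_size(frag);
            m[n].addr = skb_frag_dma_map(dev, frag, 0, m[n].len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, m[n].addr))
                goto unwind;
            n++;
        }
        return n;          /* number of mappings the completion path must undo */

    unwind:
        while (n-- > 1)    /* frag mappings came from dma_map_page() internally */
            dma_unmap_page(dev, m[n].addr, m[n].len, DMA_TO_DEVICE);
        dma_unmap_single(dev, m[0].addr, m[0].len, DMA_TO_DEVICE);
        return -ENOMEM;
    }
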
744 struct gve_tx_ring *tx; in gve_tx() local
749 tx = &priv->tx[skb_get_queue_mapping(skb)]; in gve_tx()
750 if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) { in gve_tx()
751 /* We need to ring the txq doorbell -- we have stopped the Tx in gve_tx()
756 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
759 if (tx->raw_addressing) in gve_tx()
760 nsegs = gve_tx_add_skb_no_copy(priv, tx, skb); in gve_tx()
762 nsegs = gve_tx_add_skb_copy(priv, tx, skb); in gve_tx()
766 netdev_tx_sent_queue(tx->netdev_txq, skb->len); in gve_tx()
768 tx->req += nsegs; in gve_tx()
773 if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) in gve_tx()
779 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
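
gve_tx() above defers the doorbell when the stack signals that more packets are queued behind this one, so several descriptors can be posted per MMIO write; the doorbell is still rung immediately if the queue was just stopped, to guarantee forward progress. The tail of that function reduces to roughly the following sketch (the return value is an assumption):

    /* Doorbell batching (sketch of the tail of gve_tx() above). */
    tx->req += nsegs;
    if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
        return NETDEV_TX_OK;        /* a later packet in the batch will ring */

    gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
    return NETDEV_TX_OK;
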
783 static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_fill_xdp() argument
788 u32 reqi = tx->req; in gve_tx_fill_xdp()
790 pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len); in gve_tx_fill_xdp()
793 info = &tx->info[reqi & tx->mask]; in gve_tx_fill_xdp()
798 nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len, in gve_tx_fill_xdp()
806 gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0, in gve_tx_fill_xdp()
811 gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask], in gve_tx_fill_xdp()
816 memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset, in gve_tx_fill_xdp()
819 tx->tx_fifo.qpl->page_buses, in gve_tx_fill_xdp()
834 struct gve_tx_ring *tx; in gve_xdp_xmit() local
843 tx = &priv->tx[qid]; in gve_xdp_xmit()
845 spin_lock(&tx->xdp_lock); in gve_xdp_xmit()
847 err = gve_xdp_xmit_one(priv, tx, frames[i]->data, in gve_xdp_xmit()
854 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xdp_xmit()
856 spin_unlock(&tx->xdp_lock); in gve_xdp_xmit()
858 u64_stats_update_begin(&tx->statss); in gve_xdp_xmit()
859 tx->xdp_xmit += n; in gve_xdp_xmit()
860 tx->xdp_xmit_errors += n - i; in gve_xdp_xmit()
861 u64_stats_update_end(&tx->statss); in gve_xdp_xmit()
866 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_xdp_xmit_one() argument
871 if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1)) in gve_xdp_xmit_one()
874 nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false); in gve_xdp_xmit_one()
875 tx->req += nsegs; in gve_xdp_xmit_one()
882 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_tx_done() argument
893 idx = tx->done & tx->mask; in gve_clean_tx_done()
896 tx->q_num, __func__, idx, tx->req, tx->done); in gve_clean_tx_done()
897 info = &tx->info[idx]; in gve_clean_tx_done()
901 if (tx->raw_addressing) in gve_clean_tx_done()
902 gve_tx_unmap_buf(tx->dev, info); in gve_clean_tx_done()
903 tx->done++; in gve_clean_tx_done()
910 if (tx->raw_addressing) in gve_clean_tx_done()
916 if (!tx->raw_addressing) in gve_clean_tx_done()
917 gve_tx_free_fifo(&tx->tx_fifo, space_freed); in gve_clean_tx_done()
918 u64_stats_update_begin(&tx->statss); in gve_clean_tx_done()
919 tx->bytes_done += bytes; in gve_clean_tx_done()
920 tx->pkt_done += pkts; in gve_clean_tx_done()
921 u64_stats_update_end(&tx->statss); in gve_clean_tx_done()
922 netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes); in gve_clean_tx_done()
929 if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) && in gve_clean_tx_done()
930 likely(gve_can_tx(tx, GVE_TX_START_THRESH))) { in gve_clean_tx_done()
931 tx->wake_queue++; in gve_clean_tx_done()
932 netif_tx_wake_queue(tx->netdev_txq); in gve_clean_tx_done()
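
The completion path above feeds byte-queue limits via netdev_tx_completed_queue() (paired with netdev_tx_sent_queue() in gve_tx()) and publishes per-ring byte/packet totals under tx->statss. For reference, a sketch of the matching reader side of that u64_stats sequence, as a stats consumer such as an ethtool handler would use it; on 64-bit kernels the seqcount compiles away:

    /* Reader side of the u64_stats updates shown above (sketch). */
    unsigned int start;
    u64 bytes, packets;

    do {
        start   = u64_stats_fetch_begin(&tx->statss);
        bytes   = tx->bytes_done;
        packets = tx->pkt_done;
    } while (u64_stats_fetch_retry(&tx->statss, start));
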
939 struct gve_tx_ring *tx) in gve_tx_load_event_counter() argument
941 u32 counter_index = be32_to_cpu(tx->q_resources->counter_index); in gve_tx_load_event_counter()
947 static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_xsk_tx() argument
954 spin_lock(&tx->xdp_lock); in gve_xsk_tx()
956 if (!gve_can_tx(tx, GVE_TX_START_THRESH)) in gve_xsk_tx()
959 if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) { in gve_xsk_tx()
960 tx->xdp_xsk_done = tx->xdp_xsk_wakeup; in gve_xsk_tx()
964 data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr); in gve_xsk_tx()
965 nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true); in gve_xsk_tx()
966 tx->req += nsegs; in gve_xsk_tx()
971 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xsk_tx()
972 xsk_tx_release(tx->xsk_pool); in gve_xsk_tx()
974 spin_unlock(&tx->xdp_lock); in gve_xsk_tx()
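
gve_xsk_tx() above implements the AF_XDP zero-copy pull model: descriptors are peeked from the user-filled Tx ring, the payload is read straight out of the UMEM, and xsk_tx_release() lets user space reuse the consumed entries (completions are reported separately via xsk_tx_completed(), seen in gve_clean_xdp_done()). A sketch of the core loop, with the hardware-specific steps left as labelled hypothetical helpers:

    /* Sketch of the AF_XDP Tx pull loop (the xsk API calls are real; the
     * queue_on_hw_ring()/ring_doorbell() helpers are hypothetical).
     */
    struct xdp_desc desc;
    int sent = 0;

    while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
        void *data = xsk_buff_raw_get_data(pool, desc.addr);

        queue_on_hw_ring(data, desc.len);   /* hypothetical: post Tx descriptor */
        sent++;
    }
    if (sent) {
        ring_doorbell();                    /* hypothetical: notify the NIC */
        xsk_tx_release(pool);               /* let user space refill the ring */
    }
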
981 struct gve_tx_ring *tx = block->tx; in gve_xdp_poll() local
987 nic_done = gve_tx_load_event_counter(priv, tx); in gve_xdp_poll()
988 to_do = min_t(u32, (nic_done - tx->done), budget); in gve_xdp_poll()
989 gve_clean_xdp_done(priv, tx, to_do); in gve_xdp_poll()
990 repoll = nic_done != tx->done; in gve_xdp_poll()
992 if (tx->xsk_pool) { in gve_xdp_poll()
993 int sent = gve_xsk_tx(priv, tx, budget); in gve_xdp_poll()
995 u64_stats_update_begin(&tx->statss); in gve_xdp_poll()
996 tx->xdp_xsk_sent += sent; in gve_xdp_poll()
997 u64_stats_update_end(&tx->statss); in gve_xdp_poll()
999 if (xsk_uses_need_wakeup(tx->xsk_pool)) in gve_xdp_poll()
1000 xsk_set_tx_need_wakeup(tx->xsk_pool); in gve_xdp_poll()
1010 struct gve_tx_ring *tx = block->tx; in gve_tx_poll() local
1018 /* In TX path, it may try to clean completed pkts in order to xmit, in gve_tx_poll()
1022 spin_lock(&tx->clean_lock); in gve_tx_poll()
1024 nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_poll()
1025 to_do = min_t(u32, (nic_done - tx->done), budget); in gve_tx_poll()
1026 gve_clean_tx_done(priv, tx, to_do, true); in gve_tx_poll()
1027 spin_unlock(&tx->clean_lock); in gve_tx_poll()
1029 return nic_done != tx->done; in gve_tx_poll()
1032 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx) in gve_tx_clean_pending() argument
1034 u32 nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_clean_pending()
1036 return nic_done != tx->done; in gve_tx_clean_pending()
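
Completion tracking throughout this file is counter-based rather than per-descriptor: the NIC advances a per-queue event counter in host memory, gve_tx_load_event_counter() reads it, and the delta against tx->done is how many descriptors may be reclaimed (gve_tx_clean_pending() above is exactly that comparison). A small sketch of such a read, assuming a shared array of big-endian counters written by the device:

    /* Sketch: read a device-written big-endian event counter (assumed layout). */
    static u32 load_event_counter(__be32 *counters, u32 counter_index)
    {
        /* READ_ONCE() because the device updates the counter asynchronously. */
        return be32_to_cpu(READ_ONCE(counters[counter_index]));
    }

    /* Caller: to_do = nic_done - tx->done;  clean up to to_do descriptors. */
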