Lines matching refs: tx. Each entry shows the source line number, the matching code, and the enclosing function ("argument" and "local" mark parameter and local-variable declarations).
19 static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count) in gve_has_free_tx_qpl_bufs() argument
23 if (!tx->dqo.qpl) in gve_has_free_tx_qpl_bufs()
26 num_avail = tx->dqo.num_tx_qpl_bufs - in gve_has_free_tx_qpl_bufs()
27 (tx->dqo_tx.alloc_tx_qpl_buf_cnt - in gve_has_free_tx_qpl_bufs()
28 tx->dqo_tx.free_tx_qpl_buf_cnt); in gve_has_free_tx_qpl_bufs()
34 tx->dqo_tx.free_tx_qpl_buf_cnt = in gve_has_free_tx_qpl_bufs()
35 atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_cnt); in gve_has_free_tx_qpl_bufs()
37 num_avail = tx->dqo.num_tx_qpl_bufs - in gve_has_free_tx_qpl_bufs()
38 (tx->dqo_tx.alloc_tx_qpl_buf_cnt - in gve_has_free_tx_qpl_bufs()
39 tx->dqo_tx.free_tx_qpl_buf_cnt); in gve_has_free_tx_qpl_bufs()
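
gve_has_free_tx_qpl_bufs() above compares free-running counters: the TX path's allocation count minus a cached copy of the completion path's free count is the number of QPL buffers still in flight, and the cached copy is refreshed with an acquire read only when the fast check fails. A minimal userspace C11 sketch of that accounting, using hypothetical names rather than the driver's structures:

/* Illustrative userspace analogue (hypothetical names, not the gve driver). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_BUFS 128u

static uint32_t alloc_cnt;		/* advanced only by the TX path */
static uint32_t cached_free_cnt;	/* TX path's possibly stale copy */
static _Atomic uint32_t shared_free_cnt; /* advanced by the completion path */

/* The counters only grow; unsigned subtraction stays correct across wraparound. */
static bool has_free_bufs(uint32_t count)
{
	uint32_t in_flight = alloc_cnt - cached_free_cnt;

	if (NUM_BUFS - in_flight >= count)
		return true;

	/* Slow path: refresh the cached counter. In this sketch the acquire
	 * load pairs with the release store on the freeing side, so freed
	 * buffers are safe to reuse once they are counted.
	 */
	cached_free_cnt = atomic_load_explicit(&shared_free_cnt,
					       memory_order_acquire);
	in_flight = alloc_cnt - cached_free_cnt;
	return NUM_BUFS - in_flight >= count;
}

int main(void)
{
	alloc_cnt = 5;
	atomic_store_explicit(&shared_free_cnt, 2, memory_order_release);
	printf("room for 100 more: %s\n", has_free_bufs(100) ? "yes" : "no");
	return 0;
}
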
45 gve_alloc_tx_qpl_buf(struct gve_tx_ring *tx) in gve_alloc_tx_qpl_buf() argument
49 index = tx->dqo_tx.free_tx_qpl_buf_head; in gve_alloc_tx_qpl_buf()
55 tx->dqo_tx.free_tx_qpl_buf_head = in gve_alloc_tx_qpl_buf()
56 atomic_xchg(&tx->dqo_compl.free_tx_qpl_buf_head, -1); in gve_alloc_tx_qpl_buf()
57 index = tx->dqo_tx.free_tx_qpl_buf_head; in gve_alloc_tx_qpl_buf()
64 tx->dqo_tx.free_tx_qpl_buf_head = tx->dqo.tx_qpl_buf_next[index]; in gve_alloc_tx_qpl_buf()
70 gve_free_tx_qpl_bufs(struct gve_tx_ring *tx, in gve_free_tx_qpl_bufs() argument
82 tx->dqo.tx_qpl_buf_next[index] = pkt->tx_qpl_buf_ids[i]; in gve_free_tx_qpl_bufs()
87 s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_tx_qpl_buf_head); in gve_free_tx_qpl_bufs()
89 tx->dqo.tx_qpl_buf_next[index] = old_head; in gve_free_tx_qpl_bufs()
90 if (atomic_cmpxchg(&tx->dqo_compl.free_tx_qpl_buf_head, in gve_free_tx_qpl_bufs()
97 atomic_add(pkt->num_bufs, &tx->dqo_compl.free_tx_qpl_buf_cnt); in gve_free_tx_qpl_bufs()
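
gve_alloc_tx_qpl_buf() and gve_free_tx_qpl_bufs() above split one index-linked free list in two: the TX path pops from a private list and only steals the whole shared chain (an atomic exchange with -1) when it runs dry, while the completion path pushes freed indices with a compare-and-swap loop. Below is an illustrative userspace analogue with C11 atomics and hypothetical names; the chaining in main() mirrors what gve_tx_qpl_buf_init() sets up.

/* Userspace sketch of the index-linked free-list handoff (hypothetical names). */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_BUFS 8

static int16_t next_idx[NUM_BUFS];	 /* next_idx[i] links buffer i to the next free one */
static int16_t local_head = -1;		 /* consumer-private list (TX path) */
static _Atomic int16_t shared_head = -1; /* list the completion path pushes onto */

/* Producer side: push one freed index onto the shared list, lock-free. */
static void free_buf(int16_t index)
{
	int16_t old_head = atomic_load_explicit(&shared_head, memory_order_acquire);

	do {
		next_idx[index] = old_head;
	} while (!atomic_compare_exchange_weak(&shared_head, &old_head, index));
}

/* Consumer side: pop from the private list; when it runs dry, steal the whole
 * shared chain with a single exchange, leaving -1 (empty) behind.
 */
static int16_t alloc_buf(void)
{
	if (local_head == -1)
		local_head = atomic_exchange_explicit(&shared_head, -1,
						      memory_order_acq_rel);
	if (local_head == -1)
		return -1;		/* nothing free right now */

	int16_t index = local_head;

	local_head = next_idx[index];
	return index;
}

int main(void)
{
	/* Initial state: all buffers chained together, last one terminated with -1. */
	for (int i = 0; i < NUM_BUFS; i++)
		next_idx[i] = i + 1;
	next_idx[NUM_BUFS - 1] = -1;
	local_head = 0;

	int16_t a = alloc_buf();

	free_buf(a);
	printf("allocated and recycled buffer %d, next alloc %d\n", a, alloc_buf());
	return 0;
}
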
102 static bool gve_has_pending_packet(struct gve_tx_ring *tx) in gve_has_pending_packet() argument
105 if (tx->dqo_tx.free_pending_packets != -1) in gve_has_pending_packet()
109 if (atomic_read_acquire(&tx->dqo_compl.free_pending_packets) != -1) in gve_has_pending_packet()
118 struct gve_tx_ring *tx = &priv->tx[tx_qid]; in gve_xdp_tx_flush_dqo() local
120 gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail); in gve_xdp_tx_flush_dqo()
124 gve_alloc_pending_packet(struct gve_tx_ring *tx) in gve_alloc_pending_packet() argument
129 index = tx->dqo_tx.free_pending_packets; in gve_alloc_pending_packet()
135 tx->dqo_tx.free_pending_packets = in gve_alloc_pending_packet()
136 atomic_xchg(&tx->dqo_compl.free_pending_packets, -1); in gve_alloc_pending_packet()
137 index = tx->dqo_tx.free_pending_packets; in gve_alloc_pending_packet()
143 pending_packet = &tx->dqo.pending_packets[index]; in gve_alloc_pending_packet()
146 tx->dqo_tx.free_pending_packets = pending_packet->next; in gve_alloc_pending_packet()
153 gve_free_pending_packet(struct gve_tx_ring *tx, in gve_free_pending_packet() argument
156 s16 index = pending_packet - tx->dqo.pending_packets; in gve_free_pending_packet()
160 s16 old_head = atomic_read_acquire(&tx->dqo_compl.free_pending_packets); in gve_free_pending_packet()
163 if (atomic_cmpxchg(&tx->dqo_compl.free_pending_packets, in gve_free_pending_packet()
172 static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx) in gve_tx_clean_pending_packets() argument
176 for (i = 0; i < tx->dqo.num_pending_packets; i++) { in gve_tx_clean_pending_packets()
178 &tx->dqo.pending_packets[i]; in gve_tx_clean_pending_packets()
183 dma_unmap_single(tx->dev, in gve_tx_clean_pending_packets()
188 dma_unmap_page(tx->dev, in gve_tx_clean_pending_packets()
204 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_stop_ring_dqo() local
210 gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL); in gve_tx_stop_ring_dqo()
211 if (tx->netdev_txq) in gve_tx_stop_ring_dqo()
212 netdev_tx_reset_queue(tx->netdev_txq); in gve_tx_stop_ring_dqo()
213 gve_tx_clean_pending_packets(tx); in gve_tx_stop_ring_dqo()
217 static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_free_ring_dqo() argument
221 int idx = tx->q_num; in gve_tx_free_ring_dqo()
225 if (tx->q_resources) { in gve_tx_free_ring_dqo()
226 dma_free_coherent(hdev, sizeof(*tx->q_resources), in gve_tx_free_ring_dqo()
227 tx->q_resources, tx->q_resources_bus); in gve_tx_free_ring_dqo()
228 tx->q_resources = NULL; in gve_tx_free_ring_dqo()
231 if (tx->dqo.compl_ring) { in gve_tx_free_ring_dqo()
232 bytes = sizeof(tx->dqo.compl_ring[0]) * in gve_tx_free_ring_dqo()
233 (tx->dqo.complq_mask + 1); in gve_tx_free_ring_dqo()
234 dma_free_coherent(hdev, bytes, tx->dqo.compl_ring, in gve_tx_free_ring_dqo()
235 tx->complq_bus_dqo); in gve_tx_free_ring_dqo()
236 tx->dqo.compl_ring = NULL; in gve_tx_free_ring_dqo()
239 if (tx->dqo.tx_ring) { in gve_tx_free_ring_dqo()
240 bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1); in gve_tx_free_ring_dqo()
241 dma_free_coherent(hdev, bytes, tx->dqo.tx_ring, tx->bus); in gve_tx_free_ring_dqo()
242 tx->dqo.tx_ring = NULL; in gve_tx_free_ring_dqo()
245 kvfree(tx->dqo.xsk_reorder_queue); in gve_tx_free_ring_dqo()
246 tx->dqo.xsk_reorder_queue = NULL; in gve_tx_free_ring_dqo()
248 kvfree(tx->dqo.pending_packets); in gve_tx_free_ring_dqo()
249 tx->dqo.pending_packets = NULL; in gve_tx_free_ring_dqo()
251 kvfree(tx->dqo.tx_qpl_buf_next); in gve_tx_free_ring_dqo()
252 tx->dqo.tx_qpl_buf_next = NULL; in gve_tx_free_ring_dqo()
254 if (tx->dqo.qpl) { in gve_tx_free_ring_dqo()
255 qpl_id = gve_tx_qpl_id(priv, tx->q_num); in gve_tx_free_ring_dqo()
256 gve_free_queue_page_list(priv, tx->dqo.qpl, qpl_id); in gve_tx_free_ring_dqo()
257 tx->dqo.qpl = NULL; in gve_tx_free_ring_dqo()
263 static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx) in gve_tx_qpl_buf_init() argument
266 tx->dqo.qpl->num_entries; in gve_tx_qpl_buf_init()
269 tx->dqo.tx_qpl_buf_next = kvcalloc(num_tx_qpl_bufs, in gve_tx_qpl_buf_init()
270 sizeof(tx->dqo.tx_qpl_buf_next[0]), in gve_tx_qpl_buf_init()
272 if (!tx->dqo.tx_qpl_buf_next) in gve_tx_qpl_buf_init()
275 tx->dqo.num_tx_qpl_bufs = num_tx_qpl_bufs; in gve_tx_qpl_buf_init()
279 tx->dqo.tx_qpl_buf_next[i] = i + 1; in gve_tx_qpl_buf_init()
280 tx->dqo.tx_qpl_buf_next[num_tx_qpl_bufs - 1] = -1; in gve_tx_qpl_buf_init()
282 atomic_set_release(&tx->dqo_compl.free_tx_qpl_buf_head, -1); in gve_tx_qpl_buf_init()
289 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_start_ring_dqo() local
294 tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); in gve_tx_start_ring_dqo()
300 struct gve_tx_ring *tx, in gve_tx_alloc_ring_dqo() argument
310 memset(tx, 0, sizeof(*tx)); in gve_tx_alloc_ring_dqo()
311 tx->q_num = idx; in gve_tx_alloc_ring_dqo()
312 tx->dev = hdev; in gve_tx_alloc_ring_dqo()
313 spin_lock_init(&tx->dqo_tx.xdp_lock); in gve_tx_alloc_ring_dqo()
314 atomic_set_release(&tx->dqo_compl.hw_tx_head, 0); in gve_tx_alloc_ring_dqo()
317 tx->mask = cfg->ring_size - 1; in gve_tx_alloc_ring_dqo()
318 tx->dqo.complq_mask = tx->mask; in gve_tx_alloc_ring_dqo()
326 num_pending_packets = tx->dqo.complq_mask + 1; in gve_tx_alloc_ring_dqo()
332 (tx->dqo.complq_mask + 1) / GVE_TX_MIN_RE_INTERVAL; in gve_tx_alloc_ring_dqo()
339 tx->dqo.num_pending_packets = min_t(int, num_pending_packets, S16_MAX); in gve_tx_alloc_ring_dqo()
340 tx->dqo.pending_packets = kvcalloc(tx->dqo.num_pending_packets, in gve_tx_alloc_ring_dqo()
341 sizeof(tx->dqo.pending_packets[0]), in gve_tx_alloc_ring_dqo()
343 if (!tx->dqo.pending_packets) in gve_tx_alloc_ring_dqo()
347 for (i = 0; i < tx->dqo.num_pending_packets - 1; i++) in gve_tx_alloc_ring_dqo()
348 tx->dqo.pending_packets[i].next = i + 1; in gve_tx_alloc_ring_dqo()
350 tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1; in gve_tx_alloc_ring_dqo()
351 atomic_set_release(&tx->dqo_compl.free_pending_packets, -1); in gve_tx_alloc_ring_dqo()
355 tx->dqo.xsk_reorder_queue = in gve_tx_alloc_ring_dqo()
356 kvcalloc(tx->dqo.complq_mask + 1, in gve_tx_alloc_ring_dqo()
357 sizeof(tx->dqo.xsk_reorder_queue[0]), in gve_tx_alloc_ring_dqo()
359 if (!tx->dqo.xsk_reorder_queue) in gve_tx_alloc_ring_dqo()
363 tx->dqo_compl.miss_completions.head = -1; in gve_tx_alloc_ring_dqo()
364 tx->dqo_compl.miss_completions.tail = -1; in gve_tx_alloc_ring_dqo()
365 tx->dqo_compl.timed_out_completions.head = -1; in gve_tx_alloc_ring_dqo()
366 tx->dqo_compl.timed_out_completions.tail = -1; in gve_tx_alloc_ring_dqo()
368 bytes = sizeof(tx->dqo.tx_ring[0]) * (tx->mask + 1); in gve_tx_alloc_ring_dqo()
369 tx->dqo.tx_ring = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); in gve_tx_alloc_ring_dqo()
370 if (!tx->dqo.tx_ring) in gve_tx_alloc_ring_dqo()
373 bytes = sizeof(tx->dqo.compl_ring[0]) * (tx->dqo.complq_mask + 1); in gve_tx_alloc_ring_dqo()
374 tx->dqo.compl_ring = dma_alloc_coherent(hdev, bytes, in gve_tx_alloc_ring_dqo()
375 &tx->complq_bus_dqo, in gve_tx_alloc_ring_dqo()
377 if (!tx->dqo.compl_ring) in gve_tx_alloc_ring_dqo()
380 tx->q_resources = dma_alloc_coherent(hdev, sizeof(*tx->q_resources), in gve_tx_alloc_ring_dqo()
381 &tx->q_resources_bus, GFP_KERNEL); in gve_tx_alloc_ring_dqo()
382 if (!tx->q_resources) in gve_tx_alloc_ring_dqo()
386 qpl_id = gve_tx_qpl_id(priv, tx->q_num); in gve_tx_alloc_ring_dqo()
389 tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id, in gve_tx_alloc_ring_dqo()
391 if (!tx->dqo.qpl) in gve_tx_alloc_ring_dqo()
394 if (gve_tx_qpl_buf_init(tx)) in gve_tx_alloc_ring_dqo()
401 gve_tx_free_ring_dqo(priv, tx, cfg); in gve_tx_alloc_ring_dqo()
408 struct gve_tx_ring *tx = cfg->tx; in gve_tx_alloc_rings_dqo() local
420 tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring), in gve_tx_alloc_rings_dqo()
422 if (!tx) in gve_tx_alloc_rings_dqo()
426 err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i); in gve_tx_alloc_rings_dqo()
435 cfg->tx = tx; in gve_tx_alloc_rings_dqo()
440 gve_tx_free_ring_dqo(priv, &tx[j], cfg); in gve_tx_alloc_rings_dqo()
441 kvfree(tx); in gve_tx_alloc_rings_dqo()
448 struct gve_tx_ring *tx = cfg->tx; in gve_tx_free_rings_dqo() local
451 if (!tx) in gve_tx_free_rings_dqo()
455 gve_tx_free_ring_dqo(priv, &tx[i], cfg); in gve_tx_free_rings_dqo()
457 kvfree(tx); in gve_tx_free_rings_dqo()
458 cfg->tx = NULL; in gve_tx_free_rings_dqo()
462 static u32 num_avail_tx_slots(const struct gve_tx_ring *tx) in num_avail_tx_slots() argument
464 u32 num_used = (tx->dqo_tx.tail - tx->dqo_tx.head) & tx->mask; in num_avail_tx_slots()
466 return tx->mask - num_used; in num_avail_tx_slots()
470 static bool gve_has_tx_slots_available(struct gve_tx_ring *tx, u32 slots_req) in gve_has_tx_slots_available() argument
472 u32 num_avail = num_avail_tx_slots(tx); in gve_has_tx_slots_available()
480 tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head); in gve_has_tx_slots_available()
482 return num_avail_tx_slots(tx) >= slots_req; in gve_has_tx_slots_available()
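
num_avail_tx_slots() and gve_has_tx_slots_available() lean on power-of-two ring arithmetic: the masked difference between the free-running tail and head gives the slots in use, returning mask rather than size keeps a full ring distinguishable from an empty one, and the cached head is re-read from hw_tx_head with acquire ordering only when the fast check fails. A small standalone sketch of the arithmetic, with hypothetical names:

/* Userspace sketch of the ring-occupancy math (hypothetical names). */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256u			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

/* tail and head are free-running; masking the difference handles wraparound.
 * Reporting at most RING_MASK free slots keeps one slot unused, so a full
 * ring never looks identical to an empty one.
 */
static uint32_t num_avail_slots(uint32_t tail, uint32_t head)
{
	uint32_t num_used = (tail - head) & RING_MASK;

	return RING_MASK - num_used;
}

int main(void)
{
	printf("%u\n", num_avail_slots(10, 10));	/* empty ring: 255 usable slots */
	printf("%u\n", num_avail_slots(265, 10));	/* 255 in use: 0 slots left */
	return 0;
}
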
485 static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx, in gve_has_avail_slots_tx_dqo() argument
488 return gve_has_pending_packet(tx) && in gve_has_avail_slots_tx_dqo()
489 gve_has_tx_slots_available(tx, desc_count) && in gve_has_avail_slots_tx_dqo()
490 gve_has_free_tx_qpl_bufs(tx, buf_count); in gve_has_avail_slots_tx_dqo()
496 static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx, in gve_maybe_stop_tx_dqo() argument
499 if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) in gve_maybe_stop_tx_dqo()
503 tx->stop_queue++; in gve_maybe_stop_tx_dqo()
504 netif_tx_stop_queue(tx->netdev_txq); in gve_maybe_stop_tx_dqo()
512 if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count))) in gve_maybe_stop_tx_dqo()
515 netif_tx_start_queue(tx->netdev_txq); in gve_maybe_stop_tx_dqo()
516 tx->wake_queue++; in gve_maybe_stop_tx_dqo()
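
gve_maybe_stop_tx_dqo() stops the queue first and then re-checks availability, so a completion that frees resources between the first check and the stop is caught by the re-check and the queue is restarted immediately instead of waiting on a wakeup that already happened. A stripped-down userspace sketch of that lost-wakeup guard, with a plain flag standing in for the netif_tx_*_queue() calls (hypothetical names throughout):

/* Sketch of the stop-then-recheck pattern (hypothetical names). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool queue_stopped;
static _Atomic int free_slots;

static int try_reserve(int needed)
{
	if (atomic_load(&free_slots) >= needed)
		return 0;

	/* Not enough room: stop the queue, then re-check. If the completion
	 * path freed slots between the first check and the stop, the re-check
	 * catches it and we restart the queue ourselves instead of staying
	 * stopped until the next wakeup.
	 */
	atomic_store(&queue_stopped, true);
	if (atomic_load(&free_slots) < needed)
		return -1;		/* stay stopped; completions will wake us */

	atomic_store(&queue_stopped, false);
	return 0;
}

int main(void)
{
	atomic_store(&free_slots, 4);
	printf("reserve 2: %d\n", try_reserve(2));
	printf("reserve 8: %d\n", try_reserve(8));
	printf("stopped: %d\n", atomic_load(&queue_stopped));
	return 0;
}
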
537 static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx, in gve_tx_fill_pkt_desc_dqo() argument
543 &tx->dqo.tx_ring[*desc_idx].pkt; in gve_tx_fill_pkt_desc_dqo()
558 *desc_idx = (*desc_idx + 1) & tx->mask; in gve_tx_fill_pkt_desc_dqo()
648 static void gve_tx_update_tail(struct gve_tx_ring *tx, u32 desc_idx) in gve_tx_update_tail() argument
650 u32 last_desc_idx = (desc_idx - 1) & tx->mask; in gve_tx_update_tail()
652 (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask; in gve_tx_update_tail()
655 tx->dqo_tx.tail = desc_idx; in gve_tx_update_tail()
662 tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true; in gve_tx_update_tail()
663 tx->dqo_tx.last_re_idx = last_desc_idx; in gve_tx_update_tail()
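
gve_tx_update_tail() marks roughly one descriptor per GVE_TX_MIN_RE_INTERVAL posted descriptors with report_event, again using masked free-running indices to measure the distance from the last marked descriptor. A toy sketch of that interval check; the interval value below is illustrative, not the driver's constant:

/* Sketch of the periodic report_event marking (hypothetical names/values). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_MASK	255u
#define MIN_RE_INTERVAL	32u	/* illustrative; the driver defines its own */

static uint32_t last_re_idx;

/* Returns true when the descriptor just before desc_idx should carry
 * report_event, i.e. at least MIN_RE_INTERVAL descriptors were posted since
 * the previous one that did.
 */
static bool want_report_event(uint32_t desc_idx)
{
	uint32_t last_desc_idx = (desc_idx - 1) & RING_MASK;
	uint32_t posted = (last_desc_idx - last_re_idx) & RING_MASK;

	if (posted < MIN_RE_INTERVAL)
		return false;

	last_re_idx = last_desc_idx;
	return true;
}

int main(void)
{
	for (uint32_t tail = 1; tail <= 100; tail += 10)
		printf("tail %3u -> report_event %d\n", tail, want_report_event(tail));
	return 0;
}
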
667 static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, in gve_tx_add_skb_no_copy_dqo() argument
692 addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy_dqo()
693 if (unlikely(dma_mapping_error(tx->dev, addr))) in gve_tx_add_skb_no_copy_dqo()
700 gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr, in gve_tx_add_skb_no_copy_dqo()
711 addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy_dqo()
712 if (unlikely(dma_mapping_error(tx->dev, addr))) in gve_tx_add_skb_no_copy_dqo()
720 gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr, in gve_tx_add_skb_no_copy_dqo()
728 dma_unmap_single(tx->dev, in gve_tx_add_skb_no_copy_dqo()
733 dma_unmap_page(tx->dev, in gve_tx_add_skb_no_copy_dqo()
747 static void gve_tx_buf_get_addr(struct gve_tx_ring *tx, in gve_tx_buf_get_addr() argument
754 *va = page_address(tx->dqo.qpl->pages[page_id]) + offset; in gve_tx_buf_get_addr()
755 *dma_addr = tx->dqo.qpl->page_buses[page_id] + offset; in gve_tx_buf_get_addr()
758 static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx, in gve_tx_add_skb_copy_dqo() argument
775 index = gve_alloc_tx_qpl_buf(tx); in gve_tx_add_skb_copy_dqo()
779 gve_tx_buf_get_addr(tx, index, &va, &dma_addr); in gve_tx_add_skb_copy_dqo()
785 dma_sync_single_for_device(tx->dev, dma_addr, in gve_tx_add_skb_copy_dqo()
787 gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, in gve_tx_add_skb_copy_dqo()
795 ++tx->dqo_tx.alloc_tx_qpl_buf_cnt; in gve_tx_add_skb_copy_dqo()
802 gve_free_tx_qpl_bufs(tx, pkt); in gve_tx_add_skb_copy_dqo()
811 static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx, in gve_tx_add_skb_dqo() argument
815 u32 desc_idx = tx->dqo_tx.tail; in gve_tx_add_skb_dqo()
820 pkt = gve_alloc_pending_packet(tx); in gve_tx_add_skb_dqo()
826 completion_tag = pkt - tx->dqo.pending_packets; in gve_tx_add_skb_dqo()
835 gve_tx_fill_tso_ctx_desc(&tx->dqo.tx_ring[desc_idx].tso_ctx, in gve_tx_add_skb_dqo()
837 desc_idx = (desc_idx + 1) & tx->mask; in gve_tx_add_skb_dqo()
840 gve_tx_fill_general_ctx_desc(&tx->dqo.tx_ring[desc_idx].general_ctx, in gve_tx_add_skb_dqo()
842 desc_idx = (desc_idx + 1) & tx->mask; in gve_tx_add_skb_dqo()
844 if (tx->dqo.qpl) { in gve_tx_add_skb_dqo()
845 if (gve_tx_add_skb_copy_dqo(tx, skb, pkt, in gve_tx_add_skb_dqo()
850 if (gve_tx_add_skb_no_copy_dqo(tx, skb, pkt, in gve_tx_add_skb_dqo()
856 tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs; in gve_tx_add_skb_dqo()
858 gve_tx_update_tail(tx, desc_idx); in gve_tx_add_skb_dqo()
863 gve_free_pending_packet(tx, pkt); in gve_tx_add_skb_dqo()
960 static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_try_tx_skb() argument
969 if (tx->dqo.qpl) { in gve_try_tx_skb()
992 if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs, in gve_try_tx_skb()
997 if (unlikely(gve_tx_add_skb_dqo(tx, skb) < 0)) in gve_try_tx_skb()
1000 netdev_tx_sent_queue(tx->netdev_txq, skb->len); in gve_try_tx_skb()
1005 tx->dropped_pkt++; in gve_try_tx_skb()
1010 static void gve_xsk_reorder_queue_push_dqo(struct gve_tx_ring *tx, in gve_xsk_reorder_queue_push_dqo() argument
1013 u32 tail = atomic_read(&tx->dqo_tx.xsk_reorder_queue_tail); in gve_xsk_reorder_queue_push_dqo()
1015 tx->dqo.xsk_reorder_queue[tail] = completion_tag; in gve_xsk_reorder_queue_push_dqo()
1016 tail = (tail + 1) & tx->dqo.complq_mask; in gve_xsk_reorder_queue_push_dqo()
1017 atomic_set_release(&tx->dqo_tx.xsk_reorder_queue_tail, tail); in gve_xsk_reorder_queue_push_dqo()
1021 gve_xsk_reorder_queue_head(struct gve_tx_ring *tx) in gve_xsk_reorder_queue_head() argument
1023 u32 head = tx->dqo_compl.xsk_reorder_queue_head; in gve_xsk_reorder_queue_head()
1025 if (head == tx->dqo_compl.xsk_reorder_queue_tail) { in gve_xsk_reorder_queue_head()
1026 tx->dqo_compl.xsk_reorder_queue_tail = in gve_xsk_reorder_queue_head()
1027 atomic_read_acquire(&tx->dqo_tx.xsk_reorder_queue_tail); in gve_xsk_reorder_queue_head()
1029 if (head == tx->dqo_compl.xsk_reorder_queue_tail) in gve_xsk_reorder_queue_head()
1033 return &tx->dqo.pending_packets[tx->dqo.xsk_reorder_queue[head]]; in gve_xsk_reorder_queue_head()
1036 static void gve_xsk_reorder_queue_pop_dqo(struct gve_tx_ring *tx) in gve_xsk_reorder_queue_pop_dqo() argument
1038 tx->dqo_compl.xsk_reorder_queue_head++; in gve_xsk_reorder_queue_pop_dqo()
1039 tx->dqo_compl.xsk_reorder_queue_head &= tx->dqo.complq_mask; in gve_xsk_reorder_queue_pop_dqo()
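
The xsk_reorder_queue handled above is a single-producer/single-consumer ring of completion tags: the TX path publishes a new tail with release semantics, and the completion path keeps a cached tail that it refreshes with an acquire read only when head catches up. A userspace sketch of that pattern with hypothetical names:

/* Userspace SPSC tag queue sketch (hypothetical names, not the driver's). */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define QUEUE_MASK 255u

static uint16_t tags[QUEUE_MASK + 1];
static _Atomic uint32_t shared_tail;	/* written by the producer (TX path) */
static uint32_t head, cached_tail;	/* owned by the consumer (completion path) */

static void push_tag(uint16_t tag)
{
	uint32_t tail = atomic_load_explicit(&shared_tail, memory_order_relaxed);

	tags[tail] = tag;
	/* Release publish: the tag store above is visible before the new tail. */
	atomic_store_explicit(&shared_tail, (tail + 1) & QUEUE_MASK,
			      memory_order_release);
}

/* Returns the oldest outstanding tag, or -1 when the queue is empty. */
static int peek_tag(void)
{
	if (head == cached_tail) {
		cached_tail = atomic_load_explicit(&shared_tail,
						   memory_order_acquire);
		if (head == cached_tail)
			return -1;
	}
	return tags[head];
}

static void pop_tag(void)
{
	head = (head + 1) & QUEUE_MASK;
}

int main(void)
{
	push_tag(7);
	push_tag(9);
	printf("%d ", peek_tag()); pop_tag();
	printf("%d ", peek_tag()); pop_tag();
	printf("%d\n", peek_tag());
	return 0;
}
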
1046 struct gve_tx_ring *tx; in gve_tx_dqo() local
1048 tx = &priv->tx[skb_get_queue_mapping(skb)]; in gve_tx_dqo()
1049 if (unlikely(gve_try_tx_skb(priv, tx, skb) < 0)) { in gve_tx_dqo()
1054 gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail); in gve_tx_dqo()
1058 if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) in gve_tx_dqo()
1061 gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail); in gve_tx_dqo()
1065 static bool gve_xsk_tx_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_xsk_tx_dqo() argument
1068 struct xsk_buff_pool *pool = tx->xsk_pool; in gve_xsk_tx_dqo()
1073 spin_lock(&tx->dqo_tx.xdp_lock); in gve_xsk_tx_dqo()
1080 if (unlikely(!gve_has_avail_slots_tx_dqo(tx, 1, 1))) { in gve_xsk_tx_dqo()
1088 pkt = gve_alloc_pending_packet(tx); in gve_xsk_tx_dqo()
1091 completion_tag = pkt - tx->dqo.pending_packets; in gve_xsk_tx_dqo()
1096 desc_idx = tx->dqo_tx.tail; in gve_xsk_tx_dqo()
1097 gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, in gve_xsk_tx_dqo()
1102 gve_tx_update_tail(tx, desc_idx); in gve_xsk_tx_dqo()
1103 tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs; in gve_xsk_tx_dqo()
1104 gve_xsk_reorder_queue_push_dqo(tx, completion_tag); in gve_xsk_tx_dqo()
1108 gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail); in gve_xsk_tx_dqo()
1112 spin_unlock(&tx->dqo_tx.xdp_lock); in gve_xsk_tx_dqo()
1114 u64_stats_update_begin(&tx->statss); in gve_xsk_tx_dqo()
1115 tx->xdp_xsk_sent += sent; in gve_xsk_tx_dqo()
1116 u64_stats_update_end(&tx->statss); in gve_xsk_tx_dqo()
1121 static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list, in add_to_list() argument
1126 index = pending_packet - tx->dqo.pending_packets; in add_to_list()
1132 tx->dqo.pending_packets[old_tail].next = index; in add_to_list()
1138 static void remove_from_list(struct gve_tx_ring *tx, in remove_from_list() argument
1151 tx->dqo.pending_packets[prev_index].next = next_index; in remove_from_list()
1157 tx->dqo.pending_packets[next_index].prev = prev_index; in remove_from_list()
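
add_to_list() and remove_from_list() thread a doubly linked list through the pending_packets array by index, with -1 as the null link, so miss and timed-out completions can be tracked without extra allocations. An illustrative standalone version of that index-linked list (hypothetical names):

/* Doubly linked list threaded through an array by index (hypothetical names). */
#include <stdint.h>
#include <stdio.h>

struct node {
	int16_t prev, next;
};

struct list {
	int16_t head, tail;	/* -1 when empty */
};

static struct node nodes[16];

static void list_add_tail(struct list *l, int16_t index)
{
	int16_t old_tail = l->tail;

	nodes[index].prev = old_tail;
	nodes[index].next = -1;
	if (old_tail == -1)
		l->head = index;
	else
		nodes[old_tail].next = index;
	l->tail = index;
}

static void list_remove(struct list *l, int16_t index)
{
	int16_t prev = nodes[index].prev, next = nodes[index].next;

	if (prev == -1)
		l->head = next;
	else
		nodes[prev].next = next;
	if (next == -1)
		l->tail = prev;
	else
		nodes[next].prev = prev;
}

int main(void)
{
	struct list l = { .head = -1, .tail = -1 };

	list_add_tail(&l, 3);
	list_add_tail(&l, 5);
	list_remove(&l, 3);
	printf("head %d tail %d\n", l.head, l.tail);	/* head 5 tail 5 */
	return 0;
}
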
1184 struct gve_tx_ring *tx, bool is_napi, in gve_handle_packet_completion() argument
1190 if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) { in gve_handle_packet_completion()
1196 pending_packet = &tx->dqo.pending_packets[compl_tag]; in gve_handle_packet_completion()
1206 remove_from_list(tx, in gve_handle_packet_completion()
1207 &tx->dqo_compl.timed_out_completions, in gve_handle_packet_completion()
1209 gve_free_pending_packet(tx, pending_packet); in gve_handle_packet_completion()
1223 remove_from_list(tx, &tx->dqo_compl.miss_completions, in gve_handle_packet_completion()
1234 tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs; in gve_handle_packet_completion()
1238 if (tx->dqo.qpl) in gve_handle_packet_completion()
1239 gve_free_tx_qpl_bufs(tx, pending_packet); in gve_handle_packet_completion()
1241 gve_unmap_packet(tx->dev, pending_packet); in gve_handle_packet_completion()
1247 gve_free_pending_packet(tx, pending_packet); in gve_handle_packet_completion()
1250 gve_unmap_packet(tx->dev, pending_packet); in gve_handle_packet_completion()
1256 gve_free_pending_packet(tx, pending_packet); in gve_handle_packet_completion()
1267 struct gve_tx_ring *tx, u16 compl_tag, in gve_handle_miss_completion() argument
1272 if (unlikely(compl_tag >= tx->dqo.num_pending_packets)) { in gve_handle_miss_completion()
1278 pending_packet = &tx->dqo.pending_packets[compl_tag]; in gve_handle_miss_completion()
1292 add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet); in gve_handle_miss_completion()
1299 struct gve_tx_ring *tx) in remove_miss_completions() argument
1304 next_index = tx->dqo_compl.miss_completions.head; in remove_miss_completions()
1306 pending_packet = &tx->dqo.pending_packets[next_index]; in remove_miss_completions()
1312 remove_from_list(tx, &tx->dqo_compl.miss_completions, in remove_miss_completions()
1319 if (tx->dqo.qpl) in remove_miss_completions()
1320 gve_free_tx_qpl_bufs(tx, pending_packet); in remove_miss_completions()
1322 gve_unmap_packet(tx->dev, pending_packet); in remove_miss_completions()
1327 tx->dropped_pkt++; in remove_miss_completions()
1330 (int)(pending_packet - tx->dqo.pending_packets)); in remove_miss_completions()
1339 add_to_list(tx, &tx->dqo_compl.timed_out_completions, in remove_miss_completions()
1345 struct gve_tx_ring *tx) in remove_timed_out_completions() argument
1350 next_index = tx->dqo_compl.timed_out_completions.head; in remove_timed_out_completions()
1352 pending_packet = &tx->dqo.pending_packets[next_index]; in remove_timed_out_completions()
1358 remove_from_list(tx, &tx->dqo_compl.timed_out_completions, in remove_timed_out_completions()
1365 gve_free_pending_packet(tx, pending_packet); in remove_timed_out_completions()
1369 static void gve_tx_process_xsk_completions(struct gve_tx_ring *tx) in gve_tx_process_xsk_completions() argument
1375 gve_xsk_reorder_queue_head(tx); in gve_tx_process_xsk_completions()
1382 gve_xsk_reorder_queue_pop_dqo(tx); in gve_tx_process_xsk_completions()
1383 gve_free_pending_packet(tx, pending_packet); in gve_tx_process_xsk_completions()
1387 xsk_tx_completed(tx->xsk_pool, num_xsks); in gve_tx_process_xsk_completions()
1390 int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_tx_done_dqo() argument
1404 &tx->dqo.compl_ring[tx->dqo_compl.head]; in gve_clean_tx_done_dqo()
1407 if (compl_desc->generation == tx->dqo_compl.cur_gen_bit) in gve_clean_tx_done_dqo()
1411 prefetch(&tx->dqo.compl_ring[(tx->dqo_compl.head + 1) & in gve_clean_tx_done_dqo()
1412 tx->dqo.complq_mask]); in gve_clean_tx_done_dqo()
1422 atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head); in gve_clean_tx_done_dqo()
1427 gve_handle_miss_completion(priv, tx, compl_tag, in gve_clean_tx_done_dqo()
1431 gve_handle_packet_completion(priv, tx, !!napi, in gve_clean_tx_done_dqo()
1440 gve_handle_miss_completion(priv, tx, compl_tag, in gve_clean_tx_done_dqo()
1446 gve_handle_packet_completion(priv, tx, !!napi, in gve_clean_tx_done_dqo()
1453 tx->dqo_compl.head = in gve_clean_tx_done_dqo()
1454 (tx->dqo_compl.head + 1) & tx->dqo.complq_mask; in gve_clean_tx_done_dqo()
1456 tx->dqo_compl.cur_gen_bit ^= tx->dqo_compl.head == 0; in gve_clean_tx_done_dqo()
1460 if (tx->netdev_txq) in gve_clean_tx_done_dqo()
1461 netdev_tx_completed_queue(tx->netdev_txq, in gve_clean_tx_done_dqo()
1465 remove_miss_completions(priv, tx); in gve_clean_tx_done_dqo()
1466 remove_timed_out_completions(priv, tx); in gve_clean_tx_done_dqo()
1468 if (tx->xsk_pool) in gve_clean_tx_done_dqo()
1469 gve_tx_process_xsk_completions(tx); in gve_clean_tx_done_dqo()
1471 u64_stats_update_begin(&tx->statss); in gve_clean_tx_done_dqo()
1472 tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes; in gve_clean_tx_done_dqo()
1473 tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts; in gve_clean_tx_done_dqo()
1474 u64_stats_update_end(&tx->statss); in gve_clean_tx_done_dqo()
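
gve_clean_tx_done_dqo() detects new completion descriptors with a generation bit: the device flips the bit it writes each time it wraps the completion ring, and the driver flips cur_gen_bit whenever head wraps back to zero, so an entry is fresh only while the two differ. A userspace sketch of that polling scheme, assuming a simplified descriptor layout rather than the hardware format:

/* Generation-bit completion-ring polling sketch (hypothetical layout). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define COMPLQ_MASK 7u

struct compl_desc {
	uint8_t generation;	/* flipped by the "device" each time it wraps */
	uint16_t tag;
};

static struct compl_desc complq[COMPLQ_MASK + 1];
static uint32_t head;
static uint8_t cur_gen_bit;

/* A descriptor is new only while its generation differs from ours; once head
 * wraps back to slot 0 we flip cur_gen_bit, so stale entries left over from
 * the previous pass no longer look new.
 */
static bool poll_completion(uint16_t *tag)
{
	struct compl_desc *desc = &complq[head];

	if (desc->generation == cur_gen_bit)
		return false;		/* nothing new yet */

	*tag = desc->tag;
	head = (head + 1) & COMPLQ_MASK;
	cur_gen_bit ^= (head == 0);
	return true;
}

int main(void)
{
	uint16_t tag;

	/* Pretend the device wrote one completion with the opposite generation. */
	complq[0] = (struct compl_desc){ .generation = 1, .tag = 42 };

	while (poll_completion(&tag))
		printf("completed tag %u\n", tag);
	return 0;
}
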
1481 struct gve_tx_ring *tx = block->tx; in gve_tx_poll_dqo() local
1485 int num_descs_cleaned = gve_clean_tx_done_dqo(priv, tx, in gve_tx_poll_dqo()
1491 if (netif_tx_queue_stopped(tx->netdev_txq) && in gve_tx_poll_dqo()
1493 tx->wake_queue++; in gve_tx_poll_dqo()
1494 netif_tx_wake_queue(tx->netdev_txq); in gve_tx_poll_dqo()
1499 compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head]; in gve_tx_poll_dqo()
1500 return compl_desc->generation != tx->dqo_compl.cur_gen_bit; in gve_tx_poll_dqo()
1507 struct gve_tx_ring *tx; in gve_xsk_tx_poll_dqo() local
1509 tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)]; in gve_xsk_tx_poll_dqo()
1510 if (tx->xsk_pool) in gve_xsk_tx_poll_dqo()
1511 return gve_xsk_tx_dqo(priv, tx, budget); in gve_xsk_tx_poll_dqo()
1519 struct gve_tx_ring *tx = block->tx; in gve_xdp_poll_dqo() local
1522 gve_clean_tx_done_dqo(priv, tx, &block->napi); in gve_xdp_poll_dqo()
1525 compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head]; in gve_xdp_poll_dqo()
1526 return compl_desc->generation != tx->dqo_compl.cur_gen_bit; in gve_xdp_poll_dqo()
1529 int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_xdp_xmit_one_dqo() argument
1533 u32 desc_idx = tx->dqo_tx.tail; in gve_xdp_xmit_one_dqo()
1539 if (unlikely(!gve_has_tx_slots_available(tx, num_descs))) in gve_xdp_xmit_one_dqo()
1542 pkt = gve_alloc_pending_packet(tx); in gve_xdp_xmit_one_dqo()
1549 completion_tag = pkt - tx->dqo.pending_packets; in gve_xdp_xmit_one_dqo()
1552 addr = dma_map_single(tx->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); in gve_xdp_xmit_one_dqo()
1553 err = dma_mapping_error(tx->dev, addr); in gve_xdp_xmit_one_dqo()
1561 gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, in gve_xdp_xmit_one_dqo()
1566 gve_tx_update_tail(tx, desc_idx); in gve_xdp_xmit_one_dqo()
1572 gve_free_pending_packet(tx, pkt); in gve_xdp_xmit_one_dqo()
1580 struct gve_tx_ring *tx; in gve_xdp_xmit_dqo() local
1589 tx = &priv->tx[qid]; in gve_xdp_xmit_dqo()
1591 spin_lock(&tx->dqo_tx.xdp_lock); in gve_xdp_xmit_dqo()
1593 err = gve_xdp_xmit_one_dqo(priv, tx, frames[i]); in gve_xdp_xmit_dqo()
1599 gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail); in gve_xdp_xmit_dqo()
1601 spin_unlock(&tx->dqo_tx.xdp_lock); in gve_xdp_xmit_dqo()
1603 u64_stats_update_begin(&tx->statss); in gve_xdp_xmit_dqo()
1604 tx->xdp_xmit += n; in gve_xdp_xmit_dqo()
1605 tx->xdp_xmit_errors += n - i; in gve_xdp_xmit_dqo()
1606 u64_stats_update_end(&tx->statss); in gve_xdp_xmit_dqo()