Lines Matching full:tx (cross-reference hits for the identifier tx in the gve driver's DQO Tx code)

38 gve_unmap_packet(struct gve_tx_ring *tx,  in gve_unmap_packet()  argument
41 bus_dmamap_sync(tx->dqo.buf_dmatag, pending_pkt->dmamap, in gve_unmap_packet()
43 bus_dmamap_unload(tx->dqo.buf_dmatag, pending_pkt->dmamap); in gve_unmap_packet()
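The three fragments above are the body of the DMA unmap helper. A minimal reconstruction, assuming the pending-packet struct is named gve_tx_pending_pkt_dqo and that the sync uses the conventional BUS_DMASYNC_POSTWRITE before unloading:

static void
gve_unmap_packet(struct gve_tx_ring *tx,
    struct gve_tx_pending_pkt_dqo *pending_pkt)
{
	/* Complete the device-visible writes before tearing the mapping down. */
	bus_dmamap_sync(tx->dqo.buf_dmatag, pending_pkt->dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(tx->dqo.buf_dmatag, pending_pkt->dmamap);
}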
54 gve_free_tx_mbufs_dqo(struct gve_tx_ring *tx) in gve_free_tx_mbufs_dqo() argument
59 for (i = 0; i < tx->dqo.num_pending_pkts; i++) { in gve_free_tx_mbufs_dqo()
60 pending_pkt = &tx->dqo.pending_pkts[i]; in gve_free_tx_mbufs_dqo()
64 if (gve_is_qpl(tx->com.priv)) in gve_free_tx_mbufs_dqo()
67 gve_unmap_packet(tx, pending_pkt); in gve_free_tx_mbufs_dqo()
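Lines 54-67 outline the loop that releases any mbufs still held by pending packets. A sketch of how that loop plausibly fits together, assuming a pending_pkt->mbuf field and using the gve_clear_qpl_pending_pkt() helper that appears later in this listing (line 991):

static void
gve_free_tx_mbufs_dqo(struct gve_tx_ring *tx)
{
	struct gve_tx_pending_pkt_dqo *pending_pkt;
	int i;

	for (i = 0; i < tx->dqo.num_pending_pkts; i++) {
		pending_pkt = &tx->dqo.pending_pkts[i];
		if (pending_pkt->mbuf == NULL)
			continue;

		/* QPL rings copy into pre-mapped pages; RDA rings DMA-map the mbuf. */
		if (gve_is_qpl(tx->com.priv))
			gve_clear_qpl_pending_pkt(pending_pkt);
		else
			gve_unmap_packet(tx, pending_pkt);

		m_freem(pending_pkt->mbuf);
		pending_pkt->mbuf = NULL;
	}
}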
77 struct gve_tx_ring *tx = &priv->tx[i]; in gve_tx_free_ring_dqo() local
80 if (tx->dqo.desc_ring != NULL) { in gve_tx_free_ring_dqo()
81 gve_dma_free_coherent(&tx->desc_ring_mem); in gve_tx_free_ring_dqo()
82 tx->dqo.desc_ring = NULL; in gve_tx_free_ring_dqo()
85 if (tx->dqo.compl_ring != NULL) { in gve_tx_free_ring_dqo()
86 gve_dma_free_coherent(&tx->dqo.compl_ring_mem); in gve_tx_free_ring_dqo()
87 tx->dqo.compl_ring = NULL; in gve_tx_free_ring_dqo()
90 if (tx->dqo.pending_pkts != NULL) { in gve_tx_free_ring_dqo()
91 gve_free_tx_mbufs_dqo(tx); in gve_tx_free_ring_dqo()
93 if (!gve_is_qpl(priv) && tx->dqo.buf_dmatag) { in gve_tx_free_ring_dqo()
94 for (j = 0; j < tx->dqo.num_pending_pkts; j++) in gve_tx_free_ring_dqo()
95 if (tx->dqo.pending_pkts[j].state != in gve_tx_free_ring_dqo()
97 bus_dmamap_destroy(tx->dqo.buf_dmatag, in gve_tx_free_ring_dqo()
98 tx->dqo.pending_pkts[j].dmamap); in gve_tx_free_ring_dqo()
101 free(tx->dqo.pending_pkts, M_GVE); in gve_tx_free_ring_dqo()
102 tx->dqo.pending_pkts = NULL; in gve_tx_free_ring_dqo()
105 if (!gve_is_qpl(priv) && tx->dqo.buf_dmatag) in gve_tx_free_ring_dqo()
106 bus_dma_tag_destroy(tx->dqo.buf_dmatag); in gve_tx_free_ring_dqo()
108 if (gve_is_qpl(priv) && tx->dqo.qpl_bufs != NULL) { in gve_tx_free_ring_dqo()
109 free(tx->dqo.qpl_bufs, M_GVE); in gve_tx_free_ring_dqo()
110 tx->dqo.qpl_bufs = NULL; in gve_tx_free_ring_dqo()
115 gve_tx_alloc_rda_fields_dqo(struct gve_tx_ring *tx) in gve_tx_alloc_rda_fields_dqo() argument
117 struct gve_priv *priv = tx->com.priv; in gve_tx_alloc_rda_fields_dqo()
122 * DMA tag for mapping Tx mbufs in gve_tx_alloc_rda_fields_dqo()
138 &tx->dqo.buf_dmatag); in gve_tx_alloc_rda_fields_dqo()
145 for (j = 0; j < tx->dqo.num_pending_pkts; j++) { in gve_tx_alloc_rda_fields_dqo()
146 err = bus_dmamap_create(tx->dqo.buf_dmatag, 0, in gve_tx_alloc_rda_fields_dqo()
147 &tx->dqo.pending_pkts[j].dmamap); in gve_tx_alloc_rda_fields_dqo()
154 tx->dqo.pending_pkts[j].state = GVE_PACKET_STATE_FREE; in gve_tx_alloc_rda_fields_dqo()
163 struct gve_tx_ring *tx = &priv->tx[i]; in gve_tx_alloc_ring_dqo() local
170 CACHE_LINE_SIZE, &tx->desc_ring_mem); in gve_tx_alloc_ring_dqo()
173 "Failed to alloc desc ring for tx ring %d", i); in gve_tx_alloc_ring_dqo()
176 tx->dqo.desc_ring = tx->desc_ring_mem.cpu_addr; in gve_tx_alloc_ring_dqo()
181 CACHE_LINE_SIZE, &tx->dqo.compl_ring_mem); in gve_tx_alloc_ring_dqo()
184 "Failed to alloc compl ring for tx ring %d", i); in gve_tx_alloc_ring_dqo()
187 tx->dqo.compl_ring = tx->dqo.compl_ring_mem.cpu_addr; in gve_tx_alloc_ring_dqo()
205 tx->dqo.num_pending_pkts = num_pending_pkts; in gve_tx_alloc_ring_dqo()
206 tx->dqo.pending_pkts = malloc( in gve_tx_alloc_ring_dqo()
213 tx->com.qpl = &priv->qpls[i]; in gve_tx_alloc_ring_dqo()
215 tx->com.qpl->num_pages; in gve_tx_alloc_ring_dqo()
217 tx->dqo.qpl_bufs = malloc( in gve_tx_alloc_ring_dqo()
218 sizeof(*tx->dqo.qpl_bufs) * qpl_buf_cnt, in gve_tx_alloc_ring_dqo()
221 gve_tx_alloc_rda_fields_dqo(tx); in gve_tx_alloc_ring_dqo()
249 gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, in gve_tx_fill_pkt_desc_dqo() argument
255 &tx->dqo.desc_ring[*desc_idx].pkt; in gve_tx_fill_pkt_desc_dqo()
270 *desc_idx = (*desc_idx + 1) & tx->dqo.desc_mask; in gve_tx_fill_pkt_desc_dqo()
377 gve_tx_fill_ctx_descs(struct gve_tx_ring *tx, struct mbuf *mbuf, in gve_tx_fill_ctx_descs() argument
394 tx->stats.tx_delayed_pkt_tsoerr, 1); in gve_tx_fill_ctx_descs()
399 tso_desc = &tx->dqo.desc_ring[*desc_idx].tso_ctx; in gve_tx_fill_ctx_descs()
402 *desc_idx = (*desc_idx + 1) & tx->dqo.desc_mask; in gve_tx_fill_ctx_descs()
404 counter_u64_add_protected(tx->stats.tso_packet_cnt, 1); in gve_tx_fill_ctx_descs()
408 gen_desc = &tx->dqo.desc_ring[*desc_idx].general_ctx; in gve_tx_fill_ctx_descs()
410 *desc_idx = (*desc_idx + 1) & tx->dqo.desc_mask; in gve_tx_fill_ctx_descs()
415 gve_map_mbuf_dqo(struct gve_tx_ring *tx, in gve_map_mbuf_dqo() argument
422 err = bus_dmamap_load_mbuf_sg(tx->dqo.buf_dmatag, dmamap, in gve_map_mbuf_dqo()
434 tx->stats.tx_mbuf_collapse, 1); in gve_map_mbuf_dqo()
443 tx->stats.tx_mbuf_defrag, 1); in gve_map_mbuf_dqo()
451 tx->stats.tx_mbuf_defrag_err, 1); in gve_map_mbuf_dqo()
460 return (gve_map_mbuf_dqo(tx, mbuf, dmamap, in gve_map_mbuf_dqo()
466 tx->stats.tx_mbuf_dmamap_enomem_err, 1); in gve_map_mbuf_dqo()
477 counter_u64_add_protected(tx->stats.tx_mbuf_dmamap_err, 1); in gve_map_mbuf_dqo()
483 num_avail_desc_ring_slots(const struct gve_tx_ring *tx) in num_avail_desc_ring_slots() argument
485 uint32_t num_used = (tx->dqo.desc_tail - tx->dqo.desc_head) & in num_avail_desc_ring_slots()
486 tx->dqo.desc_mask; in num_avail_desc_ring_slots()
488 return (tx->dqo.desc_mask - num_used); in num_avail_desc_ring_slots()
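Lines 483-488 spell out the descriptor-ring occupancy arithmetic completely, so the helper can be restated whole. Because the capacity reported is desc_mask (ring size minus one), one slot is effectively kept unused, which keeps a full ring from looking identical to an empty one:

static uint32_t
num_avail_desc_ring_slots(const struct gve_tx_ring *tx)
{
	/* The ring size is a power of two, so masking handles index wrap-around. */
	uint32_t num_used = (tx->dqo.desc_tail - tx->dqo.desc_head) &
	    tx->dqo.desc_mask;

	/* Capacity is size - 1, leaving one slot free as the full/empty sentinel. */
	return (tx->dqo.desc_mask - num_used);
}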
492 gve_alloc_pending_packet(struct gve_tx_ring *tx) in gve_alloc_pending_packet() argument
494 int32_t index = tx->dqo.free_pending_pkts_csm; in gve_alloc_pending_packet()
502 tx->dqo.free_pending_pkts_csm = atomic_swap_32( in gve_alloc_pending_packet()
503 &tx->dqo.free_pending_pkts_prd, -1); in gve_alloc_pending_packet()
505 index = tx->dqo.free_pending_pkts_csm; in gve_alloc_pending_packet()
510 pending_pkt = &tx->dqo.pending_pkts[index]; in gve_alloc_pending_packet()
513 tx->dqo.free_pending_pkts_csm = pending_pkt->next; in gve_alloc_pending_packet()
520 gve_free_pending_packet(struct gve_tx_ring *tx, in gve_free_pending_packet() argument
523 int index = pending_pkt - tx->dqo.pending_pkts; in gve_free_pending_packet()
530 old_head = atomic_load_acq_32(&tx->dqo.free_pending_pkts_prd); in gve_free_pending_packet()
533 if (atomic_cmpset_32(&tx->dqo.free_pending_pkts_prd, in gve_free_pending_packet()
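Lines 492-533 sketch a two-list, lock-free free list: the transmit path pops from a consumer list it owns exclusively (free_pending_pkts_csm), and only when that list runs dry does it atomically swap in the producer list (free_pending_pkts_prd) that the completion path pushes onto. A condensed illustration of the pattern, using the field names visible in this listing; the struct name and the exact control flow are assumptions, not a verbatim copy of the driver:

/* Xmit path: pop a free pending packet, or return NULL if none are left. */
static struct gve_tx_pending_pkt_dqo *
gve_alloc_pending_packet(struct gve_tx_ring *tx)
{
	int32_t index = tx->dqo.free_pending_pkts_csm;
	struct gve_tx_pending_pkt_dqo *pending_pkt;

	if (index == -1) {
		/*
		 * Consumer list is empty: steal the entire producer list with one
		 * atomic swap, leaving -1 (empty) behind for the completion path.
		 */
		tx->dqo.free_pending_pkts_csm = atomic_swap_32(
		    &tx->dqo.free_pending_pkts_prd, -1);
		index = tx->dqo.free_pending_pkts_csm;
		if (__predict_false(index == -1))
			return (NULL);
	}

	pending_pkt = &tx->dqo.pending_pkts[index];
	/* Unlink from the consumer list; only the xmit path touches this list. */
	tx->dqo.free_pending_pkts_csm = pending_pkt->next;

	return (pending_pkt);
}

/* Completion path: push a finished packet back onto the producer list. */
static void
gve_free_pending_packet(struct gve_tx_ring *tx,
    struct gve_tx_pending_pkt_dqo *pending_pkt)
{
	int index = pending_pkt - tx->dqo.pending_pkts;
	int32_t old_head;

	pending_pkt->state = GVE_PACKET_STATE_FREE;
	do {
		old_head = atomic_load_acq_32(&tx->dqo.free_pending_pkts_prd);
		pending_pkt->next = old_head;
	} while (!atomic_cmpset_32(&tx->dqo.free_pending_pkts_prd,
	    old_head, index));
}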
545 gve_tx_has_desc_room_dqo(struct gve_tx_ring *tx, int needed_descs) in gve_tx_has_desc_room_dqo() argument
547 if (needed_descs <= num_avail_desc_ring_slots(tx)) in gve_tx_has_desc_room_dqo()
550 tx->dqo.desc_head = atomic_load_acq_32(&tx->dqo.hw_tx_head); in gve_tx_has_desc_room_dqo()
551 if (needed_descs > num_avail_desc_ring_slots(tx)) { in gve_tx_has_desc_room_dqo()
554 tx->stats.tx_delayed_pkt_nospace_descring, 1); in gve_tx_has_desc_room_dqo()
563 gve_tx_request_desc_compl(struct gve_tx_ring *tx, uint32_t desc_idx) in gve_tx_request_desc_compl() argument
568 last_desc_idx = (desc_idx - 1) & tx->dqo.desc_mask; in gve_tx_request_desc_compl()
570 (last_desc_idx - tx->dqo.last_re_idx) & tx->dqo.desc_mask; in gve_tx_request_desc_compl()
574 tx->dqo.desc_ring[last_desc_idx].pkt.report_event = true; in gve_tx_request_desc_compl()
575 tx->dqo.last_re_idx = last_desc_idx; in gve_tx_request_desc_compl()
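Lines 563-575 show how completion reporting is throttled: the report_event bit is set on a packet's last descriptor only when enough descriptors have been posted since the previous report. A hedged sketch of that logic; GVE_TX_MIN_RE_INTERVAL is a placeholder name for whatever threshold constant the driver actually uses:

static void
gve_tx_request_desc_compl(struct gve_tx_ring *tx, uint32_t desc_idx)
{
	uint32_t last_report_event_interval;
	uint32_t last_desc_idx;

	/* desc_idx already points one past the packet's final descriptor. */
	last_desc_idx = (desc_idx - 1) & tx->dqo.desc_mask;
	last_report_event_interval =
	    (last_desc_idx - tx->dqo.last_re_idx) & tx->dqo.desc_mask;

	/* Ask the NIC for a descriptor completion only every so often. */
	if (last_report_event_interval >= GVE_TX_MIN_RE_INTERVAL /* placeholder */) {
		tx->dqo.desc_ring[last_desc_idx].pkt.report_event = true;
		tx->dqo.last_re_idx = last_desc_idx;
	}
}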
580 gve_tx_have_enough_qpl_bufs(struct gve_tx_ring *tx, int num_bufs) in gve_tx_have_enough_qpl_bufs() argument
582 uint32_t available = tx->dqo.qpl_bufs_produced_cached - in gve_tx_have_enough_qpl_bufs()
583 tx->dqo.qpl_bufs_consumed; in gve_tx_have_enough_qpl_bufs()
588 tx->dqo.qpl_bufs_produced_cached = atomic_load_acq_32( in gve_tx_have_enough_qpl_bufs()
589 &tx->dqo.qpl_bufs_produced); in gve_tx_have_enough_qpl_bufs()
590 available = tx->dqo.qpl_bufs_produced_cached - in gve_tx_have_enough_qpl_bufs()
591 tx->dqo.qpl_bufs_consumed; in gve_tx_have_enough_qpl_bufs()
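Lines 580-591 contain nearly all of the QPL buffer availability check: a cached producer count is consulted first, and only refreshed from the shared atomic counter when that cheap check fails. Restated as a whole function, with the early-return shape being an inference:

static bool
gve_tx_have_enough_qpl_bufs(struct gve_tx_ring *tx, int num_bufs)
{
	uint32_t available = tx->dqo.qpl_bufs_produced_cached -
	    tx->dqo.qpl_bufs_consumed;

	if (__predict_true(available >= num_bufs))
		return (true);

	/* Slow path: re-read the producer count published by the cleanup path. */
	tx->dqo.qpl_bufs_produced_cached = atomic_load_acq_32(
	    &tx->dqo.qpl_bufs_produced);
	available = tx->dqo.qpl_bufs_produced_cached -
	    tx->dqo.qpl_bufs_consumed;

	if (available >= num_bufs)
		return (true);
	return (false);
}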
599 gve_tx_alloc_qpl_buf(struct gve_tx_ring *tx) in gve_tx_alloc_qpl_buf() argument
601 int32_t buf = tx->dqo.free_qpl_bufs_csm; in gve_tx_alloc_qpl_buf()
604 tx->dqo.free_qpl_bufs_csm = atomic_swap_32( in gve_tx_alloc_qpl_buf()
605 &tx->dqo.free_qpl_bufs_prd, -1); in gve_tx_alloc_qpl_buf()
606 buf = tx->dqo.free_qpl_bufs_csm; in gve_tx_alloc_qpl_buf()
611 tx->dqo.free_qpl_bufs_csm = tx->dqo.qpl_bufs[buf]; in gve_tx_alloc_qpl_buf()
612 tx->dqo.qpl_bufs_consumed++; in gve_tx_alloc_qpl_buf()
617 * Tx buffer i corresponds to
622 gve_tx_buf_get_addr_dqo(struct gve_tx_ring *tx, in gve_tx_buf_get_addr_dqo() argument
629 *va = (char *)tx->com.qpl->dmas[page_id].cpu_addr + offset; in gve_tx_buf_get_addr_dqo()
630 *dma_addr = tx->com.qpl->dmas[page_id].bus_addr + offset; in gve_tx_buf_get_addr_dqo()
634 gve_get_page_dma_handle(struct gve_tx_ring *tx, int32_t index) in gve_get_page_dma_handle() argument
638 return (&tx->com.qpl->dmas[page_id]); in gve_get_page_dma_handle()
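The truncated comment at line 617 and the two address lookups that follow imply a fixed mapping from a QPL buffer index to a page and an offset within that page. A sketch of that arithmetic; GVE_TX_BUF_SIZE_DQO and GVE_TX_BUFS_PER_PAGE_DQO are the names I would expect for the buffer-size and buffers-per-page constants, not ones confirmed by this listing:

static void
gve_tx_buf_get_addr_dqo(struct gve_tx_ring *tx,
    int32_t index, void **va, bus_addr_t *dma_addr)
{
	/*
	 * Assumed layout: each QPL page is carved into fixed-size Tx buffers,
	 * so buffer i lives at page (i / bufs_per_page), offset
	 * (i % bufs_per_page) * buf_size.
	 */
	int page_id = index / GVE_TX_BUFS_PER_PAGE_DQO;
	int offset = (index % GVE_TX_BUFS_PER_PAGE_DQO) * GVE_TX_BUF_SIZE_DQO;

	*va = (char *)tx->com.qpl->dmas[page_id].cpu_addr + offset;
	*dma_addr = tx->com.qpl->dmas[page_id].bus_addr + offset;
}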
642 gve_tx_copy_mbuf_and_write_pkt_descs(struct gve_tx_ring *tx, in gve_tx_copy_mbuf_and_write_pkt_descs() argument
660 buf = gve_tx_alloc_qpl_buf(tx); in gve_tx_copy_mbuf_and_write_pkt_descs()
664 gve_tx_buf_get_addr_dqo(tx, buf, &va, &addr); in gve_tx_copy_mbuf_and_write_pkt_descs()
669 dma = gve_get_page_dma_handle(tx, buf); in gve_tx_copy_mbuf_and_write_pkt_descs()
672 gve_tx_fill_pkt_desc_dqo(tx, desc_idx, in gve_tx_copy_mbuf_and_write_pkt_descs()
681 tx->dqo.qpl_bufs[prev_buf] = buf; in gve_tx_copy_mbuf_and_write_pkt_descs()
687 tx->dqo.qpl_bufs[buf] = -1; in gve_tx_copy_mbuf_and_write_pkt_descs()
691 gve_xmit_dqo_qpl(struct gve_tx_ring *tx, struct mbuf *mbuf) in gve_xmit_dqo_qpl() argument
693 uint32_t desc_idx = tx->dqo.desc_tail; in gve_xmit_dqo_qpl()
713 if (__predict_false(!gve_tx_has_desc_room_dqo(tx, total_descs_needed))) in gve_xmit_dqo_qpl()
716 if (!gve_tx_have_enough_qpl_bufs(tx, nsegs)) { in gve_xmit_dqo_qpl()
719 tx->stats.tx_delayed_pkt_nospace_qpl_bufs, 1); in gve_xmit_dqo_qpl()
724 pkt = gve_alloc_pending_packet(tx); in gve_xmit_dqo_qpl()
728 tx->stats.tx_delayed_pkt_nospace_compring, 1); in gve_xmit_dqo_qpl()
732 completion_tag = pkt - tx->dqo.pending_pkts; in gve_xmit_dqo_qpl()
735 err = gve_tx_fill_ctx_descs(tx, mbuf, is_tso, &desc_idx); in gve_xmit_dqo_qpl()
739 gve_tx_copy_mbuf_and_write_pkt_descs(tx, mbuf, pkt, in gve_xmit_dqo_qpl()
743 tx->dqo.desc_tail = desc_idx; in gve_xmit_dqo_qpl()
749 gve_tx_request_desc_compl(tx, desc_idx); in gve_xmit_dqo_qpl()
751 tx->req += total_descs_needed; /* tx->req is just a sysctl counter */ in gve_xmit_dqo_qpl()
756 gve_free_pending_packet(tx, pkt); in gve_xmit_dqo_qpl()
761 gve_xmit_dqo(struct gve_tx_ring *tx, struct mbuf **mbuf_ptr) in gve_xmit_dqo() argument
764 uint32_t desc_idx = tx->dqo.desc_tail; in gve_xmit_dqo()
790 if (__predict_false(!gve_tx_has_desc_room_dqo(tx, total_descs_needed))) in gve_xmit_dqo()
793 pkt = gve_alloc_pending_packet(tx); in gve_xmit_dqo()
797 tx->stats.tx_delayed_pkt_nospace_compring, 1); in gve_xmit_dqo()
801 completion_tag = pkt - tx->dqo.pending_pkts; in gve_xmit_dqo()
803 err = gve_map_mbuf_dqo(tx, mbuf_ptr, pkt->dmamap, in gve_xmit_dqo()
814 !gve_tx_has_desc_room_dqo(tx, total_descs_needed))) { in gve_xmit_dqo()
819 err = gve_tx_fill_ctx_descs(tx, mbuf, is_tso, &desc_idx); in gve_xmit_dqo()
823 bus_dmamap_sync(tx->dqo.buf_dmatag, pkt->dmamap, BUS_DMASYNC_PREWRITE); in gve_xmit_dqo()
825 gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, in gve_xmit_dqo()
832 tx->dqo.desc_tail = desc_idx; in gve_xmit_dqo()
838 gve_tx_request_desc_compl(tx, desc_idx); in gve_xmit_dqo()
840 tx->req += total_descs_needed; /* tx->req is just a sysctl counter */ in gve_xmit_dqo()
844 gve_unmap_packet(tx, pkt); in gve_xmit_dqo()
847 gve_free_pending_packet(tx, pkt); in gve_xmit_dqo()
852 gve_reap_qpl_bufs_dqo(struct gve_tx_ring *tx, in gve_reap_qpl_bufs_dqo() argument
862 dma = gve_get_page_dma_handle(tx, buf); in gve_reap_qpl_bufs_dqo()
865 buf = tx->dqo.qpl_bufs[buf]; in gve_reap_qpl_bufs_dqo()
871 old_head = atomic_load_32(&tx->dqo.free_qpl_bufs_prd); in gve_reap_qpl_bufs_dqo()
872 tx->dqo.qpl_bufs[buf] = old_head; in gve_reap_qpl_bufs_dqo()
879 if (atomic_cmpset_rel_32(&tx->dqo.free_qpl_bufs_prd, in gve_reap_qpl_bufs_dqo()
887 atomic_add_rel_32(&tx->dqo.qpl_bufs_produced, pkt->num_qpl_bufs); in gve_reap_qpl_bufs_dqo()
894 struct gve_tx_ring *tx, uint16_t compl_tag) in gve_handle_packet_completion() argument
899 if (__predict_false(compl_tag >= tx->dqo.num_pending_pkts)) { in gve_handle_packet_completion()
900 device_printf(priv->dev, "Invalid TX completion tag: %d\n", in gve_handle_packet_completion()
905 pending_pkt = &tx->dqo.pending_pkts[compl_tag]; in gve_handle_packet_completion()
918 gve_reap_qpl_bufs_dqo(tx, pending_pkt); in gve_handle_packet_completion()
920 gve_unmap_packet(tx, pending_pkt); in gve_handle_packet_completion()
924 gve_free_pending_packet(tx, pending_pkt); in gve_handle_packet_completion()
931 struct gve_tx_ring *tx = arg; in gve_tx_intr_dqo() local
932 struct gve_priv *priv = tx->com.priv; in gve_tx_intr_dqo()
933 struct gve_ring_com *com = &tx->com; in gve_tx_intr_dqo()
944 gve_tx_clear_desc_ring_dqo(struct gve_tx_ring *tx) in gve_tx_clear_desc_ring_dqo() argument
946 struct gve_ring_com *com = &tx->com; in gve_tx_clear_desc_ring_dqo()
950 tx->dqo.desc_ring[i] = (union gve_tx_desc_dqo){}; in gve_tx_clear_desc_ring_dqo()
952 bus_dmamap_sync(tx->desc_ring_mem.tag, tx->desc_ring_mem.map, in gve_tx_clear_desc_ring_dqo()
957 gve_tx_clear_compl_ring_dqo(struct gve_tx_ring *tx) in gve_tx_clear_compl_ring_dqo() argument
959 struct gve_ring_com *com = &tx->com; in gve_tx_clear_compl_ring_dqo()
965 tx->dqo.compl_ring[i] = (struct gve_tx_compl_desc_dqo){}; in gve_tx_clear_compl_ring_dqo()
967 bus_dmamap_sync(tx->dqo.compl_ring_mem.tag, tx->dqo.compl_ring_mem.map, in gve_tx_clear_compl_ring_dqo()
974 struct gve_tx_ring *tx = &priv->tx[i]; in gve_clear_tx_ring_dqo() local
977 tx->dqo.desc_head = 0; in gve_clear_tx_ring_dqo()
978 tx->dqo.desc_tail = 0; in gve_clear_tx_ring_dqo()
979 tx->dqo.desc_mask = priv->tx_desc_cnt - 1; in gve_clear_tx_ring_dqo()
980 tx->dqo.last_re_idx = 0; in gve_clear_tx_ring_dqo()
982 tx->dqo.compl_head = 0; in gve_clear_tx_ring_dqo()
983 tx->dqo.compl_mask = priv->tx_desc_cnt - 1; in gve_clear_tx_ring_dqo()
984 atomic_store_32(&tx->dqo.hw_tx_head, 0); in gve_clear_tx_ring_dqo()
985 tx->dqo.cur_gen_bit = 0; in gve_clear_tx_ring_dqo()
987 gve_free_tx_mbufs_dqo(tx); in gve_clear_tx_ring_dqo()
989 for (j = 0; j < tx->dqo.num_pending_pkts; j++) { in gve_clear_tx_ring_dqo()
990 if (gve_is_qpl(tx->com.priv)) in gve_clear_tx_ring_dqo()
991 gve_clear_qpl_pending_pkt(&tx->dqo.pending_pkts[j]); in gve_clear_tx_ring_dqo()
992 tx->dqo.pending_pkts[j].next = in gve_clear_tx_ring_dqo()
993 (j == tx->dqo.num_pending_pkts - 1) ? -1 : j + 1; in gve_clear_tx_ring_dqo()
994 tx->dqo.pending_pkts[j].state = GVE_PACKET_STATE_FREE; in gve_clear_tx_ring_dqo()
996 tx->dqo.free_pending_pkts_csm = 0; in gve_clear_tx_ring_dqo()
997 atomic_store_rel_32(&tx->dqo.free_pending_pkts_prd, -1); in gve_clear_tx_ring_dqo()
1001 tx->com.qpl->num_pages; in gve_clear_tx_ring_dqo()
1004 tx->dqo.qpl_bufs[j] = j + 1; in gve_clear_tx_ring_dqo()
1005 tx->dqo.qpl_bufs[j] = -1; in gve_clear_tx_ring_dqo()
1007 tx->dqo.free_qpl_bufs_csm = 0; in gve_clear_tx_ring_dqo()
1008 atomic_store_32(&tx->dqo.free_qpl_bufs_prd, -1); in gve_clear_tx_ring_dqo()
1009 atomic_store_32(&tx->dqo.qpl_bufs_produced, qpl_buf_cnt); in gve_clear_tx_ring_dqo()
1010 tx->dqo.qpl_bufs_produced_cached = qpl_buf_cnt; in gve_clear_tx_ring_dqo()
1011 tx->dqo.qpl_bufs_consumed = 0; in gve_clear_tx_ring_dqo()
1014 gve_tx_clear_desc_ring_dqo(tx); in gve_clear_tx_ring_dqo()
1015 gve_tx_clear_compl_ring_dqo(tx); in gve_clear_tx_ring_dqo()
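Lines 989-1008 rebuild both free lists after a ring clear: every entry points at its successor, the final entry holds -1 as the end-of-list sentinel, the consumer heads start at index 0, and the producer heads start empty. The QPL-buffer half of that setup plausibly looks like the snippet below; the loop bound qpl_buf_cnt - 1 is inferred from the two assignments at lines 1004-1005, where the second one falls outside the loop:

	/* Chain every QPL buffer into one free list: 0 -> 1 -> ... -> n-1 -> -1. */
	for (j = 0; j < qpl_buf_cnt - 1; j++)
		tx->dqo.qpl_bufs[j] = j + 1;
	tx->dqo.qpl_bufs[j] = -1;

	tx->dqo.free_qpl_bufs_csm = 0;			/* consumer list holds everything */
	atomic_store_32(&tx->dqo.free_qpl_bufs_prd, -1);	/* producer list starts empty */
	atomic_store_32(&tx->dqo.qpl_bufs_produced, qpl_buf_cnt);
	tx->dqo.qpl_bufs_produced_cached = qpl_buf_cnt;
	tx->dqo.qpl_bufs_consumed = 0;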
1019 gve_tx_cleanup_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, int budget) in gve_tx_cleanup_dqo() argument
1030 bus_dmamap_sync(tx->dqo.compl_ring_mem.tag, tx->dqo.compl_ring_mem.map, in gve_tx_cleanup_dqo()
1033 compl_desc = &tx->dqo.compl_ring[tx->dqo.compl_head]; in gve_tx_cleanup_dqo()
1034 if (compl_desc->generation == tx->dqo.cur_gen_bit) in gve_tx_cleanup_dqo()
1047 atomic_store_rel_32(&tx->dqo.hw_tx_head, tx_head); in gve_tx_cleanup_dqo()
1051 tx, compl_tag); in gve_tx_cleanup_dqo()
1055 tx->dqo.compl_head = (tx->dqo.compl_head + 1) & in gve_tx_cleanup_dqo()
1056 tx->dqo.compl_mask; in gve_tx_cleanup_dqo()
1058 tx->dqo.cur_gen_bit ^= tx->dqo.compl_head == 0; in gve_tx_cleanup_dqo()
1067 if (atomic_load_bool(&tx->stopped) && work_done) { in gve_tx_cleanup_dqo()
1068 atomic_store_bool(&tx->stopped, false); in gve_tx_cleanup_dqo()
1069 taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task); in gve_tx_cleanup_dqo()
1072 tx->done += work_done; /* tx->done is just a sysctl counter */ in gve_tx_cleanup_dqo()
1074 counter_u64_add_protected(tx->stats.tbytes, bytes_done); in gve_tx_cleanup_dqo()
1075 counter_u64_add_protected(tx->stats.tpackets, pkts_done); in gve_tx_cleanup_dqo()
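Lines 1019-1058 trace the shape of the completion poll loop: the completion ring is a power-of-two ring, a generation bit distinguishes descriptors the hardware has not yet written from fresh ones, and that bit flips each time the head wraps back to 0. A condensed skeleton of the loop, with the per-completion-type dispatch and any memory-barrier details elided because they are not in this listing:

	while (work_done < budget) {
		bus_dmamap_sync(tx->dqo.compl_ring_mem.tag,
		    tx->dqo.compl_ring_mem.map, BUS_DMASYNC_POSTREAD);

		compl_desc = &tx->dqo.compl_ring[tx->dqo.compl_head];
		/* A generation bit that still matches ours means no new descriptor yet. */
		if (compl_desc->generation == tx->dqo.cur_gen_bit)
			break;

		/* ... dispatch: descriptor completions update hw_tx_head,
		 *     packet completions go to gve_handle_packet_completion() ... */

		tx->dqo.compl_head = (tx->dqo.compl_head + 1) &
		    tx->dqo.compl_mask;
		/* Flip the expected generation every time the head wraps to 0. */
		tx->dqo.cur_gen_bit ^= tx->dqo.compl_head == 0;
		work_done++;
	}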
1084 struct gve_tx_ring *tx = arg; in gve_tx_cleanup_tq_dqo() local
1085 struct gve_priv *priv = tx->com.priv; in gve_tx_cleanup_tq_dqo()
1090 if (gve_tx_cleanup_dqo(priv, tx, /*budget=*/1024)) { in gve_tx_cleanup_tq_dqo()
1091 taskqueue_enqueue(tx->com.cleanup_tq, &tx->com.cleanup_task); in gve_tx_cleanup_tq_dqo()
1095 gve_db_bar_dqo_write_4(priv, tx->com.irq_db_offset, in gve_tx_cleanup_tq_dqo()