Lines matching full:tx — full-word matches for the identifier tx, listed by source line number, with the enclosing function noted after each match; lines that declare tx are additionally tagged "argument" or "local".

38 gve_unmap_packet(struct gve_tx_ring *tx,  in gve_unmap_packet()  argument
41 bus_dmamap_sync(tx->dqo.buf_dmatag, pending_pkt->dmamap, in gve_unmap_packet()
43 bus_dmamap_unload(tx->dqo.buf_dmatag, pending_pkt->dmamap); in gve_unmap_packet()
54 gve_free_tx_mbufs_dqo(struct gve_tx_ring *tx) in gve_free_tx_mbufs_dqo() argument
59 for (i = 0; i < tx->dqo.num_pending_pkts; i++) { in gve_free_tx_mbufs_dqo()
60 pending_pkt = &tx->dqo.pending_pkts[i]; in gve_free_tx_mbufs_dqo()
64 if (gve_is_qpl(tx->com.priv)) in gve_free_tx_mbufs_dqo()
67 gve_unmap_packet(tx, pending_pkt); in gve_free_tx_mbufs_dqo()
77 struct gve_tx_ring *tx = &priv->tx[i]; in gve_tx_free_ring_dqo() local
78 struct gve_ring_com *com = &tx->com; in gve_tx_free_ring_dqo()
81 if (tx->dqo.desc_ring != NULL) { in gve_tx_free_ring_dqo()
82 gve_dma_free_coherent(&tx->desc_ring_mem); in gve_tx_free_ring_dqo()
83 tx->dqo.desc_ring = NULL; in gve_tx_free_ring_dqo()
86 if (tx->dqo.compl_ring != NULL) { in gve_tx_free_ring_dqo()
87 gve_dma_free_coherent(&tx->dqo.compl_ring_mem); in gve_tx_free_ring_dqo()
88 tx->dqo.compl_ring = NULL; in gve_tx_free_ring_dqo()
91 if (tx->dqo.pending_pkts != NULL) { in gve_tx_free_ring_dqo()
92 gve_free_tx_mbufs_dqo(tx); in gve_tx_free_ring_dqo()
94 if (!gve_is_qpl(priv) && tx->dqo.buf_dmatag) { in gve_tx_free_ring_dqo()
95 for (j = 0; j < tx->dqo.num_pending_pkts; j++) in gve_tx_free_ring_dqo()
96 if (tx->dqo.pending_pkts[j].state != in gve_tx_free_ring_dqo()
98 bus_dmamap_destroy(tx->dqo.buf_dmatag, in gve_tx_free_ring_dqo()
99 tx->dqo.pending_pkts[j].dmamap); in gve_tx_free_ring_dqo()
102 free(tx->dqo.pending_pkts, M_GVE); in gve_tx_free_ring_dqo()
103 tx->dqo.pending_pkts = NULL; in gve_tx_free_ring_dqo()
106 if (!gve_is_qpl(priv) && tx->dqo.buf_dmatag) in gve_tx_free_ring_dqo()
107 bus_dma_tag_destroy(tx->dqo.buf_dmatag); in gve_tx_free_ring_dqo()
109 if (gve_is_qpl(priv) && tx->dqo.qpl_bufs != NULL) { in gve_tx_free_ring_dqo()
110 free(tx->dqo.qpl_bufs, M_GVE); in gve_tx_free_ring_dqo()
111 tx->dqo.qpl_bufs = NULL; in gve_tx_free_ring_dqo()
121 gve_tx_alloc_rda_fields_dqo(struct gve_tx_ring *tx) in gve_tx_alloc_rda_fields_dqo() argument
123 struct gve_priv *priv = tx->com.priv; in gve_tx_alloc_rda_fields_dqo()
128 * DMA tag for mapping Tx mbufs in gve_tx_alloc_rda_fields_dqo()
144 &tx->dqo.buf_dmatag); in gve_tx_alloc_rda_fields_dqo()
151 for (j = 0; j < tx->dqo.num_pending_pkts; j++) { in gve_tx_alloc_rda_fields_dqo()
152 err = bus_dmamap_create(tx->dqo.buf_dmatag, 0, in gve_tx_alloc_rda_fields_dqo()
153 &tx->dqo.pending_pkts[j].dmamap); in gve_tx_alloc_rda_fields_dqo()
160 tx->dqo.pending_pkts[j].state = GVE_PACKET_STATE_FREE; in gve_tx_alloc_rda_fields_dqo()
169 struct gve_tx_ring *tx = &priv->tx[i]; in gve_tx_alloc_ring_dqo() local
176 CACHE_LINE_SIZE, &tx->desc_ring_mem); in gve_tx_alloc_ring_dqo()
179 "Failed to alloc desc ring for tx ring %d", i); in gve_tx_alloc_ring_dqo()
182 tx->dqo.desc_ring = tx->desc_ring_mem.cpu_addr; in gve_tx_alloc_ring_dqo()
187 CACHE_LINE_SIZE, &tx->dqo.compl_ring_mem); in gve_tx_alloc_ring_dqo()
190 "Failed to alloc compl ring for tx ring %d", i); in gve_tx_alloc_ring_dqo()
193 tx->dqo.compl_ring = tx->dqo.compl_ring_mem.cpu_addr; in gve_tx_alloc_ring_dqo()
211 tx->dqo.num_pending_pkts = num_pending_pkts; in gve_tx_alloc_ring_dqo()
212 tx->dqo.pending_pkts = malloc( in gve_tx_alloc_ring_dqo()
219 tx->com.qpl = gve_alloc_qpl(priv, i, GVE_TX_NUM_QPL_PAGES_DQO, in gve_tx_alloc_ring_dqo()
221 if (tx->com.qpl == NULL) { in gve_tx_alloc_ring_dqo()
223 "Failed to alloc QPL for tx ring %d", i); in gve_tx_alloc_ring_dqo()
229 tx->com.qpl->num_pages; in gve_tx_alloc_ring_dqo()
231 tx->dqo.qpl_bufs = malloc( in gve_tx_alloc_ring_dqo()
232 sizeof(*tx->dqo.qpl_bufs) * qpl_buf_cnt, in gve_tx_alloc_ring_dqo()
235 gve_tx_alloc_rda_fields_dqo(tx); in gve_tx_alloc_ring_dqo()
263 gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, in gve_tx_fill_pkt_desc_dqo() argument
269 &tx->dqo.desc_ring[*desc_idx].pkt; in gve_tx_fill_pkt_desc_dqo()
284 *desc_idx = (*desc_idx + 1) & tx->dqo.desc_mask; in gve_tx_fill_pkt_desc_dqo()
391 gve_tx_fill_ctx_descs(struct gve_tx_ring *tx, struct mbuf *mbuf, in gve_tx_fill_ctx_descs() argument
408 tx->stats.tx_delayed_pkt_tsoerr, 1); in gve_tx_fill_ctx_descs()
413 tso_desc = &tx->dqo.desc_ring[*desc_idx].tso_ctx; in gve_tx_fill_ctx_descs()
416 *desc_idx = (*desc_idx + 1) & tx->dqo.desc_mask; in gve_tx_fill_ctx_descs()
418 counter_u64_add_protected(tx->stats.tso_packet_cnt, 1); in gve_tx_fill_ctx_descs()
422 gen_desc = &tx->dqo.desc_ring[*desc_idx].general_ctx; in gve_tx_fill_ctx_descs()
424 *desc_idx = (*desc_idx + 1) & tx->dqo.desc_mask; in gve_tx_fill_ctx_descs()
429 gve_map_mbuf_dqo(struct gve_tx_ring *tx, in gve_map_mbuf_dqo() argument
436 err = bus_dmamap_load_mbuf_sg(tx->dqo.buf_dmatag, dmamap, in gve_map_mbuf_dqo()
448 tx->stats.tx_mbuf_collapse, 1); in gve_map_mbuf_dqo()
457 tx->stats.tx_mbuf_defrag, 1); in gve_map_mbuf_dqo()
465 tx->stats.tx_mbuf_defrag_err, 1); in gve_map_mbuf_dqo()
474 return (gve_map_mbuf_dqo(tx, mbuf, dmamap, in gve_map_mbuf_dqo()
480 tx->stats.tx_mbuf_dmamap_enomem_err, 1); in gve_map_mbuf_dqo()
491 counter_u64_add_protected(tx->stats.tx_mbuf_dmamap_err, 1); in gve_map_mbuf_dqo()
497 num_avail_desc_ring_slots(const struct gve_tx_ring *tx) in num_avail_desc_ring_slots() argument
499 uint32_t num_used = (tx->dqo.desc_tail - tx->dqo.desc_head) & in num_avail_desc_ring_slots()
500 tx->dqo.desc_mask; in num_avail_desc_ring_slots()
502 return (tx->dqo.desc_mask - num_used); in num_avail_desc_ring_slots()
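
The matches for num_avail_desc_ring_slots() above capture the whole occupancy calculation: in-flight descriptors are (desc_tail - desc_head) & desc_mask, and the free count is desc_mask - num_used, so one slot is always held back to keep head == tail meaning "empty". A minimal userspace sketch of that arithmetic; the toy_* names and the 512-entry ring are illustrative stand-ins, not the driver's types.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-in for the ring indices seen in the matches above;
 * desc_mask is ring_size - 1 for a power-of-two ring.
 */
struct toy_ring {
    uint32_t desc_head;   /* consumer index, advanced on completion */
    uint32_t desc_tail;   /* producer index, advanced on post */
    uint32_t desc_mask;   /* ring_size - 1 */
};

static uint32_t
toy_avail_slots(const struct toy_ring *r)
{
    /* Same arithmetic as num_avail_desc_ring_slots(): wrap-safe. */
    uint32_t num_used = (r->desc_tail - r->desc_head) & r->desc_mask;

    /*
     * Usable capacity is mask (= size - 1): one slot stays unused so
     * head == tail always means "empty", never "full".
     */
    return (r->desc_mask - num_used);
}

int
main(void)
{
    struct toy_ring r = { .desc_head = 510, .desc_tail = 3, .desc_mask = 511 };

    /* Tail wrapped past head: (3 - 510) & 511 == 5 descriptors in flight. */
    printf("available: %" PRIu32 "\n", toy_avail_slots(&r));   /* 506 */
    assert(toy_avail_slots(&r) == 506);
    return (0);
}
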
506 gve_alloc_pending_packet(struct gve_tx_ring *tx) in gve_alloc_pending_packet() argument
508 int32_t index = tx->dqo.free_pending_pkts_csm; in gve_alloc_pending_packet()
516 tx->dqo.free_pending_pkts_csm = atomic_swap_32( in gve_alloc_pending_packet()
517 &tx->dqo.free_pending_pkts_prd, -1); in gve_alloc_pending_packet()
519 index = tx->dqo.free_pending_pkts_csm; in gve_alloc_pending_packet()
524 pending_pkt = &tx->dqo.pending_pkts[index]; in gve_alloc_pending_packet()
527 tx->dqo.free_pending_pkts_csm = pending_pkt->next; in gve_alloc_pending_packet()
536 gve_free_pending_packet(struct gve_tx_ring *tx, in gve_free_pending_packet() argument
539 int index = pending_pkt - tx->dqo.pending_pkts; in gve_free_pending_packet()
548 old_head = atomic_load_acq_32(&tx->dqo.free_pending_pkts_prd); in gve_free_pending_packet()
551 if (atomic_cmpset_32(&tx->dqo.free_pending_pkts_prd, in gve_free_pending_packet()
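
Taken together, the gve_alloc_pending_packet() and gve_free_pending_packet() matches suggest a two-list, lock-free free list: the transmit path pops from a private consumer head (free_pending_pkts_csm) and, only when that runs dry, grabs the entire producer list with an atomic swap against -1, while the completion path pushes freed entries onto the producer head (free_pending_pkts_prd) with a compare-and-set loop. A sketch of that pattern using C11 atomics; the toy_* types, TOY_NUM_PKTS, and the main() driver are made up for illustration.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_NUM_PKTS    8
#define TOY_EOL         (-1)    /* end-of-list sentinel, mirroring the driver's -1 */

struct toy_pkt {
    int32_t next;               /* index of next free packet, or TOY_EOL */
};

struct toy_tx {
    struct toy_pkt pkts[TOY_NUM_PKTS];
    int32_t free_csm;           /* consumer head: transmit path only */
    _Atomic int32_t free_prd;   /* producer head: completion path */
};

/*
 * Transmit path: pop from the private consumer list, refilling it from
 * the shared producer list (one atomic swap) only when it runs dry.
 */
static struct toy_pkt *
toy_alloc_pkt(struct toy_tx *tx)
{
    int32_t idx = tx->free_csm;

    if (idx == TOY_EOL) {
        /* Take the entire producer list, leaving it empty. */
        idx = atomic_exchange(&tx->free_prd, TOY_EOL);
        tx->free_csm = idx;
        if (idx == TOY_EOL)
            return (NULL);      /* nothing free anywhere */
    }
    tx->free_csm = tx->pkts[idx].next;
    return (&tx->pkts[idx]);
}

/* Completion path: push onto the producer list with a CAS loop. */
static void
toy_free_pkt(struct toy_tx *tx, struct toy_pkt *pkt)
{
    int32_t idx = (int32_t)(pkt - tx->pkts);
    int32_t old_head;

    do {
        old_head = atomic_load(&tx->free_prd);
        pkt->next = old_head;
    } while (!atomic_compare_exchange_weak(&tx->free_prd, &old_head, idx));
}

int
main(void)
{
    static struct toy_tx tx;

    /* Seed: every packet on the consumer list, producer list empty. */
    for (int32_t j = 0; j < TOY_NUM_PKTS; j++)
        tx.pkts[j].next = (j == TOY_NUM_PKTS - 1) ? TOY_EOL : j + 1;
    tx.free_csm = 0;
    atomic_store(&tx.free_prd, TOY_EOL);

    struct toy_pkt *a = toy_alloc_pkt(&tx);
    struct toy_pkt *b = toy_alloc_pkt(&tx);
    toy_free_pkt(&tx, a);       /* lands on the producer list */
    printf("allocated %td and %td, freed %td\n", a - tx.pkts, b - tx.pkts, a - tx.pkts);
    return (0);
}
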
563 gve_tx_has_desc_room_dqo(struct gve_tx_ring *tx, int needed_descs) in gve_tx_has_desc_room_dqo() argument
565 if (needed_descs <= num_avail_desc_ring_slots(tx)) in gve_tx_has_desc_room_dqo()
568 tx->dqo.desc_head = atomic_load_acq_32(&tx->dqo.hw_tx_head); in gve_tx_has_desc_room_dqo()
569 if (needed_descs > num_avail_desc_ring_slots(tx)) { in gve_tx_has_desc_room_dqo()
572 tx->stats.tx_delayed_pkt_nospace_descring, 1); in gve_tx_has_desc_room_dqo()
581 gve_tx_request_desc_compl(struct gve_tx_ring *tx, uint32_t desc_idx) in gve_tx_request_desc_compl() argument
586 last_desc_idx = (desc_idx - 1) & tx->dqo.desc_mask; in gve_tx_request_desc_compl()
588 (last_desc_idx - tx->dqo.last_re_idx) & tx->dqo.desc_mask; in gve_tx_request_desc_compl()
592 tx->dqo.desc_ring[last_desc_idx].pkt.report_event = true; in gve_tx_request_desc_compl()
593 tx->dqo.last_re_idx = last_desc_idx; in gve_tx_request_desc_compl()
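
The gve_tx_request_desc_compl() matches show completions being rate-limited rather than requested per packet: the distance from the last report-event descriptor is (last_desc_idx - last_re_idx) & desc_mask, and when it is large enough the last descriptor's report_event flag is set and last_re_idx is updated. The threshold itself is not visible in the matches, so the sketch below uses a made-up TOY_RE_PERIOD; all toy_*/TOY_* names are illustrative.

#include <stdbool.h>
#include <stdint.h>

#define TOY_RING_SIZE   512
#define TOY_RE_PERIOD   64      /* hypothetical threshold, not taken from the driver */

struct toy_desc {
    bool report_event;
};

struct toy_tx_ring {
    struct toy_desc ring[TOY_RING_SIZE];
    uint32_t desc_mask;         /* TOY_RING_SIZE - 1 */
    uint32_t last_re_idx;       /* last descriptor that requested an event */
};

/*
 * Called after a packet's descriptors were written; desc_idx points one
 * past the last of them, matching the shape of the lines above.
 */
static void
toy_request_desc_compl(struct toy_tx_ring *tx, uint32_t desc_idx)
{
    uint32_t last_desc_idx = (desc_idx - 1) & tx->desc_mask;
    uint32_t since_last_re = (last_desc_idx - tx->last_re_idx) & tx->desc_mask;

    if (since_last_re >= TOY_RE_PERIOD) {
        tx->ring[last_desc_idx].report_event = true;
        tx->last_re_idx = last_desc_idx;
    }
}

int
main(void)
{
    static struct toy_tx_ring tx = { .desc_mask = TOY_RING_SIZE - 1 };

    /* Pretend 70 descriptors were posted since the last report event. */
    toy_request_desc_compl(&tx, 70);
    return (tx.ring[69].report_event ? 0 : 1);
}
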
598 gve_tx_have_enough_qpl_bufs(struct gve_tx_ring *tx, int num_bufs) in gve_tx_have_enough_qpl_bufs() argument
600 uint32_t available = tx->dqo.qpl_bufs_produced_cached - in gve_tx_have_enough_qpl_bufs()
601 tx->dqo.qpl_bufs_consumed; in gve_tx_have_enough_qpl_bufs()
606 tx->dqo.qpl_bufs_produced_cached = atomic_load_acq_32( in gve_tx_have_enough_qpl_bufs()
607 &tx->dqo.qpl_bufs_produced); in gve_tx_have_enough_qpl_bufs()
608 available = tx->dqo.qpl_bufs_produced_cached - in gve_tx_have_enough_qpl_bufs()
609 tx->dqo.qpl_bufs_consumed; in gve_tx_have_enough_qpl_bufs()
617 gve_tx_alloc_qpl_buf(struct gve_tx_ring *tx) in gve_tx_alloc_qpl_buf() argument
619 int32_t buf = tx->dqo.free_qpl_bufs_csm; in gve_tx_alloc_qpl_buf()
622 tx->dqo.free_qpl_bufs_csm = atomic_swap_32( in gve_tx_alloc_qpl_buf()
623 &tx->dqo.free_qpl_bufs_prd, -1); in gve_tx_alloc_qpl_buf()
624 buf = tx->dqo.free_qpl_bufs_csm; in gve_tx_alloc_qpl_buf()
629 tx->dqo.free_qpl_bufs_csm = tx->dqo.qpl_bufs[buf]; in gve_tx_alloc_qpl_buf()
630 tx->dqo.qpl_bufs_consumed++; in gve_tx_alloc_qpl_buf()
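
The gve_tx_have_enough_qpl_bufs() and gve_tx_alloc_qpl_buf() matches point at the same consumer/producer split for QPL buffers: availability is judged against a cached copy of an atomically published qpl_bufs_produced counter, refreshed only when the cached value falls short, and the free buffers form an index-linked list threaded through qpl_bufs[] with csm/prd heads like the pending-packet list above. A sketch of just the availability check under those assumptions; the toy_* names and the values in main() are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_qpl_tx {
    /* Published by the completion path, read by the transmit path. */
    _Atomic uint32_t qpl_bufs_produced;
    /*
     * Transmit-path-private copies; unsigned wraparound keeps the
     * difference meaningful even after the counters overflow.
     */
    uint32_t qpl_bufs_produced_cached;
    uint32_t qpl_bufs_consumed;
};

static bool
toy_have_enough_qpl_bufs(struct toy_qpl_tx *tx, uint32_t num_bufs)
{
    uint32_t available = tx->qpl_bufs_produced_cached - tx->qpl_bufs_consumed;

    if (available >= num_bufs)
        return (true);

    /* Cache looked short: refresh from the shared counter and retry. */
    tx->qpl_bufs_produced_cached =
        atomic_load_explicit(&tx->qpl_bufs_produced, memory_order_acquire);
    available = tx->qpl_bufs_produced_cached - tx->qpl_bufs_consumed;
    return (available >= num_bufs);
}

int
main(void)
{
    static struct toy_qpl_tx tx = {
        .qpl_bufs_produced_cached = 10,
        .qpl_bufs_consumed = 10,
    };

    atomic_store(&tx.qpl_bufs_produced, 12);
    printf("room for 2: %d\n", toy_have_enough_qpl_bufs(&tx, 2));   /* 1 */
    printf("room for 3: %d\n", toy_have_enough_qpl_bufs(&tx, 3));   /* 0 */
    return (0);
}
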
635 * Tx buffer i corresponds to
640 gve_tx_buf_get_addr_dqo(struct gve_tx_ring *tx, in gve_tx_buf_get_addr_dqo() argument
647 *va = (char *)tx->com.qpl->dmas[page_id].cpu_addr + offset; in gve_tx_buf_get_addr_dqo()
648 *dma_addr = tx->com.qpl->dmas[page_id].bus_addr + offset; in gve_tx_buf_get_addr_dqo()
652 gve_get_page_dma_handle(struct gve_tx_ring *tx, int32_t index) in gve_get_page_dma_handle() argument
656 return (&tx->com.qpl->dmas[page_id]); in gve_get_page_dma_handle()
660 gve_tx_copy_mbuf_and_write_pkt_descs(struct gve_tx_ring *tx, in gve_tx_copy_mbuf_and_write_pkt_descs() argument
678 buf = gve_tx_alloc_qpl_buf(tx); in gve_tx_copy_mbuf_and_write_pkt_descs()
682 gve_tx_buf_get_addr_dqo(tx, buf, &va, &addr); in gve_tx_copy_mbuf_and_write_pkt_descs()
687 dma = gve_get_page_dma_handle(tx, buf); in gve_tx_copy_mbuf_and_write_pkt_descs()
690 gve_tx_fill_pkt_desc_dqo(tx, desc_idx, in gve_tx_copy_mbuf_and_write_pkt_descs()
699 tx->dqo.qpl_bufs[prev_buf] = buf; in gve_tx_copy_mbuf_and_write_pkt_descs()
705 tx->dqo.qpl_bufs[buf] = -1; in gve_tx_copy_mbuf_and_write_pkt_descs()
709 gve_xmit_dqo_qpl(struct gve_tx_ring *tx, struct mbuf *mbuf) in gve_xmit_dqo_qpl() argument
711 uint32_t desc_idx = tx->dqo.desc_tail; in gve_xmit_dqo_qpl()
731 if (__predict_false(!gve_tx_has_desc_room_dqo(tx, total_descs_needed))) in gve_xmit_dqo_qpl()
734 if (!gve_tx_have_enough_qpl_bufs(tx, nsegs)) { in gve_xmit_dqo_qpl()
737 tx->stats.tx_delayed_pkt_nospace_qpl_bufs, 1); in gve_xmit_dqo_qpl()
742 pkt = gve_alloc_pending_packet(tx); in gve_xmit_dqo_qpl()
746 tx->stats.tx_delayed_pkt_nospace_compring, 1); in gve_xmit_dqo_qpl()
750 completion_tag = pkt - tx->dqo.pending_pkts; in gve_xmit_dqo_qpl()
753 err = gve_tx_fill_ctx_descs(tx, mbuf, is_tso, &desc_idx); in gve_xmit_dqo_qpl()
757 gve_tx_copy_mbuf_and_write_pkt_descs(tx, mbuf, pkt, in gve_xmit_dqo_qpl()
761 tx->dqo.desc_tail = desc_idx; in gve_xmit_dqo_qpl()
767 gve_tx_request_desc_compl(tx, desc_idx); in gve_xmit_dqo_qpl()
769 tx->req += total_descs_needed; /* tx->req is just a sysctl counter */ in gve_xmit_dqo_qpl()
774 gve_free_pending_packet(tx, pkt); in gve_xmit_dqo_qpl()
779 gve_xmit_dqo(struct gve_tx_ring *tx, struct mbuf **mbuf_ptr) in gve_xmit_dqo() argument
782 uint32_t desc_idx = tx->dqo.desc_tail; in gve_xmit_dqo()
808 if (__predict_false(!gve_tx_has_desc_room_dqo(tx, total_descs_needed))) in gve_xmit_dqo()
811 pkt = gve_alloc_pending_packet(tx); in gve_xmit_dqo()
815 tx->stats.tx_delayed_pkt_nospace_compring, 1); in gve_xmit_dqo()
819 completion_tag = pkt - tx->dqo.pending_pkts; in gve_xmit_dqo()
821 err = gve_map_mbuf_dqo(tx, mbuf_ptr, pkt->dmamap, in gve_xmit_dqo()
832 !gve_tx_has_desc_room_dqo(tx, total_descs_needed))) { in gve_xmit_dqo()
837 err = gve_tx_fill_ctx_descs(tx, mbuf, is_tso, &desc_idx); in gve_xmit_dqo()
841 bus_dmamap_sync(tx->dqo.buf_dmatag, pkt->dmamap, BUS_DMASYNC_PREWRITE); in gve_xmit_dqo()
843 gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, in gve_xmit_dqo()
850 tx->dqo.desc_tail = desc_idx; in gve_xmit_dqo()
856 gve_tx_request_desc_compl(tx, desc_idx); in gve_xmit_dqo()
858 tx->req += total_descs_needed; /* tx->req is just a sysctl counter */ in gve_xmit_dqo()
862 gve_unmap_packet(tx, pkt); in gve_xmit_dqo()
865 gve_free_pending_packet(tx, pkt); in gve_xmit_dqo()
870 gve_reap_qpl_bufs_dqo(struct gve_tx_ring *tx, in gve_reap_qpl_bufs_dqo() argument
880 dma = gve_get_page_dma_handle(tx, buf); in gve_reap_qpl_bufs_dqo()
883 buf = tx->dqo.qpl_bufs[buf]; in gve_reap_qpl_bufs_dqo()
889 old_head = atomic_load_32(&tx->dqo.free_qpl_bufs_prd); in gve_reap_qpl_bufs_dqo()
890 tx->dqo.qpl_bufs[buf] = old_head; in gve_reap_qpl_bufs_dqo()
897 if (atomic_cmpset_rel_32(&tx->dqo.free_qpl_bufs_prd, in gve_reap_qpl_bufs_dqo()
905 atomic_add_rel_32(&tx->dqo.qpl_bufs_produced, pkt->num_qpl_bufs); in gve_reap_qpl_bufs_dqo()
912 struct gve_tx_ring *tx, uint16_t compl_tag) in gve_handle_packet_completion() argument
917 if (__predict_false(compl_tag >= tx->dqo.num_pending_pkts)) { in gve_handle_packet_completion()
918 device_printf(priv->dev, "Invalid TX completion tag: %d\n", in gve_handle_packet_completion()
923 pending_pkt = &tx->dqo.pending_pkts[compl_tag]; in gve_handle_packet_completion()
936 gve_reap_qpl_bufs_dqo(tx, pending_pkt); in gve_handle_packet_completion()
938 gve_unmap_packet(tx, pending_pkt); in gve_handle_packet_completion()
942 gve_free_pending_packet(tx, pending_pkt); in gve_handle_packet_completion()
947 gve_check_tx_timeout_dqo(struct gve_priv *priv, struct gve_tx_ring *tx) in gve_check_tx_timeout_dqo() argument
954 for (pkt_idx = 0; pkt_idx < tx->dqo.num_pending_pkts; pkt_idx++) { in gve_check_tx_timeout_dqo()
955 pending_pkt = &tx->dqo.pending_pkts[pkt_idx]; in gve_check_tx_timeout_dqo()
972 struct gve_tx_ring *tx = arg; in gve_tx_intr_dqo() local
973 struct gve_priv *priv = tx->com.priv; in gve_tx_intr_dqo()
974 struct gve_ring_com *com = &tx->com; in gve_tx_intr_dqo()
985 gve_tx_clear_desc_ring_dqo(struct gve_tx_ring *tx) in gve_tx_clear_desc_ring_dqo() argument
987 struct gve_ring_com *com = &tx->com; in gve_tx_clear_desc_ring_dqo()
991 tx->dqo.desc_ring[i] = (union gve_tx_desc_dqo){}; in gve_tx_clear_desc_ring_dqo()
993 bus_dmamap_sync(tx->desc_ring_mem.tag, tx->desc_ring_mem.map, in gve_tx_clear_desc_ring_dqo()
998 gve_tx_clear_compl_ring_dqo(struct gve_tx_ring *tx) in gve_tx_clear_compl_ring_dqo() argument
1000 struct gve_ring_com *com = &tx->com; in gve_tx_clear_compl_ring_dqo()
1006 tx->dqo.compl_ring[i] = (struct gve_tx_compl_desc_dqo){}; in gve_tx_clear_compl_ring_dqo()
1008 bus_dmamap_sync(tx->dqo.compl_ring_mem.tag, tx->dqo.compl_ring_mem.map, in gve_tx_clear_compl_ring_dqo()
1015 struct gve_tx_ring *tx = &priv->tx[i]; in gve_clear_tx_ring_dqo() local
1018 tx->dqo.desc_head = 0; in gve_clear_tx_ring_dqo()
1019 tx->dqo.desc_tail = 0; in gve_clear_tx_ring_dqo()
1020 tx->dqo.desc_mask = priv->tx_desc_cnt - 1; in gve_clear_tx_ring_dqo()
1021 tx->dqo.last_re_idx = 0; in gve_clear_tx_ring_dqo()
1023 tx->dqo.compl_head = 0; in gve_clear_tx_ring_dqo()
1024 tx->dqo.compl_mask = priv->tx_desc_cnt - 1; in gve_clear_tx_ring_dqo()
1025 atomic_store_32(&tx->dqo.hw_tx_head, 0); in gve_clear_tx_ring_dqo()
1026 tx->dqo.cur_gen_bit = 0; in gve_clear_tx_ring_dqo()
1028 gve_free_tx_mbufs_dqo(tx); in gve_clear_tx_ring_dqo()
1030 for (j = 0; j < tx->dqo.num_pending_pkts; j++) { in gve_clear_tx_ring_dqo()
1031 if (gve_is_qpl(tx->com.priv)) in gve_clear_tx_ring_dqo()
1032 gve_clear_qpl_pending_pkt(&tx->dqo.pending_pkts[j]); in gve_clear_tx_ring_dqo()
1034 &tx->dqo.pending_pkts[j].enqueue_time_sec); in gve_clear_tx_ring_dqo()
1035 tx->dqo.pending_pkts[j].next = in gve_clear_tx_ring_dqo()
1036 (j == tx->dqo.num_pending_pkts - 1) ? -1 : j + 1; in gve_clear_tx_ring_dqo()
1037 tx->dqo.pending_pkts[j].state = GVE_PACKET_STATE_FREE; in gve_clear_tx_ring_dqo()
1039 tx->dqo.free_pending_pkts_csm = 0; in gve_clear_tx_ring_dqo()
1040 atomic_store_rel_32(&tx->dqo.free_pending_pkts_prd, -1); in gve_clear_tx_ring_dqo()
1044 tx->com.qpl->num_pages; in gve_clear_tx_ring_dqo()
1047 tx->dqo.qpl_bufs[j] = j + 1; in gve_clear_tx_ring_dqo()
1048 tx->dqo.qpl_bufs[j] = -1; in gve_clear_tx_ring_dqo()
1050 tx->dqo.free_qpl_bufs_csm = 0; in gve_clear_tx_ring_dqo()
1051 atomic_store_32(&tx->dqo.free_qpl_bufs_prd, -1); in gve_clear_tx_ring_dqo()
1052 atomic_store_32(&tx->dqo.qpl_bufs_produced, qpl_buf_cnt); in gve_clear_tx_ring_dqo()
1053 tx->dqo.qpl_bufs_produced_cached = qpl_buf_cnt; in gve_clear_tx_ring_dqo()
1054 tx->dqo.qpl_bufs_consumed = 0; in gve_clear_tx_ring_dqo()
1057 gve_tx_clear_desc_ring_dqo(tx); in gve_clear_tx_ring_dqo()
1058 gve_tx_clear_compl_ring_dqo(tx); in gve_clear_tx_ring_dqo()
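
The gve_clear_tx_ring_dqo() matches show how both free lists are reseeded after a ring clear: each pending packet's next field points at the following index with -1 terminating the chain, the consumer heads start at 0 holding everything, the producer heads start at -1 (empty), and the QPL buffer array is chained the same way with the produced counter primed to the full buffer count. A small standalone illustration of that index-chain setup; TOY_NUM_PKTS and the printf are for demonstration only.

#include <stdint.h>
#include <stdio.h>

#define TOY_NUM_PKTS    8       /* stand-in for num_pending_pkts */

int
main(void)
{
    int32_t next[TOY_NUM_PKTS];
    int32_t csm_head, prd_head;

    /* Chain 0 -> 1 -> ... -> N-1 -> -1, as in the matched lines. */
    for (int32_t j = 0; j < TOY_NUM_PKTS; j++)
        next[j] = (j == TOY_NUM_PKTS - 1) ? -1 : j + 1;

    csm_head = 0;       /* consumer list holds every entry initially */
    prd_head = -1;      /* producer list starts empty */

    /* Walk the consumer list to show the chain. */
    for (int32_t i = csm_head; i != -1; i = next[i])
        printf("%d ", (int)i);
    printf("(prd_head=%d)\n", (int)prd_head);
    return (0);
}
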
1075 gve_tx_cleanup_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, int budget) in gve_tx_cleanup_dqo() argument
1086 bus_dmamap_sync(tx->dqo.compl_ring_mem.tag, in gve_tx_cleanup_dqo()
1087 tx->dqo.compl_ring_mem.map, in gve_tx_cleanup_dqo()
1090 compl_desc = &tx->dqo.compl_ring[tx->dqo.compl_head]; in gve_tx_cleanup_dqo()
1092 tx->dqo.cur_gen_bit) in gve_tx_cleanup_dqo()
1099 atomic_store_rel_32(&tx->dqo.hw_tx_head, tx_head); in gve_tx_cleanup_dqo()
1103 tx, compl_tag); in gve_tx_cleanup_dqo()
1107 tx->dqo.compl_head = (tx->dqo.compl_head + 1) & in gve_tx_cleanup_dqo()
1108 tx->dqo.compl_mask; in gve_tx_cleanup_dqo()
1110 tx->dqo.cur_gen_bit ^= tx->dqo.compl_head == 0; in gve_tx_cleanup_dqo()
1119 if (atomic_load_bool(&tx->stopped) && work_done) { in gve_tx_cleanup_dqo()
1120 atomic_store_bool(&tx->stopped, false); in gve_tx_cleanup_dqo()
1121 taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task); in gve_tx_cleanup_dqo()
1124 tx->done += work_done; /* tx->done is just a sysctl counter */ in gve_tx_cleanup_dqo()
1126 counter_u64_add_protected(tx->stats.tbytes, bytes_done); in gve_tx_cleanup_dqo()
1127 counter_u64_add_protected(tx->stats.tpackets, pkts_done); in gve_tx_cleanup_dqo()
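
The gve_tx_cleanup_dqo() matches show the completion ring being consumed with a generation bit: the head advances under compl_mask and cur_gen_bit flips whenever the head wraps to 0 (cur_gen_bit ^= compl_head == 0), so a descriptor whose generation still carries the expected bit has not been written on the current lap. The exact polarity of the comparison is not visible in the matches, so the sketch below assumes that convention; the toy_* types, ring size, and fake producer in main() are illustrative.

#include <stdbool.h>
#include <stdint.h>

#define TOY_COMPL_RING_SIZE     256

struct toy_compl_desc {
    uint8_t generation;         /* written by the producer when the entry is posted */
    uint16_t compl_tag;
};

struct toy_compl_ring {
    struct toy_compl_desc ring[TOY_COMPL_RING_SIZE];
    uint32_t compl_head;
    uint32_t compl_mask;        /* TOY_COMPL_RING_SIZE - 1 */
    uint8_t cur_gen_bit;
};

/* Consume up to budget completions; returns how many were handled. */
static int
toy_cleanup(struct toy_compl_ring *cq, int budget, void (*handle)(uint16_t))
{
    int work_done = 0;

    while (work_done < budget) {
        struct toy_compl_desc *d = &cq->ring[cq->compl_head];

        /*
         * Assumed convention: an entry whose generation still equals
         * the expected bit has not been written on this lap; the
         * producer writes the inverted bit when it posts.
         */
        if (d->generation == cq->cur_gen_bit)
            break;

        handle(d->compl_tag);

        cq->compl_head = (cq->compl_head + 1) & cq->compl_mask;
        /* Flip the expected generation each time the head wraps to 0. */
        cq->cur_gen_bit ^= (cq->compl_head == 0);
        work_done++;
    }
    return (work_done);
}

static void
toy_handle(uint16_t compl_tag)
{
    (void)compl_tag;            /* a real driver would free the pending packet here */
}

int
main(void)
{
    static struct toy_compl_ring cq = { .compl_mask = TOY_COMPL_RING_SIZE - 1 };

    /* Fake producer: post three completions carrying the inverted gen bit. */
    for (uint32_t i = 0; i < 3; i++)
        cq.ring[i] = (struct toy_compl_desc){
            .generation = (uint8_t)!cq.cur_gen_bit, .compl_tag = (uint16_t)i };

    return (toy_cleanup(&cq, 64, toy_handle) == 3 ? 0 : 1);
}
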
1136 struct gve_tx_ring *tx = arg; in gve_tx_cleanup_tq_dqo() local
1137 struct gve_priv *priv = tx->com.priv; in gve_tx_cleanup_tq_dqo()
1142 if (gve_tx_cleanup_dqo(priv, tx, /*budget=*/1024)) { in gve_tx_cleanup_tq_dqo()
1143 taskqueue_enqueue(tx->com.cleanup_tq, &tx->com.cleanup_task); in gve_tx_cleanup_tq_dqo()
1147 gve_db_bar_dqo_write_4(priv, tx->com.irq_db_offset, in gve_tx_cleanup_tq_dqo()