Lines Matching full:rx

In gve_free_rx_mbufs_dqo():
   36  gve_free_rx_mbufs_dqo(struct gve_rx_ring *rx)
   41      if (gve_is_qpl(rx->com.priv))
   44      for (i = 0; i < rx->dqo.buf_cnt; i++) {
   45          buf = &rx->dqo.bufs[i];
   49          bus_dmamap_sync(rx->dqo.buf_dmatag, buf->dmamap,
   51          bus_dmamap_unload(rx->dqo.buf_dmatag, buf->dmamap);

In gve_rx_free_ring_dqo():
   60      struct gve_rx_ring *rx = &priv->rx[i];
   63      if (rx->dqo.compl_ring != NULL) {
   64          gve_dma_free_coherent(&rx->dqo.compl_ring_mem);
   65          rx->dqo.compl_ring = NULL;
   68      if (rx->dqo.desc_ring != NULL) {
   69          gve_dma_free_coherent(&rx->desc_ring_mem);
   70          rx->dqo.desc_ring = NULL;
   73      if (rx->dqo.bufs != NULL) {
   74          gve_free_rx_mbufs_dqo(rx);
   76          if (!gve_is_qpl(priv) && rx->dqo.buf_dmatag) {
   77              for (j = 0; j < rx->dqo.buf_cnt; j++)
   78                  if (rx->dqo.bufs[j].mapped)
   79                      bus_dmamap_destroy(rx->dqo.buf_dmatag,
   80                          rx->dqo.bufs[j].dmamap);
   83          free(rx->dqo.bufs, M_GVE);
   84          rx->dqo.bufs = NULL;
   87      if (!gve_is_qpl(priv) && rx->dqo.buf_dmatag)
   88          bus_dma_tag_destroy(rx->dqo.buf_dmatag);
In gve_rx_alloc_ring_dqo():
   94      struct gve_rx_ring *rx = &priv->rx[i];
  100          CACHE_LINE_SIZE, &rx->desc_ring_mem);
  103          "Failed to alloc desc ring for rx ring %d", i);
  106      rx->dqo.desc_ring = rx->desc_ring_mem.cpu_addr;
  107      rx->dqo.mask = priv->rx_desc_cnt - 1;
  111          CACHE_LINE_SIZE, &rx->dqo.compl_ring_mem);
  114          "Failed to alloc compl ring for rx ring %d", i);
  117      rx->dqo.compl_ring = rx->dqo.compl_ring_mem.cpu_addr;
  118      rx->dqo.mask = priv->rx_desc_cnt - 1;
  120      rx->dqo.buf_cnt = gve_is_qpl(priv) ? GVE_RX_NUM_QPL_PAGES_DQO :
  122      rx->dqo.bufs = malloc(rx->dqo.buf_cnt * sizeof(struct gve_rx_buf_dqo),
  126          rx->com.qpl = &priv->qpls[priv->tx_cfg.max_queues + i];
  127          if (rx->com.qpl == NULL) {
  128              device_printf(priv->dev, "No QPL left for rx ring %d", i);
  146              &rx->dqo.buf_dmatag);
  154          for (j = 0; j < rx->dqo.buf_cnt; j++) {
  155              err = bus_dmamap_create(rx->dqo.buf_dmatag, 0,
  156                  &rx->dqo.bufs[j].dmamap);
  159                  "err in creating rx buf dmamap %d: %d",
  163              rx->dqo.bufs[j].mapped = true;
In gve_rx_clear_desc_ring_dqo():
  174  gve_rx_clear_desc_ring_dqo(struct gve_rx_ring *rx)
  176      struct gve_ring_com *com = &rx->com;
  182          rx->dqo.desc_ring[i] = (struct gve_rx_desc_dqo){};
  184      bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map,

In gve_rx_clear_compl_ring_dqo():
  189  gve_rx_clear_compl_ring_dqo(struct gve_rx_ring *rx)
  191      struct gve_ring_com *com = &rx->com;
  195          rx->dqo.compl_ring[i] = (struct gve_rx_compl_desc_dqo){};
  197      bus_dmamap_sync(rx->dqo.compl_ring_mem.tag, rx->dqo.compl_ring_mem.map,

In gve_clear_rx_ring_dqo():
  204      struct gve_rx_ring *rx = &priv->rx[i];
  207      rx->fill_cnt = 0;
  208      rx->cnt = 0;
  209      rx->dqo.mask = priv->rx_desc_cnt - 1;
  210      rx->dqo.head = 0;
  211      rx->dqo.tail = 0;
  212      rx->dqo.cur_gen_bit = 0;
  214      gve_rx_clear_desc_ring_dqo(rx);
  215      gve_rx_clear_compl_ring_dqo(rx);
  217      gve_free_rx_mbufs_dqo(rx);
  220          SLIST_INIT(&rx->dqo.free_bufs);
  221          STAILQ_INIT(&rx->dqo.used_bufs);
  223          for (j = 0; j < rx->dqo.buf_cnt; j++) {
  224              struct gve_rx_buf_dqo *buf = &rx->dqo.bufs[j];
  226              vm_page_t page = rx->com.qpl->pages[buf - rx->dqo.bufs];
  234                  SLIST_INSERT_HEAD(&rx->dqo.free_bufs,
  237                  STAILQ_INSERT_TAIL(&rx->dqo.used_bufs,
  244          SLIST_INIT(&rx->dqo.free_bufs);
  245          for (j = 0; j < rx->dqo.buf_cnt; j++)
  246              SLIST_INSERT_HEAD(&rx->dqo.free_bufs,
  247                  &rx->dqo.bufs[j], slist_entry);
In gve_rx_intr_dqo():
  254      struct gve_rx_ring *rx = arg;
  255      struct gve_priv *priv = rx->com.priv;
  256      struct gve_ring_com *com = &rx->com;

In gve_rx_advance_head_dqo():
  267  gve_rx_advance_head_dqo(struct gve_rx_ring *rx)
  269      rx->dqo.head = (rx->dqo.head + 1) & rx->dqo.mask;
  270      rx->fill_cnt++;  /* rx->fill_cnt is just a sysctl counter */
  272      if ((rx->dqo.head & (GVE_RX_BUF_THRESH_DQO - 1)) == 0) {
  273          bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map,
  275          gve_db_bar_dqo_write_4(rx->com.priv, rx->com.db_offset,
  276              rx->dqo.head);
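The gve_rx_advance_head_dqo() hits above capture the two halves of buffer posting: the head index wraps through a power-of-two mask, and the doorbell is only written once every GVE_RX_BUF_THRESH_DQO postings. Below is a minimal, self-contained userspace model of that batched-doorbell pattern; struct ring_model, ring_doorbell(), RING_SIZE, and THRESH are illustrative stand-ins, not driver symbols.

#include <stdio.h>

#define RING_SIZE  1024u                /* must be a power of two */
#define RING_MASK  (RING_SIZE - 1u)
#define THRESH     8u                   /* stands in for GVE_RX_BUF_THRESH_DQO */

struct ring_model {
	unsigned head;          /* next descriptor slot handed to the device */
	unsigned fill_cnt;      /* running count of posted buffers */
};

/* Illustrative stand-in for the doorbell BAR write done by the driver. */
static void
ring_doorbell(unsigned head)
{
	printf("doorbell write: head=%u\n", head);
}

static void
ring_advance_head(struct ring_model *r)
{
	r->head = (r->head + 1) & RING_MASK;
	r->fill_cnt++;

	/* Batch notifications: only ring the doorbell on every THRESH-th slot. */
	if ((r->head & (THRESH - 1)) == 0)
		ring_doorbell(r->head);
}

int
main(void)
{
	struct ring_model r = {0};

	for (int i = 0; i < 20; i++)
		ring_advance_head(&r);
	printf("posted %u buffers, head now %u\n", r.fill_cnt, r.head);
	return (0);
}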
In gve_rx_post_buf_dqo():
  281  gve_rx_post_buf_dqo(struct gve_rx_ring *rx, struct gve_rx_buf_dqo *buf)
  285      bus_dmamap_sync(rx->dqo.buf_dmatag, buf->dmamap,
  288      desc = &rx->dqo.desc_ring[rx->dqo.head];
  289      desc->buf_id = htole16(buf - rx->dqo.bufs);
  292      gve_rx_advance_head_dqo(rx);

In gve_rx_post_new_mbuf_dqo():
  296  gve_rx_post_new_mbuf_dqo(struct gve_rx_ring *rx, int how)
  303      buf = SLIST_FIRST(&rx->dqo.free_bufs);
  305          device_printf(rx->com.priv->dev,
  309      SLIST_REMOVE_HEAD(&rx->dqo.free_bufs, slist_entry);
  315          counter_u64_add_protected(rx->stats.rx_mbuf_mclget_null, 1);
  321      err = bus_dmamap_load_mbuf_sg(rx->dqo.buf_dmatag, buf->dmamap,
  326          counter_u64_add_protected(rx->stats.rx_mbuf_dmamap_err, 1);
  332      gve_rx_post_buf_dqo(rx, buf);
  339      SLIST_INSERT_HEAD(&rx->dqo.free_bufs, buf, slist_entry);

In gve_get_page_dma_handle():
  344  gve_get_page_dma_handle(struct gve_rx_ring *rx, struct gve_rx_buf_dqo *buf)
  346      return (&(rx->com.qpl->dmas[buf - rx->dqo.bufs]));

In gve_rx_post_qpl_buf_dqo():
  350  gve_rx_post_qpl_buf_dqo(struct gve_rx_ring *rx, struct gve_rx_buf_dqo *buf,
  353      struct gve_rx_desc_dqo *desc = &rx->dqo.desc_ring[rx->dqo.head];
  357      composed_id.buf_id = buf - rx->dqo.bufs;
  361      page_dma_handle = gve_get_page_dma_handle(rx, buf);
  368      gve_rx_advance_head_dqo(rx);
In gve_rx_maybe_extract_from_used_bufs():
  372  gve_rx_maybe_extract_from_used_bufs(struct gve_rx_ring *rx, bool just_one)
  380          buf = STAILQ_FIRST(&rx->dqo.used_bufs);
  384          page = rx->com.qpl->pages[buf - rx->dqo.bufs];
  392              STAILQ_REMOVE_HEAD(&rx->dqo.used_bufs,
  397          STAILQ_REMOVE_HEAD(&rx->dqo.used_bufs,
  399          SLIST_INSERT_HEAD(&rx->dqo.free_bufs,
  406          STAILQ_INSERT_HEAD(&rx->dqo.used_bufs,

In gve_rx_post_new_dqo_qpl_buf():
  411  gve_rx_post_new_dqo_qpl_buf(struct gve_rx_ring *rx)
  415      buf = SLIST_FIRST(&rx->dqo.free_bufs);
  417          gve_rx_maybe_extract_from_used_bufs(rx, /*just_one=*/true);
  418          buf = SLIST_FIRST(&rx->dqo.free_bufs);
  423      gve_rx_post_qpl_buf_dqo(rx, buf, buf->next_idx);
  436          SLIST_REMOVE_HEAD(&rx->dqo.free_bufs, slist_entry);

In gve_rx_post_buffers_dqo():
  441  gve_rx_post_buffers_dqo(struct gve_rx_ring *rx, int how)
  448      num_pending_bufs = (rx->dqo.head - rx->dqo.tail) & rx->dqo.mask;
  449      num_to_post = rx->dqo.mask - num_pending_bufs;
  452          if (gve_is_qpl(rx->com.priv))
  453              err = gve_rx_post_new_dqo_qpl_buf(rx);
  455              err = gve_rx_post_new_mbuf_dqo(rx, how);
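The arithmetic at source lines 448-449 relies on unsigned modular subtraction: with a power-of-two ring, (head - tail) & mask counts the descriptors currently owned by the device even after either index has wrapped, and mask minus that count is what can safely be re-posted. A small standalone check of that identity, with an arbitrary ring size chosen for the demo:

#include <stdio.h>

#define RING_SIZE 256u          /* arbitrary power-of-two size for the demo */
#define RING_MASK (RING_SIZE - 1u)

/* Descriptors currently owned by the device: same expression as line 448. */
static unsigned
pending(unsigned head, unsigned tail)
{
	return ((head - tail) & RING_MASK);
}

int
main(void)
{
	/* No wrap yet: head is simply ahead of tail. */
	printf("%u\n", pending(10, 4));               /* 6 */
	/* Head has wrapped back to 3 while tail is still near the end. */
	printf("%u\n", pending(3, 250));              /* 9 */
	/* Slots that can be re-posted, as computed on line 449. */
	printf("%u\n", RING_MASK - pending(3, 250));  /* 246 */
	return (0);
}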
In gve_rx_prefill_buffers_dqo():
  462  gve_rx_prefill_buffers_dqo(struct gve_rx_ring *rx)
  464      gve_rx_post_buffers_dqo(rx, M_WAITOK);

In gve_rx_input_mbuf_dqo():
  541  gve_rx_input_mbuf_dqo(struct gve_rx_ring *rx,
  544      struct mbuf *mbuf = rx->ctx.mbuf_head;
  545      if_t ifp = rx->com.priv->ifp;
  550      ptype = &rx->com.priv->ptype_lut_dqo->ptypes[compl_desc->packet_type];
  556      mbuf->m_pkthdr.len = rx->ctx.total_size;
  558      if (((if_getcapenable(rx->com.priv->ifp) & IFCAP_LRO) != 0) &&
  560          (rx->lro.lro_cnt != 0) &&
  561          (tcp_lro_rx(&rx->lro, mbuf, 0) == 0))
  568      counter_u64_add_protected(rx->stats.rbytes, rx->ctx.total_size);
  569      counter_u64_add_protected(rx->stats.rpackets, 1);
  572      rx->ctx = (struct gve_rx_ctx){};

In gve_rx_copybreak_dqo():
  576  gve_rx_copybreak_dqo(struct gve_rx_ring *rx, void *va,
  586      counter_u64_add_protected(rx->stats.rx_copybreak_cnt, 1);
  592      rx->ctx.mbuf_head = mbuf;
  593      rx->ctx.mbuf_tail = mbuf;
  594      rx->ctx.total_size += frag_len;
  596      gve_rx_input_mbuf_dqo(rx, compl_desc);
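The gve_rx_copybreak_dqo() hits reflect the usual copybreak trade-off: a frame at or below the copybreak threshold is copied into a freshly allocated small mbuf and handed up, so the original receive buffer can go straight back to the ring instead of leaving the ring with the stack. A hedged userspace sketch of that decision, assuming a hypothetical 256-byte threshold and plain malloc()/memcpy() in place of the mbuf allocator:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical threshold; the real driver uses its own copybreak setting. */
static const size_t copybreak_threshold = 256;

/*
 * Returns a private copy for small frames, or NULL to tell the caller to
 * take the zero-copy path and keep the original receive buffer attached.
 */
static void *
maybe_copybreak(const void *frame, size_t len)
{
	void *copy;

	if (len > copybreak_threshold)
		return (NULL);

	copy = malloc(len);
	if (copy == NULL)
		return (NULL);          /* allocation failed: fall back to zero-copy */
	memcpy(copy, frame, len);
	return (copy);
}

int
main(void)
{
	const char small_frame[64] = "small frame payload";
	char jumbo_frame[2048] = {0};
	void *copy;

	copy = maybe_copybreak(small_frame, sizeof(small_frame));
	printf("small frame copied: %s\n", copy != NULL ? "yes" : "no");
	free(copy);

	copy = maybe_copybreak(jumbo_frame, sizeof(jumbo_frame));
	printf("jumbo frame copied: %s\n", copy != NULL ? "yes" : "no");
	return (0);
}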
In gve_rx_dqo():
  601  gve_rx_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
  606      struct gve_rx_ctx *ctx = &rx->ctx;
  614      if (__predict_false(buf_id >= rx->dqo.buf_cnt)) {
  615          device_printf(priv->dev, "Invalid rx buf id %d on rxq %d, issuing reset\n",
  616              buf_id, rx->com.id);
  620      buf = &rx->dqo.bufs[buf_id];
  623          buf_id, rx->com.id);
  633          counter_u64_add_protected(rx->stats.rx_dropped_pkt_desc_err, 1);
  638      bus_dmamap_sync(rx->dqo.buf_dmatag, buf->dmamap,
  643          err = gve_rx_copybreak_dqo(rx, mtod(buf->mbuf, char*),
  648              gve_rx_post_buf_dqo(rx, buf);
  659          num_pending_bufs = (rx->dqo.head - rx->dqo.tail) & rx->dqo.mask;
  672      err = gve_rx_post_new_mbuf_dqo(rx, M_NOWAIT);
  677              rx->stats.rx_dropped_pkt_mbuf_alloc_fail, 1);
  697      bus_dmamap_unload(rx->dqo.buf_dmatag, buf->dmamap);
  700      SLIST_INSERT_HEAD(&rx->dqo.free_bufs, buf, slist_entry);
  703      gve_rx_input_mbuf_dqo(rx, compl_desc);
  711      rx->ctx = (struct gve_rx_ctx){};
  715          gve_rx_post_buf_dqo(rx, buf);
  723      counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1);
  726      rx->ctx = (struct gve_rx_ctx){};

In gve_get_cpu_addr_for_qpl_buf():
  730  gve_get_cpu_addr_for_qpl_buf(struct gve_rx_ring *rx,
  733      int page_idx = buf - rx->dqo.bufs;
  734      void *va = rx->com.qpl->dmas[page_idx].cpu_addr;

In gve_rx_add_clmbuf_to_ctx():
  741  gve_rx_add_clmbuf_to_ctx(struct gve_rx_ring *rx,
  745      void *va = gve_get_cpu_addr_for_qpl_buf(rx, buf, buf_frag_num);
  767      counter_u64_add_protected(rx->stats.rx_frag_copy_cnt, 1);

In gve_rx_add_extmbuf_to_ctx():
  773  gve_rx_add_extmbuf_to_ctx(struct gve_rx_ring *rx,
  800      page_idx = buf - rx->dqo.bufs;
  801      page = rx->com.qpl->pages[page_idx];
  802      page_addr = rx->com.qpl->dmas[page_idx].cpu_addr;
  812      counter_u64_add_protected(rx->stats.rx_frag_flip_cnt, 1);
In gve_rx_dqo_qpl():
  822  gve_rx_dqo_qpl(struct gve_priv *priv, struct gve_rx_ring *rx,
  829      struct gve_rx_ctx *ctx = &rx->ctx;
  841      if (__predict_false(buf_id >= rx->dqo.buf_cnt)) {
  842          device_printf(priv->dev, "Invalid rx buf id %d on rxq %d, issuing reset\n",
  843              buf_id, rx->com.id);
  847      buf = &rx->dqo.bufs[buf_id];
  852          buf_id, rx->com.id, buf_frag_num, buf->num_nic_frags);
  864          counter_u64_add_protected(rx->stats.rx_dropped_pkt_desc_err, 1);
  869      page_dma_handle = gve_get_page_dma_handle(rx, buf);
  875          void *va = gve_get_cpu_addr_for_qpl_buf(rx, buf, buf_frag_num);
  877          err = gve_rx_copybreak_dqo(rx, va, compl_desc, frag_len);
  881          gve_rx_post_qpl_buf_dqo(rx, buf, buf_frag_num);
  885      num_pending_bufs = (rx->dqo.head - rx->dqo.tail) & rx->dqo.mask;
  886          err = gve_rx_post_new_dqo_qpl_buf(rx);
  895          err = gve_rx_add_clmbuf_to_ctx(rx, ctx, buf,
  900              rx->stats.rx_dropped_pkt_buf_post_fail, 1);
  904          gve_rx_post_qpl_buf_dqo(rx, buf, buf_frag_num);
  906          err = gve_rx_add_extmbuf_to_ctx(rx, ctx, buf,
  911              rx->stats.rx_dropped_pkt_mbuf_alloc_fail, 1);
  927      STAILQ_INSERT_TAIL(&rx->dqo.used_bufs, buf, stailq_entry);
  930      gve_rx_input_mbuf_dqo(rx, compl_desc);
  938      rx->ctx = (struct gve_rx_ctx){};
  942          gve_rx_post_qpl_buf_dqo(rx, buf, buf_frag_num);
  950      counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1);
  953      rx->ctx = (struct gve_rx_ctx){};
In gve_rx_cleanup_dqo():
  957  gve_rx_cleanup_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, int budget)
  965      bus_dmamap_sync(rx->dqo.compl_ring_mem.tag, rx->dqo.compl_ring_mem.map,
  968          compl_desc = &rx->dqo.compl_ring[rx->dqo.tail];
  969          if (compl_desc->generation == rx->dqo.cur_gen_bit)
  977          rx->cnt++;
  978          rx->dqo.tail = (rx->dqo.tail + 1) & rx->dqo.mask;
  979          rx->dqo.cur_gen_bit ^= (rx->dqo.tail == 0);
  982              gve_rx_dqo_qpl(priv, rx, compl_desc, &work_done);
  984              gve_rx_dqo(priv, rx, compl_desc, &work_done);
  988      tcp_lro_flush_all(&rx->lro);
  990      gve_rx_post_buffers_dqo(rx, M_NOWAIT);
  992          gve_rx_maybe_extract_from_used_bufs(rx, /*just_one=*/false);
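Source lines 968-979 above contain the complete generation-bit handshake for the completion ring: a slot whose generation equals the driver's cur_gen_bit has not been written on the current pass, the tail wraps through the ring mask, and cur_gen_bit flips each time the tail returns to slot 0. The following self-contained model reproduces that protocol with a toy producer standing in for the NIC; struct compl_slot, producer_complete(), and the sizes are illustrative, not driver definitions.

#include <stdint.h>
#include <stdio.h>

#define CRING_SIZE 8u
#define CRING_MASK (CRING_SIZE - 1u)

struct compl_slot {
	uint8_t generation;     /* toggled by the producer on every pass over the ring */
	uint16_t buf_id;
};

/* Starts zeroed, like the cleared ring in gve_rx_clear_compl_ring_dqo(). */
static struct compl_slot ring[CRING_SIZE];

/* Toy producer: what the NIC would do when it finishes a receive buffer. */
static void
producer_complete(unsigned *p_idx, uint8_t *p_gen, uint16_t buf_id)
{
	ring[*p_idx].buf_id = buf_id;
	ring[*p_idx].generation = *p_gen;
	*p_idx = (*p_idx + 1) & CRING_MASK;
	if (*p_idx == 0)
		*p_gen ^= 1;    /* new generation once the ring wraps */
}

/* Consumer: mirrors the tail/cur_gen_bit bookkeeping in the hits above. */
static int
consumer_drain(unsigned *tail, uint8_t *cur_gen)
{
	int drained = 0;

	for (;;) {
		struct compl_slot *s = &ring[*tail];

		/* Slot still carries the previous pass's generation: nothing new yet. */
		if (s->generation == *cur_gen)
			break;

		printf("consumed buf_id %u at slot %u\n", (unsigned)s->buf_id, *tail);
		drained++;

		*tail = (*tail + 1) & CRING_MASK;
		*cur_gen ^= (*tail == 0);
	}
	return (drained);
}

int
main(void)
{
	unsigned p_idx = 0, tail = 0;
	uint8_t p_gen = 1, cur_gen = 0;    /* ring is zeroed, so the first pass writes 1 */

	for (uint16_t id = 0; id < 5; id++)
		producer_complete(&p_idx, &p_gen, id);
	printf("first drain: %d completions\n", consumer_drain(&tail, &cur_gen));

	for (uint16_t id = 5; id < 10; id++)
		producer_complete(&p_idx, &p_gen, id);
	printf("second drain (across the wrap): %d completions\n",
	    consumer_drain(&tail, &cur_gen));
	return (0);
}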
In gve_rx_cleanup_tq_dqo():
  999      struct gve_rx_ring *rx = arg;
 1000      struct gve_priv *priv = rx->com.priv;
 1005      if (gve_rx_cleanup_dqo(priv, rx, /*budget=*/64)) {
 1006          taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task);
 1010      gve_db_bar_dqo_write_4(priv, rx->com.irq_db_offset,