/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2024 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"

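/*
 * Free any mbufs currently attached to the ring's buffers. Only relevant in
 * the non-QPL case: with a QPL the receive buffers are backed by the
 * pre-registered page list rather than by per-buffer mbufs.
 */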
static void
gve_free_rx_mbufs_dqo(struct gve_rx_ring *rx)
{
	struct gve_rx_buf_dqo *buf;
	int i;

	if (gve_is_qpl(rx->com.priv))
		return;

	for (i = 0; i < rx->dqo.buf_cnt; i++) {
		buf = &rx->dqo.bufs[i];
		if (!buf->mbuf)
			continue;

		bus_dmamap_sync(rx->dqo.buf_dmatag, buf->dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rx->dqo.buf_dmatag, buf->dmamap);
		m_freem(buf->mbuf);
		buf->mbuf = NULL;
	}
}

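/*
 * Release everything allocated by gve_rx_alloc_ring_dqo(): the completion
 * and descriptor rings, the buffer array with its DMA maps and tag (non-QPL),
 * and the queue page list (QPL).
 */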
void
gve_rx_free_ring_dqo(struct gve_priv *priv, int i)
{
	struct gve_rx_ring *rx = &priv->rx[i];
	struct gve_ring_com *com = &rx->com;
	int j;

	if (rx->dqo.compl_ring != NULL) {
		gve_dma_free_coherent(&rx->dqo.compl_ring_mem);
		rx->dqo.compl_ring = NULL;
	}

	if (rx->dqo.desc_ring != NULL) {
		gve_dma_free_coherent(&rx->desc_ring_mem);
		rx->dqo.desc_ring = NULL;
	}

	if (rx->dqo.bufs != NULL) {
		gve_free_rx_mbufs_dqo(rx);

		if (!gve_is_qpl(priv) && rx->dqo.buf_dmatag) {
			for (j = 0; j < rx->dqo.buf_cnt; j++)
				if (rx->dqo.bufs[j].mapped)
					bus_dmamap_destroy(rx->dqo.buf_dmatag,
					    rx->dqo.bufs[j].dmamap);
		}

		free(rx->dqo.bufs, M_GVE);
		rx->dqo.bufs = NULL;
	}

	if (!gve_is_qpl(priv) && rx->dqo.buf_dmatag)
		bus_dma_tag_destroy(rx->dqo.buf_dmatag);

	if (com->qpl != NULL) {
		gve_free_qpl(priv, com->qpl);
		com->qpl = NULL;
	}
}

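/*
 * Allocate the descriptor and completion rings and the software buffer state
 * for rx queue i. In QPL mode the buffers come from a queue page list of
 * GVE_RX_NUM_QPL_PAGES_DQO pages; otherwise a DMA tag and one map per buffer
 * are created for mbuf clusters. On any failure the partially constructed
 * ring is torn down via gve_rx_free_ring_dqo().
 */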
int
gve_rx_alloc_ring_dqo(struct gve_priv *priv, int i)
{
	struct gve_rx_ring *rx = &priv->rx[i];
	int err;
	int j;

	err = gve_dma_alloc_coherent(priv,
	    sizeof(struct gve_rx_desc_dqo) * priv->rx_desc_cnt,
	    CACHE_LINE_SIZE, &rx->desc_ring_mem);
	if (err != 0) {
		device_printf(priv->dev,
		    "Failed to alloc desc ring for rx ring %d", i);
		goto abort;
	}
	rx->dqo.desc_ring = rx->desc_ring_mem.cpu_addr;
	rx->dqo.mask = priv->rx_desc_cnt - 1;

	err = gve_dma_alloc_coherent(priv,
	    sizeof(struct gve_rx_compl_desc_dqo) * priv->rx_desc_cnt,
	    CACHE_LINE_SIZE, &rx->dqo.compl_ring_mem);
	if (err != 0) {
		device_printf(priv->dev,
		    "Failed to alloc compl ring for rx ring %d", i);
		goto abort;
	}
	rx->dqo.compl_ring = rx->dqo.compl_ring_mem.cpu_addr;
	rx->dqo.mask = priv->rx_desc_cnt - 1;

	rx->dqo.buf_cnt = gve_is_qpl(priv) ? GVE_RX_NUM_QPL_PAGES_DQO :
	    priv->rx_desc_cnt;
	rx->dqo.bufs = malloc(rx->dqo.buf_cnt * sizeof(struct gve_rx_buf_dqo),
	    M_GVE, M_WAITOK | M_ZERO);

	if (gve_is_qpl(priv)) {
		rx->com.qpl = gve_alloc_qpl(priv, i + priv->tx_cfg.max_queues,
		    GVE_RX_NUM_QPL_PAGES_DQO, /*single_kva=*/false);
		if (rx->com.qpl == NULL) {
			device_printf(priv->dev,
			    "Failed to alloc QPL for rx ring %d", i);
			err = ENOMEM;
			goto abort;
		}
		return (0);
	}

	err = bus_dma_tag_create(
	    bus_get_dma_tag(priv->dev),	/* parent */
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &rx->dqo.buf_dmatag);
	if (err != 0) {
		device_printf(priv->dev,
		    "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto abort;
	}

	for (j = 0; j < rx->dqo.buf_cnt; j++) {
		err = bus_dmamap_create(rx->dqo.buf_dmatag, 0,
		    &rx->dqo.bufs[j].dmamap);
		if (err != 0) {
			device_printf(priv->dev,
			    "err in creating rx buf dmamap %d: %d",
			    j, err);
			goto abort;
		}
		rx->dqo.bufs[j].mapped = true;
	}

	return (0);

abort:
	gve_rx_free_ring_dqo(priv, i);
	return (err);
}

static void
gve_rx_clear_desc_ring_dqo(struct gve_rx_ring *rx)
{
	struct gve_ring_com *com = &rx->com;
	int entries;
	int i;

	entries = com->priv->rx_desc_cnt;
	for (i = 0; i < entries; i++)
		rx->dqo.desc_ring[i] = (struct gve_rx_desc_dqo){};

	bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map,
	    BUS_DMASYNC_PREWRITE);
}

static void
gve_rx_clear_compl_ring_dqo(struct gve_rx_ring *rx)
{
	struct gve_ring_com *com = &rx->com;
	int i;

	for (i = 0; i < com->priv->rx_desc_cnt; i++)
		rx->dqo.compl_ring[i] = (struct gve_rx_compl_desc_dqo){};

	bus_dmamap_sync(rx->dqo.compl_ring_mem.tag, rx->dqo.compl_ring_mem.map,
	    BUS_DMASYNC_PREWRITE);
}

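/*
 * Reset the ring to its post-allocation state: zero the rings and indices and
 * rebuild the free/used buffer lists. In QPL mode a page that is still wired
 * by an in-flight mbuf (wire count > 1) is parked on used_bufs instead of
 * free_bufs until it is returned.
 */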
void
gve_clear_rx_ring_dqo(struct gve_priv *priv, int i)
{
	struct gve_rx_ring *rx = &priv->rx[i];
	int j;

	rx->fill_cnt = 0;
	rx->cnt = 0;
	rx->dqo.mask = priv->rx_desc_cnt - 1;
	rx->dqo.head = 0;
	rx->dqo.tail = 0;
	rx->dqo.cur_gen_bit = 0;

	gve_rx_clear_desc_ring_dqo(rx);
	gve_rx_clear_compl_ring_dqo(rx);

	gve_free_rx_mbufs_dqo(rx);

	if (gve_is_qpl(priv)) {
		SLIST_INIT(&rx->dqo.free_bufs);
		STAILQ_INIT(&rx->dqo.used_bufs);

		for (j = 0; j < rx->dqo.buf_cnt; j++) {
			struct gve_rx_buf_dqo *buf = &rx->dqo.bufs[j];

			vm_page_t page = rx->com.qpl->pages[buf - rx->dqo.bufs];
			u_int ref_count = atomic_load_int(&page->ref_count);

			/*
			 * An ifconfig down+up might see pages still in flight
			 * from the previous innings.
			 */
			if (VPRC_WIRE_COUNT(ref_count) == 1)
				SLIST_INSERT_HEAD(&rx->dqo.free_bufs,
				    buf, slist_entry);
			else
				STAILQ_INSERT_TAIL(&rx->dqo.used_bufs,
				    buf, stailq_entry);

			buf->num_nic_frags = 0;
			buf->next_idx = 0;
		}
	} else {
		SLIST_INIT(&rx->dqo.free_bufs);
		for (j = 0; j < rx->dqo.buf_cnt; j++)
			SLIST_INSERT_HEAD(&rx->dqo.free_bufs,
			    &rx->dqo.bufs[j], slist_entry);
	}
}

int
gve_rx_intr_dqo(void *arg)
{
	struct gve_rx_ring *rx = arg;
	struct gve_priv *priv = rx->com.priv;
	struct gve_ring_com *com = &rx->com;

	if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0))
		return (FILTER_STRAY);

	/* Interrupts are automatically masked */
	taskqueue_enqueue(com->cleanup_tq, &com->cleanup_task);
	return (FILTER_HANDLED);
}

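/*
 * Advance the buffer-posting head and, every GVE_RX_BUF_THRESH_DQO slots,
 * sync the descriptor ring and ring the doorbell to hand the newly posted
 * buffers to the NIC.
 */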
static void
gve_rx_advance_head_dqo(struct gve_rx_ring *rx)
{
	rx->dqo.head = (rx->dqo.head + 1) & rx->dqo.mask;
	rx->fill_cnt++; /* rx->fill_cnt is just a sysctl counter */

	if ((rx->dqo.head & (GVE_RX_BUF_THRESH_DQO - 1)) == 0) {
		bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map,
		    BUS_DMASYNC_PREWRITE);
		gve_db_bar_dqo_write_4(rx->com.priv, rx->com.db_offset,
		    rx->dqo.head);
	}
}

static void
gve_rx_post_buf_dqo(struct gve_rx_ring *rx, struct gve_rx_buf_dqo *buf)
{
	struct gve_rx_desc_dqo *desc;

	bus_dmamap_sync(rx->dqo.buf_dmatag, buf->dmamap,
	    BUS_DMASYNC_PREREAD);

	desc = &rx->dqo.desc_ring[rx->dqo.head];
	desc->buf_id = htole16(buf - rx->dqo.bufs);
	desc->buf_addr = htole64(buf->addr);

	gve_rx_advance_head_dqo(rx);
}

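/*
 * Non-QPL buffer posting: take a buffer off free_bufs, attach a freshly
 * allocated cluster mbuf, DMA-map it, and write its address into the next
 * descriptor. On failure the buffer is returned to free_bufs.
 */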
static int
gve_rx_post_new_mbuf_dqo(struct gve_rx_ring *rx, int how)
{
	struct gve_rx_buf_dqo *buf;
	bus_dma_segment_t segs[1];
	int nsegs;
	int err;

	buf = SLIST_FIRST(&rx->dqo.free_bufs);
	if (__predict_false(!buf)) {
		device_printf(rx->com.priv->dev,
		    "Unexpected empty free bufs list\n");
		return (ENOBUFS);
	}
	SLIST_REMOVE_HEAD(&rx->dqo.free_bufs, slist_entry);

	buf->mbuf = m_getcl(how, MT_DATA, M_PKTHDR);
	if (__predict_false(!buf->mbuf)) {
		err = ENOMEM;
		counter_enter();
		counter_u64_add_protected(rx->stats.rx_mbuf_mclget_null, 1);
		counter_exit();
		goto abort_with_buf;
	}
	buf->mbuf->m_len = MCLBYTES;

	err = bus_dmamap_load_mbuf_sg(rx->dqo.buf_dmatag, buf->dmamap,
	    buf->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("dma segs for a cluster mbuf is not 1"));
	if (__predict_false(err != 0)) {
		counter_enter();
		counter_u64_add_protected(rx->stats.rx_mbuf_dmamap_err, 1);
		counter_exit();
		goto abort_with_mbuf;
	}
	buf->addr = segs[0].ds_addr;

	gve_rx_post_buf_dqo(rx, buf);
	return (0);

abort_with_mbuf:
	m_freem(buf->mbuf);
	buf->mbuf = NULL;
abort_with_buf:
	SLIST_INSERT_HEAD(&rx->dqo.free_bufs, buf, slist_entry);
	return (err);
}

static struct gve_dma_handle *
gve_get_page_dma_handle(struct gve_rx_ring *rx, struct gve_rx_buf_dqo *buf)
{
	return (&(rx->com.qpl->dmas[buf - rx->dqo.bufs]));
}

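/*
 * Post one GVE_DEFAULT_RX_BUFFER_SIZE-sized fragment of a QPL page. The
 * buffer id and fragment number are packed together into the descriptor's
 * buf_id so the completion can be matched back to the right fragment.
 */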
static void
gve_rx_post_qpl_buf_dqo(struct gve_rx_ring *rx, struct gve_rx_buf_dqo *buf,
    uint8_t frag_num)
{
	struct gve_rx_desc_dqo *desc = &rx->dqo.desc_ring[rx->dqo.head];
	union gve_rx_qpl_buf_id_dqo composed_id;
	struct gve_dma_handle *page_dma_handle;

	composed_id.buf_id = buf - rx->dqo.bufs;
	composed_id.frag_num = frag_num;
	desc->buf_id = htole16(composed_id.all);

	page_dma_handle = gve_get_page_dma_handle(rx, buf);
	bus_dmamap_sync(page_dma_handle->tag, page_dma_handle->map,
	    BUS_DMASYNC_PREREAD);
	desc->buf_addr = htole64(page_dma_handle->bus_addr +
	    frag_num * GVE_DEFAULT_RX_BUFFER_SIZE);

	buf->num_nic_frags++;
	gve_rx_advance_head_dqo(rx);
}

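/*
 * Walk used_bufs and move pages whose wire count has dropped back to 1
 * (no mbuf still references them) onto free_bufs. At most one head-of-line
 * blocker is skipped over and reinserted at the head so that it is
 * re-examined first on the next pass.
 */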
static void
gve_rx_maybe_extract_from_used_bufs(struct gve_rx_ring *rx, bool just_one)
{
	struct gve_rx_buf_dqo *hol_blocker = NULL;
	struct gve_rx_buf_dqo *buf;
	u_int ref_count;
	vm_page_t page;

	while (true) {
		buf = STAILQ_FIRST(&rx->dqo.used_bufs);
		if (__predict_false(buf == NULL))
			break;

		page = rx->com.qpl->pages[buf - rx->dqo.bufs];
		ref_count = atomic_load_int(&page->ref_count);

		if (VPRC_WIRE_COUNT(ref_count) != 1) {
			/* Account for one head-of-line blocker */
			if (hol_blocker != NULL)
				break;
			hol_blocker = buf;
			STAILQ_REMOVE_HEAD(&rx->dqo.used_bufs,
			    stailq_entry);
			continue;
		}

		STAILQ_REMOVE_HEAD(&rx->dqo.used_bufs,
		    stailq_entry);
		SLIST_INSERT_HEAD(&rx->dqo.free_bufs,
		    buf, slist_entry);
		if (just_one)
			break;
	}

	if (hol_blocker != NULL)
		STAILQ_INSERT_HEAD(&rx->dqo.used_bufs,
		    hol_blocker, stailq_entry);
}

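/*
 * QPL buffer posting: post the next unposted fragment of the first free
 * buffer, reclaiming one page from used_bufs if free_bufs is empty.
 */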
static int
gve_rx_post_new_dqo_qpl_buf(struct gve_rx_ring *rx)
{
	struct gve_rx_buf_dqo *buf;

	buf = SLIST_FIRST(&rx->dqo.free_bufs);
	if (__predict_false(buf == NULL)) {
		gve_rx_maybe_extract_from_used_bufs(rx, /*just_one=*/true);
		buf = SLIST_FIRST(&rx->dqo.free_bufs);
		if (__predict_false(buf == NULL))
			return (ENOBUFS);
	}

	gve_rx_post_qpl_buf_dqo(rx, buf, buf->next_idx);
	if (buf->next_idx == GVE_DQ_NUM_FRAGS_IN_PAGE - 1)
		buf->next_idx = 0;
	else
		buf->next_idx++;

	/*
	 * We have posted all the frags in this buf to the NIC.
	 * - buf will enter used_bufs once the last completion arrives.
	 * - It will re-enter free_bufs in gve_rx_maybe_extract_from_used_bufs
	 *   when its wire count drops back to 1.
	 */
	if (buf->next_idx == 0)
		SLIST_REMOVE_HEAD(&rx->dqo.free_bufs, slist_entry);
	return (0);
}

static void
gve_rx_post_buffers_dqo(struct gve_rx_ring *rx, int how)
{
	uint32_t num_pending_bufs;
	uint32_t num_to_post;
	uint32_t i;
	int err;

	num_pending_bufs = (rx->dqo.head - rx->dqo.tail) & rx->dqo.mask;
	num_to_post = rx->dqo.mask - num_pending_bufs;

	for (i = 0; i < num_to_post; i++) {
		if (gve_is_qpl(rx->com.priv))
			err = gve_rx_post_new_dqo_qpl_buf(rx);
		else
			err = gve_rx_post_new_mbuf_dqo(rx, how);
		if (err)
			break;
	}
}

void
gve_rx_prefill_buffers_dqo(struct gve_rx_ring *rx)
{
	gve_rx_post_buffers_dqo(rx, M_WAITOK);
}

static void
gve_rx_set_hashtype_dqo(struct mbuf *mbuf, struct gve_ptype *ptype, bool *is_tcp)
{
	switch (ptype->l3_type) {
	case GVE_L3_TYPE_IPV4:
		switch (ptype->l4_type) {
		case GVE_L4_TYPE_TCP:
			*is_tcp = true;
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
			break;
		case GVE_L4_TYPE_UDP:
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
			break;
		default:
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
		}
		break;
	case GVE_L3_TYPE_IPV6:
		switch (ptype->l4_type) {
		case GVE_L4_TYPE_TCP:
			*is_tcp = true;
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
			break;
		case GVE_L4_TYPE_UDP:
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
			break;
		default:
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
		}
		break;
	default:
		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
	}
}

static void
gve_rx_set_csum_flags_dqo(struct mbuf *mbuf,
    struct gve_rx_compl_desc_dqo *desc,
    struct gve_ptype *ptype)
{
	/* HW did not identify and process L3 and L4 headers. */
	if (__predict_false(!desc->l3_l4_processed))
		return;

	if (ptype->l3_type == GVE_L3_TYPE_IPV4) {
		if (__predict_false(desc->csum_ip_err ||
		    desc->csum_external_ip_err))
			return;
	} else if (ptype->l3_type == GVE_L3_TYPE_IPV6) {
		/* Checksum should be skipped if this flag is set. */
		if (__predict_false(desc->ipv6_ex_add))
			return;
	}

	if (__predict_false(desc->csum_l4_err))
		return;

	switch (ptype->l4_type) {
	case GVE_L4_TYPE_TCP:
	case GVE_L4_TYPE_UDP:
	case GVE_L4_TYPE_ICMP:
	case GVE_L4_TYPE_SCTP:
		mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED |
					    CSUM_IP_VALID |
					    CSUM_DATA_VALID |
					    CSUM_PSEUDO_HDR;
		mbuf->m_pkthdr.csum_data = 0xffff;
		break;
	default:
		break;
	}
}

static void
gve_rx_input_mbuf_dqo(struct gve_rx_ring *rx,
    struct gve_rx_compl_desc_dqo *compl_desc)
{
	struct mbuf *mbuf = rx->ctx.mbuf_head;
	if_t ifp = rx->com.priv->ifp;
	struct gve_ptype *ptype;
	bool do_if_input = true;
	bool is_tcp = false;

	ptype = &rx->com.priv->ptype_lut_dqo->ptypes[compl_desc->packet_type];
	gve_rx_set_hashtype_dqo(mbuf, ptype, &is_tcp);
	mbuf->m_pkthdr.flowid = le32toh(compl_desc->hash);
	gve_rx_set_csum_flags_dqo(mbuf, compl_desc, ptype);

	mbuf->m_pkthdr.rcvif = ifp;
	mbuf->m_pkthdr.len = rx->ctx.total_size;

	if (((if_getcapenable(rx->com.priv->ifp) & IFCAP_LRO) != 0) &&
	    is_tcp &&
	    (rx->lro.lro_cnt != 0) &&
	    (tcp_lro_rx(&rx->lro, mbuf, 0) == 0))
		do_if_input = false;

	if (do_if_input)
		if_input(ifp, mbuf);

	counter_enter();
	counter_u64_add_protected(rx->stats.rbytes, rx->ctx.total_size);
	counter_u64_add_protected(rx->stats.rpackets, 1);
	counter_exit();

	rx->ctx = (struct gve_rx_ctx){};
}

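/*
 * Copybreak path for small single-fragment packets: copy the payload into a
 * new mbuf so the receive buffer can be reposted immediately by the caller.
 */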
static int
gve_rx_copybreak_dqo(struct gve_rx_ring *rx, void *va,
    struct gve_rx_compl_desc_dqo *compl_desc, uint16_t frag_len)
{
	struct mbuf *mbuf;

	mbuf = m_get2(frag_len, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (__predict_false(mbuf == NULL))
		return (ENOMEM);

	counter_enter();
	counter_u64_add_protected(rx->stats.rx_copybreak_cnt, 1);
	counter_exit();

	m_copyback(mbuf, 0, frag_len, va);
	mbuf->m_len = frag_len;

	rx->ctx.mbuf_head = mbuf;
	rx->ctx.mbuf_tail = mbuf;
	rx->ctx.total_size += frag_len;

	gve_rx_input_mbuf_dqo(rx, compl_desc);
	return (0);
}

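/*
 * Process one completion descriptor in the non-QPL case: validate the buffer
 * id, hand the fragment's mbuf to the packet being assembled in rx->ctx and
 * recycle the buffer. The full packet is passed up the stack once the
 * end-of-packet fragment arrives.
 */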
static void
gve_rx_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
    struct gve_rx_compl_desc_dqo *compl_desc,
    int *work_done)
{
	bool is_last_frag = compl_desc->end_of_packet != 0;
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct gve_rx_buf_dqo *buf;
	uint32_t num_pending_bufs;
	uint16_t frag_len;
	uint16_t buf_id;
	int err;

	buf_id = le16toh(compl_desc->buf_id);
	if (__predict_false(buf_id >= rx->dqo.buf_cnt)) {
		device_printf(priv->dev, "Invalid rx buf id %d on rxq %d, issuing reset\n",
		    buf_id, rx->com.id);
		gve_schedule_reset(priv);
		goto drop_frag_clear_ctx;
	}
	buf = &rx->dqo.bufs[buf_id];
	if (__predict_false(buf->mbuf == NULL)) {
		device_printf(priv->dev, "Spurious completion for buf id %d on rxq %d, issuing reset\n",
		    buf_id, rx->com.id);
		gve_schedule_reset(priv);
		goto drop_frag_clear_ctx;
	}

	if (__predict_false(ctx->drop_pkt))
		goto drop_frag;

	if (__predict_false(compl_desc->rx_error)) {
		counter_enter();
		counter_u64_add_protected(rx->stats.rx_dropped_pkt_desc_err, 1);
		counter_exit();
		goto drop_frag;
	}

	bus_dmamap_sync(rx->dqo.buf_dmatag, buf->dmamap,
	    BUS_DMASYNC_POSTREAD);

	frag_len = compl_desc->packet_len;
	if (frag_len <= priv->rx_copybreak && !ctx->mbuf_head && is_last_frag) {
		err = gve_rx_copybreak_dqo(rx, mtod(buf->mbuf, char*),
		    compl_desc, frag_len);
		if (__predict_false(err != 0))
			goto drop_frag;
		(*work_done)++;
		gve_rx_post_buf_dqo(rx, buf);
		return;
	}

	/*
	 * Although buffer completions may arrive out of order, buffer
	 * descriptors are consumed by the NIC in order. That is, the
	 * buffer at desc_ring[tail] might not be the buffer we got the
	 * completion compl_ring[tail] for: but we know that desc_ring[tail]
	 * has already been read by the NIC.
	 */
	num_pending_bufs = (rx->dqo.head - rx->dqo.tail) & rx->dqo.mask;

	/*
	 * For every fragment received, try to post a new buffer.
	 *
	 * Failures are okay but only so long as the number of outstanding
	 * buffers is above a threshold.
	 *
	 * Beyond that we drop new packets to reuse their buffers.
	 * Without ensuring a minimum number of buffers for the NIC to
	 * put packets in, we run the risk of getting the queue stuck
	 * for good.
	 */
	err = gve_rx_post_new_mbuf_dqo(rx, M_NOWAIT);
	if (__predict_false(err != 0 &&
	    num_pending_bufs <= GVE_RX_DQO_MIN_PENDING_BUFS)) {
		counter_enter();
		counter_u64_add_protected(
		    rx->stats.rx_dropped_pkt_mbuf_alloc_fail, 1);
		counter_exit();
		goto drop_frag;
	}

	buf->mbuf->m_len = frag_len;
	ctx->total_size += frag_len;
	if (ctx->mbuf_tail == NULL) {
		ctx->mbuf_head = buf->mbuf;
		ctx->mbuf_tail = buf->mbuf;
	} else {
		buf->mbuf->m_flags &= ~M_PKTHDR;
		ctx->mbuf_tail->m_next = buf->mbuf;
		ctx->mbuf_tail = buf->mbuf;
	}

	/*
	 * Disassociate the mbuf from buf and surrender buf to the free list to
	 * be used by a future mbuf.
	 */
	bus_dmamap_unload(rx->dqo.buf_dmatag, buf->dmamap);
	buf->mbuf = NULL;
	buf->addr = 0;
	SLIST_INSERT_HEAD(&rx->dqo.free_bufs, buf, slist_entry);

	if (is_last_frag) {
		gve_rx_input_mbuf_dqo(rx, compl_desc);
		(*work_done)++;
	}
	return;

drop_frag:
	/* Clear the earlier frags if there were any */
	m_freem(ctx->mbuf_head);
	rx->ctx = (struct gve_rx_ctx){};
	/* Drop the rest of the pkt if there are more frags */
	ctx->drop_pkt = true;
	/* Reuse the dropped frag's buffer */
	gve_rx_post_buf_dqo(rx, buf);

	if (is_last_frag)
		goto drop_frag_clear_ctx;
	return;

drop_frag_clear_ctx:
	counter_enter();
	counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1);
	counter_exit();
	m_freem(ctx->mbuf_head);
	rx->ctx = (struct gve_rx_ctx){};
}

static void *
gve_get_cpu_addr_for_qpl_buf(struct gve_rx_ring *rx,
    struct gve_rx_buf_dqo *buf, uint8_t buf_frag_num)
{
	int page_idx = buf - rx->dqo.bufs;
	void *va = rx->com.qpl->dmas[page_idx].cpu_addr;

	va = (char *)va + (buf_frag_num * GVE_DEFAULT_RX_BUFFER_SIZE);
	return (va);
}

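/*
 * Append a fragment to the packet under construction by copying it into a
 * newly allocated cluster mbuf. Used when buffers are scarce and the QPL
 * page must be reposted right away.
 */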
static int
gve_rx_add_clmbuf_to_ctx(struct gve_rx_ring *rx,
    struct gve_rx_ctx *ctx, struct gve_rx_buf_dqo *buf,
    uint8_t buf_frag_num, uint16_t frag_len)
{
	void *va = gve_get_cpu_addr_for_qpl_buf(rx, buf, buf_frag_num);
	struct mbuf *mbuf;

	if (ctx->mbuf_tail == NULL) {
		mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (mbuf == NULL)
			return (ENOMEM);
		ctx->mbuf_head = mbuf;
		ctx->mbuf_tail = mbuf;
	} else {
		mbuf = m_getcl(M_NOWAIT, MT_DATA, 0);
		if (mbuf == NULL)
			return (ENOMEM);
		ctx->mbuf_tail->m_next = mbuf;
		ctx->mbuf_tail = mbuf;
	}

	mbuf->m_len = frag_len;
	ctx->total_size += frag_len;

	m_copyback(mbuf, 0, frag_len, va);
	counter_enter();
	counter_u64_add_protected(rx->stats.rx_frag_copy_cnt, 1);
	counter_exit();
	return (0);
}

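/*
 * Append a fragment to the packet under construction zero-copy: wrap the
 * QPL page fragment in an external mbuf via MEXTADD, taking an extra wire
 * on the page so it stays around until gve_mextadd_free runs.
 */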
static int
gve_rx_add_extmbuf_to_ctx(struct gve_rx_ring *rx,
    struct gve_rx_ctx *ctx, struct gve_rx_buf_dqo *buf,
    uint8_t buf_frag_num, uint16_t frag_len)
{
	struct mbuf *mbuf;
	void *page_addr;
	vm_page_t page;
	int page_idx;
	void *va;

	if (ctx->mbuf_tail == NULL) {
		mbuf = m_gethdr(M_NOWAIT, MT_DATA);
		if (mbuf == NULL)
			return (ENOMEM);
		ctx->mbuf_head = mbuf;
		ctx->mbuf_tail = mbuf;
	} else {
		mbuf = m_get(M_NOWAIT, MT_DATA);
		if (mbuf == NULL)
			return (ENOMEM);
		ctx->mbuf_tail->m_next = mbuf;
		ctx->mbuf_tail = mbuf;
	}

	mbuf->m_len = frag_len;
	ctx->total_size += frag_len;

	page_idx = buf - rx->dqo.bufs;
	page = rx->com.qpl->pages[page_idx];
	page_addr = rx->com.qpl->dmas[page_idx].cpu_addr;
	va = (char *)page_addr + (buf_frag_num * GVE_DEFAULT_RX_BUFFER_SIZE);

	/*
	 * Grab an extra ref to the page so that gve_mextadd_free
	 * does not end up freeing the page while the interface exists.
	 */
	vm_page_wire(page);

	counter_enter();
	counter_u64_add_protected(rx->stats.rx_frag_flip_cnt, 1);
	counter_exit();

	MEXTADD(mbuf, va, frag_len,
	    gve_mextadd_free, page, page_addr,
	    0, EXT_NET_DRV);
	return (0);
}

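/*
 * QPL counterpart of gve_rx_dqo(): decode the buffer id and fragment number
 * from the completion, attach the fragment to rx->ctx either zero-copy or by
 * copying (depending on buffer availability), and move fully consumed pages
 * to used_bufs for later reclamation.
 */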
static void
gve_rx_dqo_qpl(struct gve_priv *priv, struct gve_rx_ring *rx,
    struct gve_rx_compl_desc_dqo *compl_desc,
    int *work_done)
{
	bool is_last_frag = compl_desc->end_of_packet != 0;
	union gve_rx_qpl_buf_id_dqo composed_id;
	struct gve_dma_handle *page_dma_handle;
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct gve_rx_buf_dqo *buf;
	uint32_t num_pending_bufs;
	uint8_t buf_frag_num;
	uint16_t frag_len;
	uint16_t buf_id;
	int err;

	composed_id.all = le16toh(compl_desc->buf_id);
	buf_id = composed_id.buf_id;
	buf_frag_num = composed_id.frag_num;

	if (__predict_false(buf_id >= rx->dqo.buf_cnt)) {
		device_printf(priv->dev, "Invalid rx buf id %d on rxq %d, issuing reset\n",
		    buf_id, rx->com.id);
		gve_schedule_reset(priv);
		goto drop_frag_clear_ctx;
	}
	buf = &rx->dqo.bufs[buf_id];
	if (__predict_false(buf->num_nic_frags == 0 ||
	    buf_frag_num > GVE_DQ_NUM_FRAGS_IN_PAGE - 1)) {
		device_printf(priv->dev, "Spurious compl for buf id %d on rxq %d "
		    "with buf_frag_num %d and num_nic_frags %d, issuing reset\n",
		    buf_id, rx->com.id, buf_frag_num, buf->num_nic_frags);
		gve_schedule_reset(priv);
		goto drop_frag_clear_ctx;
	}

	buf->num_nic_frags--;

	if (__predict_false(ctx->drop_pkt))
		goto drop_frag;

	if (__predict_false(compl_desc->rx_error)) {
		counter_enter();
		counter_u64_add_protected(rx->stats.rx_dropped_pkt_desc_err, 1);
		counter_exit();
		goto drop_frag;
	}

	page_dma_handle = gve_get_page_dma_handle(rx, buf);
	bus_dmamap_sync(page_dma_handle->tag, page_dma_handle->map,
	    BUS_DMASYNC_POSTREAD);

	frag_len = compl_desc->packet_len;
	if (frag_len <= priv->rx_copybreak && !ctx->mbuf_head && is_last_frag) {
		void *va = gve_get_cpu_addr_for_qpl_buf(rx, buf, buf_frag_num);

		err = gve_rx_copybreak_dqo(rx, va, compl_desc, frag_len);
		if (__predict_false(err != 0))
			goto drop_frag;
		(*work_done)++;
		gve_rx_post_qpl_buf_dqo(rx, buf, buf_frag_num);
		return;
	}

	num_pending_bufs = (rx->dqo.head - rx->dqo.tail) & rx->dqo.mask;
	err = gve_rx_post_new_dqo_qpl_buf(rx);
	if (__predict_false(err != 0 &&
	    num_pending_bufs <= GVE_RX_DQO_MIN_PENDING_BUFS)) {
		/*
		 * Resort to copying this fragment into a cluster mbuf
		 * when the above threshold is breached and repost the
		 * incoming buffer. If we cannot find cluster mbufs,
		 * just drop the packet (to repost its buffer).
		 */
		err = gve_rx_add_clmbuf_to_ctx(rx, ctx, buf,
		    buf_frag_num, frag_len);
		if (err != 0) {
			counter_enter();
			counter_u64_add_protected(
			    rx->stats.rx_dropped_pkt_buf_post_fail, 1);
			counter_exit();
			goto drop_frag;
		}
		gve_rx_post_qpl_buf_dqo(rx, buf, buf_frag_num);
	} else {
		err = gve_rx_add_extmbuf_to_ctx(rx, ctx, buf,
		    buf_frag_num, frag_len);
		if (__predict_false(err != 0)) {
			counter_enter();
			counter_u64_add_protected(
			    rx->stats.rx_dropped_pkt_mbuf_alloc_fail, 1);
			counter_exit();
			goto drop_frag;
		}
	}

	/*
	 * Both the counts need to be checked.
	 *
	 * num_nic_frags == 0 implies no pending completions
	 * but not all frags may have yet been posted.
	 *
	 * next_idx == 0 implies all frags have been posted
	 * but there might be pending completions.
	 */
	if (buf->num_nic_frags == 0 && buf->next_idx == 0)
		STAILQ_INSERT_TAIL(&rx->dqo.used_bufs, buf, stailq_entry);

	if (is_last_frag) {
		gve_rx_input_mbuf_dqo(rx, compl_desc);
		(*work_done)++;
	}
	return;

drop_frag:
	/* Clear the earlier frags if there were any */
	m_freem(ctx->mbuf_head);
	rx->ctx = (struct gve_rx_ctx){};
	/* Drop the rest of the pkt if there are more frags */
	ctx->drop_pkt = true;
	/* Reuse the dropped frag's buffer */
	gve_rx_post_qpl_buf_dqo(rx, buf, buf_frag_num);

	if (is_last_frag)
		goto drop_frag_clear_ctx;
	return;

drop_frag_clear_ctx:
	counter_enter();
	counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1);
	counter_exit();
	m_freem(ctx->mbuf_head);
	rx->ctx = (struct gve_rx_ctx){};
}

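/*
 * Main receive loop: consume completion descriptors until the budget is
 * exhausted or the generation bit indicates the ring is empty, then flush
 * LRO and repost buffers. Returns true if more work may be pending.
 */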
static bool
gve_rx_cleanup_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, int budget)
{
	struct gve_rx_compl_desc_dqo *compl_desc;
	uint32_t work_done = 0;

	NET_EPOCH_ASSERT();

	while (work_done < budget) {
		bus_dmamap_sync(rx->dqo.compl_ring_mem.tag, rx->dqo.compl_ring_mem.map,
		    BUS_DMASYNC_POSTREAD);

		compl_desc = &rx->dqo.compl_ring[rx->dqo.tail];
		if (compl_desc->generation == rx->dqo.cur_gen_bit)
			break;
		/*
		 * Prevent generation bit from being read after the rest of the
		 * descriptor.
		 */
		atomic_thread_fence_acq();

		rx->cnt++;
		rx->dqo.tail = (rx->dqo.tail + 1) & rx->dqo.mask;
		rx->dqo.cur_gen_bit ^= (rx->dqo.tail == 0);

		if (gve_is_qpl(priv))
			gve_rx_dqo_qpl(priv, rx, compl_desc, &work_done);
		else
			gve_rx_dqo(priv, rx, compl_desc, &work_done);
	}

	if (work_done != 0)
		tcp_lro_flush_all(&rx->lro);

	gve_rx_post_buffers_dqo(rx, M_NOWAIT);
	if (gve_is_qpl(priv))
		gve_rx_maybe_extract_from_used_bufs(rx, /*just_one=*/false);
	return (work_done == budget);
}

void
gve_rx_cleanup_tq_dqo(void *arg, int pending)
{
	struct gve_rx_ring *rx = arg;
	struct gve_priv *priv = rx->com.priv;

	if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0))
		return;

	if (gve_rx_cleanup_dqo(priv, rx, /*budget=*/64)) {
		taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task);
		return;
	}

	gve_db_bar_dqo_write_4(priv, rx->com.irq_db_offset,
	    GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
}