// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google, Inc.
 */

#include "gve.h"
#include "gve_utils.h"

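/* Returns the number of outstanding references on the buffer's page beyond
 * the bias the driver holds. Pages are pre-charged with a large reference
 * bias when bound to a buffer state (see gve_alloc_qpl_page_dqo()), so a
 * result of zero means no SKB still refers to the page.
 */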
int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
{
	return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
}

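/* Pops a buffer state off the head of the singly linked free list.
 * rx->dqo.free_buf_states holds the index of the first free entry, with -1
 * meaning the list is empty; entry indices double as the "next" links.
 */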
struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
{
	struct gve_rx_buf_state_dqo *buf_state;
	s16 buffer_id;

	buffer_id = rx->dqo.free_buf_states;
	if (unlikely(buffer_id == -1))
		return NULL;

	buf_state = &rx->dqo.buf_states[buffer_id];

	/* Remove buf_state from free list */
	rx->dqo.free_buf_states = buf_state->next;

	/* Point buf_state to itself to mark it as allocated */
	buf_state->next = buffer_id;

	return buf_state;
}

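/* An allocated buffer state's "next" link points at its own index (set in
 * gve_alloc_buf_state() and gve_dequeue_buf_state()), which is the property
 * checked here.
 */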
bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
				struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	return buf_state->next == buffer_id;
}

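/* Pushes a buffer state back onto the head of the free list. */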
void gve_free_buf_state(struct gve_rx_ring *rx,
			struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	buf_state->next = rx->dqo.free_buf_states;
	rx->dqo.free_buf_states = buffer_id;
}

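/* Removes and returns the buffer state at the head of a FIFO index list, or
 * NULL if the list is empty. The tail is reset to -1 when the last entry is
 * removed.
 */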
struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
						   struct gve_index_list *list)
{
	struct gve_rx_buf_state_dqo *buf_state;
	s16 buffer_id;

	buffer_id = list->head;
	if (unlikely(buffer_id == -1))
		return NULL;

	buf_state = &rx->dqo.buf_states[buffer_id];

	/* Remove buf_state from list */
	list->head = buf_state->next;
	if (buf_state->next == -1)
		list->tail = -1;

	/* Point buf_state to itself to mark it as allocated */
	buf_state->next = buffer_id;

	return buf_state;
}

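/* Appends a buffer state at the tail of a FIFO index list. */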
void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
			   struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	buf_state->next = -1;

	if (list->head == -1) {
		list->head = buffer_id;
		list->tail = buffer_id;
	} else {
		int tail = list->tail;

		rx->dqo.buf_states[tail].next = buffer_id;
		list->tail = buffer_id;
	}
}

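/* Two-tier lookup for a reusable buffer state: entries on the recycled list
 * are known to be safe to reuse immediately, while entries on the used list
 * must first be checked for a zero reference count. Only a bounded number
 * of used entries (currently five) are examined per call; candidates that
 * are still referenced are re-queued at the tail.
 */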
struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx)
{
	struct gve_rx_buf_state_dqo *buf_state;
	int i;

	/* Recycled buf states are immediately usable. */
	buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
	if (likely(buf_state))
		return buf_state;

	if (unlikely(rx->dqo.used_buf_states.head == -1))
		return NULL;

	/* Used buf states are only usable when ref count reaches 0, which means
	 * no SKBs refer to them.
	 *
	 * Search a limited number before giving up.
	 */
	for (i = 0; i < 5; i++) {
		buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
		if (gve_buf_ref_cnt(buf_state) == 0) {
			rx->dqo.used_buf_states_cnt--;
			return buf_state;
		}

		gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
	}

	return NULL;
}

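/* Binds the next unused page of the rx ring's QPL (queue page list) to a
 * buffer state. The page's reference count is raised to INT_MAX and the same
 * value is recorded as pagecnt_bias, so gve_buf_ref_cnt() can report
 * outstanding SKB references as the difference between the two.
 */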
int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state)
{
	struct gve_priv *priv = rx->gve;
	u32 idx;

	idx = rx->dqo.next_qpl_page_idx;
	if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
		net_err_ratelimited("%s: Out of QPL pages\n",
				    priv->dev->name);
		return -ENOMEM;
	}
	buf_state->page_info.page = rx->dqo.qpl->pages[idx];
	buf_state->addr = rx->dqo.qpl->page_buses[idx];
	rx->dqo.next_qpl_page_idx++;
	buf_state->page_info.page_offset = 0;
	buf_state->page_info.page_address =
		page_address(buf_state->page_info.page);
	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
	buf_state->last_single_ref_offset = 0;

	/* The page already has 1 ref. */
	page_ref_add(buf_state->page_info.page, INT_MAX - 1);
	buf_state->page_info.pagecnt_bias = INT_MAX;

	return 0;
}

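/* Drops the reference bias taken in gve_alloc_qpl_page_dqo(), leaving only
 * the original reference held by the QPL itself.
 */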
void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
{
	if (!buf_state->page_info.page)
		return;

	page_ref_sub(buf_state->page_info.page,
		     buf_state->page_info.pagecnt_bias - 1);
	buf_state->page_info.page = NULL;
}

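/* Attempts to reuse the buffer's page for another buffer-sized chunk by
 * rotating page_offset through the page. last_single_ref_offset records the
 * most recent offset at which the page was down to a single reference; if
 * the offset wraps all the way around without that ever happening, the page
 * may still be in use and is parked on the used list instead.
 */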
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
			 struct gve_rx_buf_state_dqo *buf_state)
{
	const u16 data_buffer_size = priv->data_buffer_size_dqo;
	int pagecount;

	/* Can't reuse if we only fit one buffer per page */
	if (data_buffer_size * 2 > PAGE_SIZE)
		goto mark_used;

	pagecount = gve_buf_ref_cnt(buf_state);

	/* Record the offset when we have a single remaining reference.
	 *
	 * When this happens, we know all of the other offsets of the page are
	 * usable.
	 */
	if (pagecount == 1) {
		buf_state->last_single_ref_offset =
			buf_state->page_info.page_offset;
	}
	/* Use the next buffer-sized chunk in the page. */
	buf_state->page_info.page_offset += data_buffer_size;
	buf_state->page_info.page_offset &= (PAGE_SIZE - 1);

	/* If we wrap around to the same offset without ever dropping to 1
	 * reference, then we don't know if this offset was ever freed.
	 */
	if (buf_state->page_info.page_offset ==
	    buf_state->last_single_ref_offset) {
		goto mark_used;
	}

	gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
	return;

mark_used:
	gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
	rx->dqo.used_buf_states_cnt++;
}

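/* Returns the buffer's page to the page pool it was allocated from.
 * allow_direct permits recycling into the pool's lockless cache, which is
 * only safe when the caller runs in the pool's NAPI context.
 */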
void gve_free_to_page_pool(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state,
			   bool allow_direct)
{
	struct page *page = buf_state->page_info.page;

	if (!page)
		return;

	page_pool_put_full_page(page->pp, page, allow_direct);
	buf_state->page_info.page = NULL;
}

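/* Allocates a buffer from the rx ring's page pool. page_pool_alloc() may
 * hand back a sub-page fragment, filling in page_offset and possibly
 * adjusting buf_size.
 */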
static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
				    struct gve_rx_buf_state_dqo *buf_state)
{
	struct gve_priv *priv = rx->gve;
	struct page *page;

	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
	page = page_pool_alloc(rx->dqo.page_pool,
			       &buf_state->page_info.page_offset,
			       &buf_state->page_info.buf_size, GFP_ATOMIC);

	if (!page)
		return -ENOMEM;

	buf_state->page_info.page = page;
	buf_state->page_info.page_address = page_address(page);
	buf_state->addr = page_pool_get_dma_addr(page);

	return 0;
}

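/* Creates a page pool for an rx ring. The pool maps pages for DMA and syncs
 * them for the device (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV), and is sized
 * as a multiple of the ring's descriptor count.
 */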
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
					  struct gve_rx_ring *rx)
{
	u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
	struct page_pool_params pp = {
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order = 0,
		.pool_size = GVE_PAGE_POOL_SIZE_MULTIPLIER * priv->rx_desc_cnt,
		.dev = &priv->pdev->dev,
		.netdev = priv->dev,
		.napi = &priv->ntfy_blocks[ntfy_id].napi,
		.max_len = PAGE_SIZE,
		.dma_dir = DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp);
}

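/* Releases a buffer that will not be posted again. In page pool mode both
 * the page and the buffer state are freed; in QPL mode only the buffer state
 * is queued for recycling, since the underlying page stays with the QPL.
 */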
void gve_free_buffer(struct gve_rx_ring *rx,
		     struct gve_rx_buf_state_dqo *buf_state)
{
	if (rx->dqo.page_pool) {
		gve_free_to_page_pool(rx, buf_state, true);
		gve_free_buf_state(rx, buf_state);
	} else {
		gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
				      buf_state);
	}
}

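/* Called once the buffer's contents have been handed up the stack. In page
 * pool mode the page reference travels with the SKB, so only the buffer
 * state is released; in QPL mode the driver's bias reference is dropped and
 * the page is considered for recycling.
 */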
void gve_reuse_buffer(struct gve_rx_ring *rx,
		      struct gve_rx_buf_state_dqo *buf_state)
{
	if (rx->dqo.page_pool) {
		buf_state->page_info.page = NULL;
		gve_free_buf_state(rx, buf_state);
	} else {
		gve_dec_pagecnt_bias(&buf_state->page_info);
		gve_try_recycle_buf(rx->gve, rx, buf_state);
	}
}

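/* Fills an rx descriptor with a freshly allocated buffer. Page pool mode
 * always pairs a new buffer state with a pool allocation; QPL mode prefers a
 * recycled buffer state and falls back to binding a fresh QPL page.
 */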
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
{
	struct gve_rx_buf_state_dqo *buf_state;

	if (rx->dqo.page_pool) {
		buf_state = gve_alloc_buf_state(rx);
		if (WARN_ON_ONCE(!buf_state))
			return -ENOMEM;

		if (gve_alloc_from_page_pool(rx, buf_state))
			goto free_buf_state;
	} else {
		buf_state = gve_get_recycled_buf_state(rx);
		if (unlikely(!buf_state)) {
			buf_state = gve_alloc_buf_state(rx);
			if (unlikely(!buf_state))
				return -ENOMEM;

			if (unlikely(gve_alloc_qpl_page_dqo(rx, buf_state)))
				goto free_buf_state;
		}
	}
	desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
	desc->buf_addr = cpu_to_le64(buf_state->addr +
				     buf_state->page_info.page_offset);

	return 0;

free_buf_state:
	gve_free_buf_state(rx, buf_state);
	return -ENOMEM;
}