// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google, Inc.
 */

#include "gve.h"
#include "gve_utils.h"

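/* Number of page references held outside the driver, i.e. page_count()
 * less the bias the driver keeps on the page.
 */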
int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
{
	return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
}

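/* Pop a buffer state off the ring's free list, or return NULL if the list
 * is empty.
 */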
struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
{
	struct gve_rx_buf_state_dqo *buf_state;
	s16 buffer_id;

	buffer_id = rx->dqo.free_buf_states;
	if (unlikely(buffer_id == -1))
		return NULL;

	buf_state = &rx->dqo.buf_states[buffer_id];

	/* Remove buf_state from free list */
	rx->dqo.free_buf_states = buf_state->next;

	/* Point buf_state to itself to mark it as allocated */
	buf_state->next = buffer_id;

	return buf_state;
}

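/* An allocated buffer state has its next index pointing at itself. */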
bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
				struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	return buf_state->next == buffer_id;
}

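/* Push a buffer state back onto the head of the free list. */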
void gve_free_buf_state(struct gve_rx_ring *rx,
			struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	buf_state->next = rx->dqo.free_buf_states;
	rx->dqo.free_buf_states = buffer_id;
}

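/* Remove the buffer state at the head of @list and mark it allocated, or
 * return NULL if the list is empty.
 */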
struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
						   struct gve_index_list *list)
{
	struct gve_rx_buf_state_dqo *buf_state;
	s16 buffer_id;

	buffer_id = list->head;
	if (unlikely(buffer_id == -1))
		return NULL;

	buf_state = &rx->dqo.buf_states[buffer_id];

	/* Remove buf_state from list */
	list->head = buf_state->next;
	if (buf_state->next == -1)
		list->tail = -1;

	/* Point buf_state to itself to mark it as allocated */
	buf_state->next = buffer_id;

	return buf_state;
}

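/* Append a buffer state to the tail of @list. */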
void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
			   struct gve_rx_buf_state_dqo *buf_state)
{
	s16 buffer_id = buf_state - rx->dqo.buf_states;

	buf_state->next = -1;

	if (list->head == -1) {
		list->head = buffer_id;
		list->tail = buffer_id;
	} else {
		int tail = list->tail;

		rx->dqo.buf_states[tail].next = buffer_id;
		list->tail = buffer_id;
	}
}

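/* Return a buffer state that is ready to be posted again: prefer the
 * recycled list, then scan a few entries of the used list for one whose
 * outstanding references have dropped to zero.
 */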
struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx)
{
	struct gve_rx_buf_state_dqo *buf_state;
	int i;

	/* Recycled buf states are immediately usable. */
	buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states);
	if (likely(buf_state))
		return buf_state;

	if (unlikely(rx->dqo.used_buf_states.head == -1))
		return NULL;

	/* Used buf states are only usable when ref count reaches 0, which means
	 * no SKBs refer to them.
	 *
	 * Search a limited number before giving up.
	 */
	for (i = 0; i < 5; i++) {
		buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
		if (gve_buf_ref_cnt(buf_state) == 0) {
			rx->dqo.used_buf_states_cnt--;
			return buf_state;
		}

		gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
	}

	return NULL;
}

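/* Attach the next unused QPL page to @buf_state and bias its refcount so
 * that outstanding stack references can be tracked via gve_buf_ref_cnt().
 */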
int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state)
{
	struct gve_priv *priv = rx->gve;
	u32 idx;

	idx = rx->dqo.next_qpl_page_idx;
	if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
		net_err_ratelimited("%s: Out of QPL pages\n",
				    priv->dev->name);
		return -ENOMEM;
	}
	buf_state->page_info.page = rx->dqo.qpl->pages[idx];
	buf_state->addr = rx->dqo.qpl->page_buses[idx];
	rx->dqo.next_qpl_page_idx++;
	buf_state->page_info.page_offset = 0;
	buf_state->page_info.page_address =
		page_address(buf_state->page_info.page);
	buf_state->page_info.buf_size = rx->packet_buffer_truesize;
	buf_state->page_info.pad = rx->rx_headroom;
	buf_state->last_single_ref_offset = 0;

	/* The page already has 1 ref. */
	page_ref_add(buf_state->page_info.page, INT_MAX - 1);
	buf_state->page_info.pagecnt_bias = INT_MAX;

	return 0;
}

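/* Drop the driver's biased references on the QPL page, if one is attached. */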
void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
{
	if (!buf_state->page_info.page)
		return;

	page_ref_sub(buf_state->page_info.page,
		     buf_state->page_info.pagecnt_bias - 1);
	buf_state->page_info.page = NULL;
}

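/* Advance to the next buffer-sized chunk of the QPL page and recycle the
 * buffer state if that chunk is known to be free; otherwise park it on the
 * used list until its references drain.
 */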
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
			 struct gve_rx_buf_state_dqo *buf_state)
{
	const u16 data_buffer_size = rx->packet_buffer_truesize;
	int pagecount;

	/* Can't reuse if we only fit one buffer per page */
	if (data_buffer_size * 2 > PAGE_SIZE)
		goto mark_used;

	pagecount = gve_buf_ref_cnt(buf_state);

	/* Record the offset when we have a single remaining reference.
	 *
	 * When this happens, we know all of the other offsets of the page are
	 * usable.
	 */
	if (pagecount == 1) {
		buf_state->last_single_ref_offset =
			buf_state->page_info.page_offset;
	}

	/* Use the next buffer sized chunk in the page. */
	buf_state->page_info.page_offset += data_buffer_size;
	buf_state->page_info.page_offset &= (PAGE_SIZE - 1);

	/* If we wrap around to the same offset without ever dropping to 1
	 * reference, then we don't know if this offset was ever freed.
	 */
	if (buf_state->page_info.page_offset ==
	    buf_state->last_single_ref_offset) {
		goto mark_used;
	}

	gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
	return;

mark_used:
	gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
	rx->dqo.used_buf_states_cnt++;
}

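/* Return the buffer's netmem to its page pool, if any is attached. */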
void gve_free_to_page_pool(struct gve_rx_ring *rx,
			   struct gve_rx_buf_state_dqo *buf_state,
			   bool allow_direct)
{
	netmem_ref netmem = buf_state->page_info.netmem;

	if (!netmem)
		return;

	page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct);
	buf_state->page_info.netmem = 0;
}

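/* Fill @buf_state with a fresh netmem buffer from the ring's page pool. */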
static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
				    struct gve_rx_buf_state_dqo *buf_state)
{
	netmem_ref netmem;

	buf_state->page_info.buf_size = rx->packet_buffer_truesize;
	netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
					&buf_state->page_info.page_offset,
					&buf_state->page_info.buf_size,
					GFP_ATOMIC);

	if (!netmem)
		return -ENOMEM;

	buf_state->page_info.netmem = netmem;
	buf_state->page_info.page_address = netmem_address(netmem);
	buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
	buf_state->page_info.pad = rx->dqo.page_pool->p.offset;

	return 0;
}

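/* Create the page pool backing an RX ring; when XDP is enabled the pool
 * uses bidirectional DMA mappings and reserves XDP_PACKET_HEADROOM.
 */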
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
					  struct gve_rx_ring *rx,
					  bool xdp)
{
	u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
	struct page_pool_params pp = {
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order = 0,
		.pool_size = GVE_PAGE_POOL_SIZE_MULTIPLIER * priv->rx_desc_cnt,
		.dev = &priv->pdev->dev,
		.netdev = priv->dev,
		.napi = &priv->ntfy_blocks[ntfy_id].napi,
		.max_len = PAGE_SIZE,
		.dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset = xdp ? XDP_PACKET_HEADROOM : 0,
	};

	return page_pool_create(&pp);
}

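/* Release a buffer that is no longer needed, either back to the page pool
 * or onto the recycled list in QPL mode.
 */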
void gve_free_buffer(struct gve_rx_ring *rx,
		     struct gve_rx_buf_state_dqo *buf_state)
{
	if (rx->dqo.page_pool) {
		gve_free_to_page_pool(rx, buf_state, true);
		gve_free_buf_state(rx, buf_state);
	} else {
		gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
				      buf_state);
	}
}

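/* Release the driver's reference on the buffer memory and make the buffer
 * state available for reuse.
 */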
void gve_reuse_buffer(struct gve_rx_ring *rx,
		      struct gve_rx_buf_state_dqo *buf_state)
{
	if (rx->dqo.page_pool) {
		buf_state->page_info.netmem = 0;
		gve_free_buf_state(rx, buf_state);
	} else {
		gve_dec_pagecnt_bias(&buf_state->page_info);
		gve_try_recycle_buf(rx->gve, rx, buf_state);
	}
}

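/* Post a fresh buffer for @desc: page pool mode allocates a new netmem
 * buffer, while QPL mode prefers a recycled buffer state before consuming
 * a new QPL page.
 */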
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
{
	struct gve_rx_buf_state_dqo *buf_state;

	if (rx->dqo.page_pool) {
		buf_state = gve_alloc_buf_state(rx);
		if (WARN_ON_ONCE(!buf_state))
			return -ENOMEM;

		if (gve_alloc_from_page_pool(rx, buf_state))
			goto free_buf_state;
	} else {
		buf_state = gve_get_recycled_buf_state(rx);
		if (unlikely(!buf_state)) {
			buf_state = gve_alloc_buf_state(rx);
			if (unlikely(!buf_state))
				return -ENOMEM;

			if (unlikely(gve_alloc_qpl_page_dqo(rx, buf_state)))
				goto free_buf_state;
		}
	}
	desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
	desc->buf_addr = cpu_to_le64(buf_state->addr +
				     buf_state->page_info.page_offset +
				     buf_state->page_info.pad);

	return 0;

free_buf_state:
	gve_free_buf_state(rx, buf_state);
	return -ENOMEM;
}