/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/malloc.h>

#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"

static MALLOC_DEFINE(M_GVE_QPL, "gve qpl", "gve qpl allocations");

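/*
 * QPLs (queue page lists) exist only when the device is operating in
 * QPL mode: one QPL per tx queue and one per rx queue.
 */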
static uint32_t
gve_num_tx_qpls(struct gve_priv *priv)
{
	if (!gve_is_qpl(priv))
		return (0);

	return (priv->tx_cfg.max_queues);
}

static uint32_t
gve_num_rx_qpls(struct gve_priv *priv)
{
	if (!gve_is_qpl(priv))
		return (0);

	return (priv->rx_cfg.max_queues);
}

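/*
 * Releases one QPL: destroys the per-page DMA maps, unmaps and frees any
 * KVA, and drops the page wirings. Rx pages may still be referenced by
 * mbufs up in the stack; such pages are freed later by gve_mextadd_free
 * when the last reference goes away.
 */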
static void
gve_free_qpl(struct gve_priv *priv, uint32_t id)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int i;

	for (i = 0; i < qpl->num_dmas; i++) {
		gve_dmamap_destroy(&qpl->dmas[i]);
		/*
		 * Only dma-mapped pages were counted toward
		 * num_registered_pages, so unaccount them here rather
		 * than in the num_pages loop below: on an aborted
		 * allocation num_pages can exceed num_dmas.
		 */
		priv->num_registered_pages--;
	}

	if (qpl->kva) {
		pmap_qremove(qpl->kva, qpl->num_pages);
		kva_free(qpl->kva, PAGE_SIZE * qpl->num_pages);
	}

	for (i = 0; i < qpl->num_pages; i++) {
		/*
		 * Free the page only if this is the last ref.
		 * Tx pages are known to have no other refs at
		 * this point, but Rx pages might still be in
		 * use by the networking stack, see gve_mextadd_free.
		 */
		if (vm_page_unwire_noq(qpl->pages[i])) {
			if (!qpl->kva) {
				pmap_qremove((vm_offset_t)qpl->dmas[i].cpu_addr, 1);
				kva_free((vm_offset_t)qpl->dmas[i].cpu_addr, PAGE_SIZE);
			}
			vm_page_free(qpl->pages[i]);
		}
	}

	if (qpl->pages != NULL)
		free(qpl->pages, M_GVE_QPL);

	if (qpl->dmas != NULL)
		free(qpl->dmas, M_GVE_QPL);
}

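/*
 * Allocates and DMA-maps the pages backing QPL `id`. With single_kva the
 * whole QPL is mapped into one contiguous KVA range; otherwise each page
 * gets its own single-page mapping so pages can later be unmapped and
 * freed individually. On failure, everything allocated so far is torn
 * down via gve_free_qpl.
 */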
static int
gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int err;
	int i;

	if (npages + priv->num_registered_pages > priv->max_registered_pages) {
		device_printf(priv->dev, "Reached max number of registered pages %ju > %ju\n",
		    (uintmax_t)npages + priv->num_registered_pages,
		    (uintmax_t)priv->max_registered_pages);
		return (EINVAL);
	}

	qpl->id = id;
	qpl->num_pages = 0;
	qpl->num_dmas = 0;

	qpl->dmas = malloc(npages * sizeof(*qpl->dmas), M_GVE_QPL,
	    M_WAITOK | M_ZERO);

	qpl->pages = malloc(npages * sizeof(*qpl->pages), M_GVE_QPL,
	    M_WAITOK | M_ZERO);

	qpl->kva = 0;
	if (single_kva) {
		qpl->kva = kva_alloc(PAGE_SIZE * npages);
		if (!qpl->kva) {
			device_printf(priv->dev, "Failed to create the single kva for QPL %d\n", id);
			err = ENOMEM;
			goto abort;
		}
	}

	for (i = 0; i < npages; i++) {
		qpl->pages[i] = vm_page_alloc_noobj(VM_ALLOC_WIRED |
						    VM_ALLOC_WAITOK |
						    VM_ALLOC_ZERO);

		if (!single_kva) {
			qpl->dmas[i].cpu_addr = (void *)kva_alloc(PAGE_SIZE);
			if (!qpl->dmas[i].cpu_addr) {
				device_printf(priv->dev, "Failed to create kva for page %d in QPL %d\n", i, id);
				err = ENOMEM;
				/*
				 * This page is not yet counted in num_pages,
				 * so gve_free_qpl cannot see it: release it
				 * here before aborting.
				 */
				vm_page_unwire_noq(qpl->pages[i]);
				vm_page_free(qpl->pages[i]);
				goto abort;
			}
			pmap_qenter((vm_offset_t)qpl->dmas[i].cpu_addr, &(qpl->pages[i]), 1);
		} else
			qpl->dmas[i].cpu_addr = (void *)(qpl->kva + (PAGE_SIZE * i));

		qpl->num_pages++;
	}

	if (single_kva)
		pmap_qenter(qpl->kva, qpl->pages, npages);

	for (i = 0; i < npages; i++) {
		err = gve_dmamap_create(priv, /*size=*/PAGE_SIZE, /*align=*/PAGE_SIZE,
		    &qpl->dmas[i]);
		if (err != 0) {
			device_printf(priv->dev, "Failed to dma-map page %d in QPL %d\n", i, id);
			goto abort;
		}

		qpl->num_dmas++;
		priv->num_registered_pages++;
	}

	return (0);

abort:
	gve_free_qpl(priv, id);
	return (err);
}

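/*
 * Frees every QPL. Safe to call on a partially constructed set: the
 * qpls array is allocated with M_ZERO, so gve_free_qpl is a no-op for
 * entries that were never filled in.
 */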
void
gve_free_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int i;

	if (num_qpls == 0)
		return;

	if (priv->qpls != NULL) {
		for (i = 0; i < num_qpls; i++)
			gve_free_qpl(priv, i);
		free(priv->qpls, M_GVE_QPL);
		priv->qpls = NULL;
	}
}

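/*
 * Allocates the tx QPLs (ids [0, num_tx_qpls)) followed by the rx QPLs
 * (ids [num_tx_qpls, num_qpls)). Tx QPLs get a single contiguous KVA
 * mapping; rx QPLs are mapped page by page.
 */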
int
gve_alloc_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int num_pages;
	int err;
	int i;

	if (num_qpls == 0)
		return (0);

	priv->qpls = malloc(num_qpls * sizeof(*priv->qpls), M_GVE_QPL,
	    M_WAITOK | M_ZERO);

	num_pages = gve_is_gqi(priv) ?
	    priv->tx_desc_cnt / GVE_QPL_DIVISOR :
	    GVE_TX_NUM_QPL_PAGES_DQO;
	for (i = 0; i < gve_num_tx_qpls(priv); i++) {
		err = gve_alloc_qpl(priv, i, num_pages,
		    /*single_kva=*/true);
		if (err != 0)
			goto abort;
	}

	num_pages = gve_is_gqi(priv) ? priv->rx_desc_cnt : GVE_RX_NUM_QPL_PAGES_DQO;
	for (; i < num_qpls; i++) {
		err = gve_alloc_qpl(priv, i, num_pages, /*single_kva=*/false);
		if (err != 0)
			goto abort;
	}

	return (0);

abort:
	gve_free_qpls(priv);
	return (err);
}

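/*
 * Asks the device to unregister the first n QPLs. Failures are logged
 * but do not stop the loop; the last error encountered (if any) is
 * returned so the caller can leave the QPLREG state flag set.
 */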
static int
gve_unregister_n_qpls(struct gve_priv *priv, int n)
{
	int this_err;
	int err = 0;
	int i;

	for (i = 0; i < n; i++) {
		this_err = gve_adminq_unregister_page_list(priv,
		    priv->qpls[i].id);
		if (this_err != 0) {
			device_printf(priv->dev,
			    "Failed to unregister qpl %d, err: %d\n",
			    priv->qpls[i].id, this_err);
			err = this_err;
		}
	}

	return (err);
}

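/*
 * Registers every QPL with the device over the admin queue. On failure,
 * the QPLs registered so far are unregistered before returning.
 */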
int
gve_register_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int err;
	int i;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK))
		return (0);

	for (i = 0; i < num_qpls; i++) {
		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
		if (err != 0) {
			device_printf(priv->dev,
			    "Failed to register qpl %d, err: %d\n",
			    priv->qpls[i].id, err);
			goto abort;
		}
	}

	gve_set_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
	return (0);

abort:
	gve_unregister_n_qpls(priv, i);
	return (err);
}

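/*
 * Unregisters every QPL and clears the QPLREG state flag. If any
 * unregistration failed, the flag is left set so the operation can be
 * retried.
 */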
int
gve_unregister_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int err;

	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK))
		return (0);

	err = gve_unregister_n_qpls(priv, num_qpls);
	if (err != 0)
		return (err);

	gve_clear_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
	return (0);
}

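/*
 * External-buffer free callback for rx mbufs built on QPL pages:
 * ext_arg1 carries the vm_page_t and ext_arg2 the page's KVA mapping.
 */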
void
gve_mextadd_free(struct mbuf *mbuf)
{
	vm_page_t page = (vm_page_t)mbuf->m_ext.ext_arg1;
	vm_offset_t va = (vm_offset_t)mbuf->m_ext.ext_arg2;

	/*
	 * Free the page only if this is the last ref.
	 * The interface might no longer exist by the time
	 * this callback is called, see gve_free_qpl.
	 */
	if (__predict_false(vm_page_unwire_noq(page))) {
		pmap_qremove(va, 1);
		kva_free(va, PAGE_SIZE);
		vm_page_free(page);
	}
}
310