/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/malloc.h>

#include "gve.h"
#include "gve_adminq.h"

static MALLOC_DEFINE(M_GVE_QPL, "gve qpl", "gve qpl allocations");

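/*
 * Queue page lists (QPLs) exist only in the GQI_QPL queue format, in
 * which the device may DMA only into pages it has been told about in
 * advance. One QPL is created per Tx queue and per Rx queue, up to the
 * configured maximum queue counts.
 */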
static uint32_t
gve_num_tx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return (0);

	return (priv->tx_cfg.max_queues);
}

static uint32_t
gve_num_rx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return (0);

	return (priv->rx_cfg.max_queues);
}

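/*
 * Tear down one QPL: destroy its DMA maps, undo its KVA mappings,
 * unwire its pages, and free any page whose last reference this was.
 */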
static void
gve_free_qpl(struct gve_priv *priv, uint32_t id)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int i;

	for (i = 0; i < qpl->num_dmas; i++)
		gve_dmamap_destroy(&qpl->dmas[i]);

	if (qpl->kva) {
		pmap_qremove(qpl->kva, qpl->num_pages);
		kva_free(qpl->kva, PAGE_SIZE * qpl->num_pages);
	}

	for (i = 0; i < qpl->num_pages; i++) {
		/*
		 * Free the page only if this is the last ref.
		 * Tx pages are known to have no other refs at
		 * this point, but Rx pages might still be in
		 * use by the networking stack, see gve_mextadd_free.
		 */
		if (vm_page_unwire_noq(qpl->pages[i])) {
			if (!qpl->kva) {
				pmap_qremove((vm_offset_t)qpl->dmas[i].cpu_addr, 1);
				kva_free((vm_offset_t)qpl->dmas[i].cpu_addr, PAGE_SIZE);
			}
			vm_page_free(qpl->pages[i]);
		}

		priv->num_registered_pages--;
	}

	if (qpl->pages != NULL)
		free(qpl->pages, M_GVE_QPL);

	if (qpl->dmas != NULL)
		free(qpl->dmas, M_GVE_QPL);
}

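/*
 * Allocate, wire, and map the pages backing one QPL and create their DMA
 * maps. With single_kva set, all npages share one contiguous KVA range;
 * otherwise each page gets its own single-page KVA mapping so it can be
 * unmapped and freed independently (see gve_mextadd_free).
 */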
static int
gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva)
{
	struct gve_queue_page_list *qpl = &priv->qpls[id];
	int err;
	int i;

	if (npages + priv->num_registered_pages > priv->max_registered_pages) {
		device_printf(priv->dev, "Reached max number of registered pages %ju > %ju\n",
		    (uintmax_t)npages + priv->num_registered_pages,
		    (uintmax_t)priv->max_registered_pages);
		return (EINVAL);
	}

	qpl->id = id;
	qpl->num_pages = 0;
	qpl->num_dmas = 0;

	qpl->dmas = malloc(npages * sizeof(*qpl->dmas), M_GVE_QPL,
	    M_WAITOK | M_ZERO);

	qpl->pages = malloc(npages * sizeof(*qpl->pages), M_GVE_QPL,
	    M_WAITOK | M_ZERO);

	qpl->kva = 0;
	if (single_kva) {
		qpl->kva = kva_alloc(PAGE_SIZE * npages);
		if (!qpl->kva) {
			device_printf(priv->dev, "Failed to create the single kva for QPL %d\n", id);
			err = ENOMEM;
			goto abort;
		}
	}

	for (i = 0; i < npages; i++) {
		qpl->pages[i] = vm_page_alloc_noobj(VM_ALLOC_WIRED |
						    VM_ALLOC_WAITOK |
						    VM_ALLOC_ZERO);

		if (!single_kva) {
			qpl->dmas[i].cpu_addr = (void *)kva_alloc(PAGE_SIZE);
			if (!qpl->dmas[i].cpu_addr) {
				device_printf(priv->dev, "Failed to create kva for page %d in QPL %d\n", i, id);
				err = ENOMEM;
				goto abort;
			}
			pmap_qenter((vm_offset_t)qpl->dmas[i].cpu_addr, &(qpl->pages[i]), 1);
		} else
			qpl->dmas[i].cpu_addr = (void *)(qpl->kva + (PAGE_SIZE * i));

		qpl->num_pages++;
	}

	if (single_kva)
		pmap_qenter(qpl->kva, qpl->pages, npages);

	for (i = 0; i < npages; i++) {
		err = gve_dmamap_create(priv, /*size=*/PAGE_SIZE, /*align=*/PAGE_SIZE,
		    &qpl->dmas[i]);
		if (err != 0) {
			device_printf(priv->dev, "Failed to dma-map page %d in QPL %d\n", i, id);
			goto abort;
		}

		qpl->num_dmas++;
		priv->num_registered_pages++;
	}

	return (0);

abort:
	gve_free_qpl(priv, id);
	return (err);
}

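/*
 * Release every QPL and free the QPL array itself.
 */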
void
gve_free_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int i;

	if (num_qpls == 0)
		return;

	if (priv->qpls != NULL) {
		for (i = 0; i < num_qpls; i++)
			gve_free_qpl(priv, i);
		free(priv->qpls, M_GVE_QPL);
		priv->qpls = NULL;
	}
}

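/*
 * Allocate the Tx QPLs followed by the Rx QPLs. Each Tx QPL holds
 * tx_desc_cnt / GVE_QPL_DIVISOR pages backed by a single contiguous
 * KVA mapping; each Rx QPL holds rx_desc_cnt individually mapped pages.
 */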
int
gve_alloc_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int err;
	int i;

	if (num_qpls == 0)
		return (0);

	priv->qpls = malloc(num_qpls * sizeof(*priv->qpls), M_GVE_QPL,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < gve_num_tx_qpls(priv); i++) {
		err = gve_alloc_qpl(priv, i, priv->tx_desc_cnt / GVE_QPL_DIVISOR,
		    /*single_kva=*/true);
		if (err != 0)
			goto abort;
	}

	for (; i < num_qpls; i++) {
		err = gve_alloc_qpl(priv, i, priv->rx_desc_cnt, /*single_kva=*/false);
		if (err != 0)
			goto abort;
	}

	return (0);

abort:
	gve_free_qpls(priv);
	return (err);
}

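/*
 * Unregister the first n QPLs with the device. Each failure is logged,
 * but all n unregistrations are attempted; the last error seen, if any,
 * is returned.
 */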
static int
gve_unregister_n_qpls(struct gve_priv *priv, int n)
{
	int err = 0;
	int this_err;
	int i;

	for (i = 0; i < n; i++) {
		this_err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
		if (this_err != 0) {
			device_printf(priv->dev,
			    "Failed to unregister qpl %d, err: %d\n",
			    priv->qpls[i].id, this_err);
			err = this_err;
		}
	}

	return (err);
}

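/*
 * Register every QPL with the device over the admin queue. A no-op if
 * GVE_STATE_FLAG_QPLREG_OK is already set; on a partial failure, the
 * QPLs registered so far are unregistered again.
 */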
int
gve_register_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int err;
	int i;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK))
		return (0);

	for (i = 0; i < num_qpls; i++) {
		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
		if (err != 0) {
			device_printf(priv->dev,
			    "Failed to register qpl %d, err: %d\n",
			    priv->qpls[i].id, err);
			goto abort;
		}
	}

	gve_set_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
	return (0);

abort:
	gve_unregister_n_qpls(priv, i);
	return (err);
}

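/*
 * Unregister every QPL from the device. A no-op if
 * GVE_STATE_FLAG_QPLREG_OK is not set; the flag is cleared only after
 * all unregistrations succeed.
 */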
int
gve_unregister_qpls(struct gve_priv *priv)
{
	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
	int err;

	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK))
		return (0);

	err = gve_unregister_n_qpls(priv, num_qpls);
	if (err != 0)
		return (err);

	gve_clear_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
	return (0);
}