xref: /freebsd/sys/dev/gve/gve_qpl.c (revision f8ed8382daf4b9a97056b1dba4fe4e5cb4f7485c)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/malloc.h>

#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"

static MALLOC_DEFINE(M_GVE_QPL, "gve qpl", "gve qpl allocations");

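/*
 * Tear down a queue page list: destroy the per-page DMA maps, undo the
 * KVA mappings, unwire the backing pages, and free the bookkeeping
 * arrays. Also safe to call on a partially constructed QPL, which is
 * how the gve_alloc_qpl() error path uses it.
 */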
void
gve_free_qpl(struct gve_priv *priv, struct gve_queue_page_list *qpl)
{
	int i;

	for (i = 0; i < qpl->num_dmas; i++) {
		gve_dmamap_destroy(&qpl->dmas[i]);
	}

	if (qpl->kva) {
		pmap_qremove(qpl->kva, qpl->num_pages);
		kva_free(qpl->kva, PAGE_SIZE * qpl->num_pages);
	}

	for (i = 0; i < qpl->num_pages; i++) {
		/*
		 * Free the page only if this is the last ref.
		 * Tx pages are known to have no other refs at
		 * this point, but Rx pages might still be in
		 * use by the networking stack; see gve_mextadd_free.
		 */
		if (vm_page_unwire_noq(qpl->pages[i])) {
			if (!qpl->kva) {
				pmap_qremove((vm_offset_t)qpl->dmas[i].cpu_addr, 1);
				kva_free((vm_offset_t)qpl->dmas[i].cpu_addr, PAGE_SIZE);
			}
			vm_page_free(qpl->pages[i]);
		}

		priv->num_registered_pages--;
	}

	if (qpl->pages != NULL)
		free(qpl->pages, M_GVE_QPL);

	if (qpl->dmas != NULL)
		free(qpl->dmas, M_GVE_QPL);

	free(qpl, M_GVE_QPL);
}

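/*
 * Allocate a QPL of npages wired, zeroed pages and create a DMA map
 * for each page. With single_kva, all pages are mapped into one
 * contiguous KVA range; otherwise each page gets its own single-page
 * KVA mapping, which can be released independently of the others once
 * the page's last reference goes away (see gve_mextadd_free).
 * Returns NULL on failure, or when the allocation would exceed the
 * device's registered-pages budget.
 */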
struct gve_queue_page_list *
gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva)
{
	struct gve_queue_page_list *qpl;
	int err;
	int i;

	if (npages + priv->num_registered_pages > priv->max_registered_pages) {
		device_printf(priv->dev, "Reached max number of registered pages %ju > %ju\n",
		    (uintmax_t)npages + priv->num_registered_pages,
		    (uintmax_t)priv->max_registered_pages);
		return (NULL);
	}

	qpl = malloc(sizeof(struct gve_queue_page_list), M_GVE_QPL,
	    M_WAITOK | M_ZERO);

	qpl->id = id;
	qpl->num_pages = 0;
	qpl->num_dmas = 0;

	qpl->dmas = malloc(npages * sizeof(*qpl->dmas), M_GVE_QPL,
	    M_WAITOK | M_ZERO);

	qpl->pages = malloc(npages * sizeof(*qpl->pages), M_GVE_QPL,
	    M_WAITOK | M_ZERO);

	qpl->kva = 0;
	if (single_kva) {
		qpl->kva = kva_alloc(PAGE_SIZE * npages);
		if (!qpl->kva) {
			device_printf(priv->dev, "Failed to create the single kva for QPL %d\n", id);
			err = ENOMEM;
			goto abort;
		}
	}

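	/*
	 * Allocate the wired, zeroed pages. In per-page mode, each page is
	 * entered into its own KVA mapping as it is allocated; in single-kva
	 * mode, all pages are entered into the contiguous mapping in one
	 * shot after the loop.
	 */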
	for (i = 0; i < npages; i++) {
		qpl->pages[i] = vm_page_alloc_noobj(VM_ALLOC_WIRED |
						    VM_ALLOC_WAITOK |
						    VM_ALLOC_ZERO);

		if (!single_kva) {
			qpl->dmas[i].cpu_addr = (void *)kva_alloc(PAGE_SIZE);
			if (!qpl->dmas[i].cpu_addr) {
				device_printf(priv->dev, "Failed to create kva for page %d in QPL %d\n", i, id);
				err = ENOMEM;
				/*
				 * Unwind the page just allocated; it is not
				 * yet counted in num_pages, so gve_free_qpl
				 * will not see it.
				 */
				if (vm_page_unwire_noq(qpl->pages[i]))
					vm_page_free(qpl->pages[i]);
				goto abort;
			}
			pmap_qenter((vm_offset_t)qpl->dmas[i].cpu_addr, &(qpl->pages[i]), 1);
		} else
			qpl->dmas[i].cpu_addr = (void *)(qpl->kva + (PAGE_SIZE * i));

		qpl->num_pages++;
		/*
		 * Count the page now so that the per-page decrement in
		 * gve_free_qpl() stays balanced even if a later DMA-map
		 * creation fails and we bail out via abort.
		 */
		priv->num_registered_pages++;
	}

	if (single_kva)
		pmap_qenter(qpl->kva, qpl->pages, npages);

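	/*
	 * Create a DMA map for each page; these maps supply the addresses
	 * the device is given when the page list is registered.
	 */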
	for (i = 0; i < npages; i++) {
		err = gve_dmamap_create(priv, /*size=*/PAGE_SIZE, /*align=*/PAGE_SIZE,
		    &qpl->dmas[i]);
		if (err != 0) {
			device_printf(priv->dev, "Failed to dma-map page %d in QPL %d\n", i, id);
			goto abort;
		}

		qpl->num_dmas++;
	}

	return (qpl);

abort:
	gve_free_qpl(priv, qpl);
	return (NULL);
}

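/*
 * Register every Tx and Rx QPL with the device over the admin queue.
 * A no-op if the QPLs are already registered. Stops at the first
 * failure, in which case the caller is expected to schedule a reset.
 */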
int
gve_register_qpls(struct gve_priv *priv)
{
	struct gve_ring_com *com;
	struct gve_tx_ring *tx;
	struct gve_rx_ring *rx;
	int err;
	int i;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK))
		return (0);

	/* Register TX qpls */
	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		tx = &priv->tx[i];
		com = &tx->com;
		err = gve_adminq_register_page_list(priv, com->qpl);
		if (err != 0) {
			device_printf(priv->dev,
			    "Failed to register qpl %d, err: %d\n",
			    com->qpl->id, err);
			/* Caller schedules a reset when this fails */
			return (err);
		}
	}

	/* Register RX qpls */
	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		rx = &priv->rx[i];
		com = &rx->com;
		err = gve_adminq_register_page_list(priv, com->qpl);
		if (err != 0) {
			device_printf(priv->dev,
			    "Failed to register qpl %d, err: %d\n",
			    com->qpl->id, err);
			/* Caller schedules a reset when this fails */
			return (err);
		}
	}

	gve_set_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
	return (0);
}

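/*
 * Unregister every Tx and Rx QPL from the device. Unlike registration,
 * this keeps going after an individual failure so that as many QPLs as
 * possible are unregistered; an error is returned if any unregister
 * failed, and the registered flag is then left set.
 */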
int
gve_unregister_qpls(struct gve_priv *priv)
{
	int err = 0;
	int this_err;
	int i;
	struct gve_ring_com *com;
	struct gve_tx_ring *tx;
	struct gve_rx_ring *rx;

	if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK))
		return (0);

	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		tx = &priv->tx[i];
		com = &tx->com;
		this_err = gve_adminq_unregister_page_list(priv, com->qpl->id);
		if (this_err != 0) {
			device_printf(priv->dev,
			    "Failed to unregister qpl %d, err: %d\n",
			    com->qpl->id, this_err);
			/* Keep unregistering the rest, but remember the failure. */
			err = this_err;
		}
	}

	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		rx = &priv->rx[i];
		com = &rx->com;
		this_err = gve_adminq_unregister_page_list(priv, com->qpl->id);
		if (this_err != 0) {
			device_printf(priv->dev,
			    "Failed to unregister qpl %d, err: %d\n",
			    com->qpl->id, this_err);
			err = this_err;
		}
	}

	if (err != 0)
		return (err);

	gve_clear_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
	return (0);
}

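/*
 * m_ext free callback for Rx mbufs built on QPL pages: ext_arg1
 * carries the page and ext_arg2 its KVA mapping. Both are released
 * only on the last reference, since the QPL (and the interface) that
 * owned the page may already be gone by now (see gve_free_qpl).
 */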
void
gve_mextadd_free(struct mbuf *mbuf)
{
	vm_page_t page = (vm_page_t)mbuf->m_ext.ext_arg1;
	vm_offset_t va = (vm_offset_t)mbuf->m_ext.ext_arg2;

	/*
	 * Free the page only if this is the last ref.
	 * The interface might no longer exist by the time
	 * this callback is called; see gve_free_qpl.
	 */
	if (__predict_false(vm_page_unwire_noq(page))) {
		pmap_qremove(va, 1);
		kva_free(va, PAGE_SIZE);
		vm_page_free(page);
	}
}