/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023-2024 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "gve.h"
#include "gve_dqo.h"

uint32_t
gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset)
{
	return (be32toh(bus_read_4(priv->reg_bar, offset)));
}

void
gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
{
	bus_write_4(priv->reg_bar, offset, htobe32(val));
}

void
gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
{
	bus_write_4(priv->db_bar, offset, htobe32(val));
}

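/*
 * DQO doorbell values are written as-is, with no byte swap: unlike the
 * register BAR and GQI doorbell writes above, the DQO queue format does not
 * use the big-endian convention, so the host-order value is what the device
 * expects.
 */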
void
gve_db_bar_dqo_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
{
	bus_write_4(priv->db_bar, offset, val);
}

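/*
 * Bulk alloc/free helpers for counter(9) statistics.  Callers elsewhere in
 * the driver typically treat a per-ring stats struct as a flat array of
 * counter_u64_t, e.g. gve_alloc_counters((counter_u64_t *)&stats, NUM_STATS)
 * at ring setup and gve_free_counters() with the same arguments on teardown.
 */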
void
gve_alloc_counters(counter_u64_t *stat, int num_stats)
{
	int i;

	for (i = 0; i < num_stats; i++)
		stat[i] = counter_u64_alloc(M_WAITOK);
}

void
gve_free_counters(counter_u64_t *stat, int num_stats)
{
	int i;

	for (i = 0; i < num_stats; i++)
		counter_u64_free(stat[i]);
}

/* Currently assumes a single segment. */
static void
gve_dmamap_load_callback(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	if (error == 0)
		*(bus_addr_t *) arg = segs[0].ds_addr;
}

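/*
 * Allocates a zeroed, DMA-coherent buffer of 'size' bytes with the requested
 * alignment and loads it so the device can address it.  On success,
 * dma->cpu_addr holds the kernel virtual address and dma->bus_addr the
 * device-visible address; gve_dma_free_coherent() releases everything.
 *
 * Illustrative usage (a sketch, not lifted from an actual caller):
 *
 *	struct gve_dma_handle dma;
 *
 *	if (gve_dma_alloc_coherent(priv, PAGE_SIZE, PAGE_SIZE, &dma) != 0)
 *		return (ENOMEM);
 *	... use dma.cpu_addr, hand dma.bus_addr to the device ...
 *	gve_dma_free_coherent(&dma);
 */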
int
gve_dma_alloc_coherent(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma)
{
	int err;
	device_t dev = priv->dev;

	err = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    align, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &dma->tag);
	if (err != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto clear_tag;
	}

	err = bus_dmamem_alloc(dma->tag, (void **) &dma->cpu_addr,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &dma->map);
	if (err != 0) {
		device_printf(dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto destroy_tag;
	}

	/* An address set by the callback will never be -1 */
	dma->bus_addr = (bus_addr_t)-1;
	err = bus_dmamap_load(dma->tag, dma->map, dma->cpu_addr, size,
	    gve_dmamap_load_callback, &dma->bus_addr, BUS_DMA_NOWAIT);
	if (err != 0 || dma->bus_addr == (bus_addr_t)-1) {
		device_printf(dev, "%s: bus_dmamap_load failed: %d\n", __func__, err);
		if (err == 0)
			err = ENOMEM;	/* Callback never ran; do not report success. */
		goto free_mem;
	}

	return (0);

free_mem:
	bus_dmamem_free(dma->tag, dma->cpu_addr, dma->map);
destroy_tag:
	bus_dma_tag_destroy(dma->tag);
clear_tag:
	dma->tag = NULL;

	return (err);
}

void
gve_dma_free_coherent(struct gve_dma_handle *dma)
{
	bus_dmamap_sync(dma->tag, dma->map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->tag, dma->map);
	bus_dmamem_free(dma->tag, dma->cpu_addr, dma->map);
	bus_dma_tag_destroy(dma->tag);
}

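/*
 * Unlike gve_dma_alloc_coherent(), this does not allocate backing memory: the
 * caller is expected to have pointed dma->cpu_addr at an existing buffer of at
 * least 'size' bytes.  Only the DMA tag and map are created here before that
 * buffer is loaded; gve_dmamap_destroy() is the counterpart.
 */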
int
gve_dmamap_create(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma)
{
	int err;
	device_t dev = priv->dev;

	err = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    align, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &dma->tag);
	if (err != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto clear_tag;
	}

	err = bus_dmamap_create(dma->tag, BUS_DMA_COHERENT, &dma->map);
	if (err != 0) {
		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
		    __func__, err);
		goto destroy_tag;
	}

	/* An address set by the callback will never be -1 */
	dma->bus_addr = (bus_addr_t)-1;
	err = bus_dmamap_load(dma->tag, dma->map, dma->cpu_addr, size,
	    gve_dmamap_load_callback, &dma->bus_addr, BUS_DMA_WAITOK);
	if (err != 0 || dma->bus_addr == (bus_addr_t)-1) {
		device_printf(dev, "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		if (err == 0)
			err = ENOMEM;	/* Callback never ran; do not report success. */
		goto destroy_map;
	}

	return (0);

destroy_map:
	bus_dmamap_destroy(dma->tag, dma->map);
destroy_tag:
	bus_dma_tag_destroy(dma->tag);
clear_tag:
	dma->tag = NULL;

	return (err);
}

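/*
 * The POSTREAD/POSTWRITE sync below completes any outstanding DMA (copying
 * out of bounce buffers where those are in use) before the map is unloaded
 * and the map and tag are destroyed.
 */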
void
gve_dmamap_destroy(struct gve_dma_handle *dma)
{
	bus_dmamap_sync(dma->tag, dma->map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->tag, dma->map);
	bus_dmamap_destroy(dma->tag, dma->map);
	bus_dma_tag_destroy(dma->tag);
}

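/*
 * Management interrupt handler: the actual work is deferred to the service
 * taskqueue, which processes device status changes (such as a device-requested
 * reset) outside of interrupt context.
 */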
static int
gve_mgmnt_intr(void *arg)
{
	struct gve_priv *priv = arg;

	taskqueue_enqueue(priv->service_tq, &priv->service_task);
	return (FILTER_HANDLED);
}

void
gve_free_irqs(struct gve_priv *priv)
{
	struct gve_irq *irq;
	int num_irqs;
	int rid;
	int rc;
	int i;

	if (priv->irq_tbl == NULL) {
		device_printf(priv->dev, "No irq table, nothing to free\n");
		return;
	}

	num_irqs = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues + 1;

	for (i = 0; i < num_irqs; i++) {
		irq = &priv->irq_tbl[i];
		if (irq->res == NULL)
			continue;

		rid = rman_get_rid(irq->res);

		rc = bus_teardown_intr(priv->dev, irq->res, irq->cookie);
		if (rc != 0)
			device_printf(priv->dev, "Failed to tear down irq num %d\n",
			    rid);

		rc = bus_release_resource(priv->dev, SYS_RES_IRQ,
		    rid, irq->res);
		if (rc != 0)
			device_printf(priv->dev, "Failed to release irq num %d\n",
			    rid);

		irq->res = NULL;
		irq->cookie = NULL;
	}

	free(priv->irq_tbl, M_GVE);
	priv->irq_tbl = NULL;

	/* Safe to call even if MSI-X was never allocated. */
	pci_release_msi(priv->dev);
}

int
gve_alloc_irqs(struct gve_priv *priv)
{
	int num_tx = priv->tx_cfg.num_queues;
	int num_rx = priv->rx_cfg.num_queues;
	int req_nvecs = num_tx + num_rx + 1;
	int got_nvecs = req_nvecs;
	struct gve_irq *irq;
	int i, j, m;
	int rid;
	int err;

	struct gve_ring_com *com;
	struct gve_rx_ring *rx;
	struct gve_tx_ring *tx;

	if (pci_alloc_msix(priv->dev, &got_nvecs) != 0) {
		device_printf(priv->dev, "Failed to acquire any msix vectors\n");
		err = ENXIO;
		goto abort;
	} else if (got_nvecs != req_nvecs) {
		device_printf(priv->dev, "Tried to acquire %d msix vectors, got only %d\n",
		    req_nvecs, got_nvecs);
		err = ENOSPC;
		goto abort;
	}

	if (bootverbose)
		device_printf(priv->dev, "Enabled MSIX with %d vectors\n", got_nvecs);

	priv->irq_tbl = malloc(sizeof(struct gve_irq) * req_nvecs, M_GVE,
	    M_WAITOK | M_ZERO);

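	/*
	 * Vector layout: one vector per Tx queue, then one per Rx queue, with
	 * the final vector reserved for the management interrupt.  IRQ
	 * resource ids (rids) are 1-based while notification block ids are
	 * 0-based.
	 */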
	for (i = 0; i < num_tx; i++) {
		irq = &priv->irq_tbl[i];
		tx = &priv->tx[i];
		com = &tx->com;
		rid = i + 1;

		irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE);
		if (irq->res == NULL) {
			device_printf(priv->dev, "Failed to alloc irq %d for Tx queue %d\n",
			    rid, i);
			err = ENOMEM;
			goto abort;
		}

		err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
		    gve_is_gqi(priv) ? gve_tx_intr : gve_tx_intr_dqo, NULL,
		    &priv->tx[i], &irq->cookie);
		if (err != 0) {
			device_printf(priv->dev, "Failed to setup irq %d for Tx queue %d, "
			    "err: %d\n", rid, i, err);
			goto abort;
		}

		bus_describe_intr(priv->dev, irq->res, irq->cookie, "tx%d", i);
		com->ntfy_id = i;
	}

	for (j = 0; j < num_rx; j++) {
		irq = &priv->irq_tbl[i + j];
		rx = &priv->rx[j];
		com = &rx->com;
		rid = i + j + 1;

		irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE);
		if (irq->res == NULL) {
			device_printf(priv->dev,
			    "Failed to alloc irq %d for Rx queue %d\n", rid, j);
			err = ENOMEM;
			goto abort;
		}

		err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
		    gve_is_gqi(priv) ? gve_rx_intr : gve_rx_intr_dqo, NULL,
		    &priv->rx[j], &irq->cookie);
		if (err != 0) {
			device_printf(priv->dev, "Failed to setup irq %d for Rx queue %d, "
			    "err: %d\n", rid, j, err);
			goto abort;
		}

		bus_describe_intr(priv->dev, irq->res, irq->cookie, "rx%d", j);
		com->ntfy_id = i + j;
	}

	m = i + j;
	rid = m + 1;
	irq = &priv->irq_tbl[m];

	irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
	    &rid, RF_ACTIVE);
	if (irq->res == NULL) {
		device_printf(priv->dev, "Failed to allocate irq %d for mgmnt queue\n", rid);
		err = ENOMEM;
		goto abort;
	}

	err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
	    gve_mgmnt_intr, NULL, priv, &irq->cookie);
	if (err != 0) {
		device_printf(priv->dev, "Failed to setup irq %d for mgmnt queue, err: %d\n",
		    rid, err);
		goto abort;
	}

	bus_describe_intr(priv->dev, irq->res, irq->cookie, "mgmnt");

	return (0);

abort:
	gve_free_irqs(priv);
	return (err);
}

/*
 * Builds the register value to write to a DQO IRQ doorbell in order to enable
 * the interrupt with the specified ITR interval.
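 *
 * For example (illustrative): an interval of 20us is halved to 10 (the field
 * has 2us granularity), masked to GVE_ITR_INTERVAL_DQO_MASK, shifted left by
 * GVE_ITR_INTERVAL_DQO_SHIFT and OR'd with GVE_ITR_ENABLE_BIT_DQO.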
 */
static uint32_t
gve_setup_itr_interval_dqo(uint32_t interval_us)
{
	uint32_t result = GVE_ITR_ENABLE_BIT_DQO;

	/* Interval has 2us granularity. */
	interval_us >>= 1;

	interval_us &= GVE_ITR_INTERVAL_DQO_MASK;
	result |= (interval_us << GVE_ITR_INTERVAL_DQO_SHIFT);

	return (result);
}

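/*
 * Unmasks every queue interrupt.  For GQI, writing 0 to a queue's interrupt
 * doorbell re-enables it; for DQO, the doorbell write also programs the
 * per-queue ITR rate limit via gve_setup_itr_interval_dqo().
 */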
void
gve_unmask_all_queue_irqs(struct gve_priv *priv)
{
	struct gve_tx_ring *tx;
	struct gve_rx_ring *rx;
	int idx;

	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		tx = &priv->tx[idx];
		if (gve_is_gqi(priv))
			gve_db_bar_write_4(priv, tx->com.irq_db_offset, 0);
		else
			gve_db_bar_dqo_write_4(priv, tx->com.irq_db_offset,
			    gve_setup_itr_interval_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO));
	}

	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		rx = &priv->rx[idx];
		if (gve_is_gqi(priv))
			gve_db_bar_write_4(priv, rx->com.irq_db_offset, 0);
		else
			gve_db_bar_dqo_write_4(priv, rx->com.irq_db_offset,
			    gve_setup_itr_interval_dqo(GVE_RX_IRQ_RATELIMIT_US_DQO));
	}
}

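/*
 * Masks every queue interrupt by writing GVE_IRQ_MASK to each queue's
 * interrupt doorbell, so the device stops raising those interrupts until
 * they are unmasked again.
 */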
void
gve_mask_all_queue_irqs(struct gve_priv *priv)
{
	for (int idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		struct gve_tx_ring *tx = &priv->tx[idx];
		gve_db_bar_write_4(priv, tx->com.irq_db_offset, GVE_IRQ_MASK);
	}
	for (int idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		struct gve_rx_ring *rx = &priv->rx[idx];
		gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK);
	}
}