xref: /freebsd/sys/dev/xdma/xdma.c (revision ec0ea6efa1ad229d75c394c1a9b9cac33af2b1d3)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2016-2019 Ruslan Bukin <br@bsdpad.com>
5  *
6  * This software was developed by SRI International and the University of
7  * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
8  * ("CTSRD"), as part of the DARPA CRASH research programme.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_platform.h"
36 #include <sys/param.h>
37 #include <sys/conf.h>
38 #include <sys/bus.h>
39 #include <sys/epoch.h>
40 #include <sys/kernel.h>
41 #include <sys/queue.h>
42 #include <sys/kobj.h>
43 #include <sys/malloc.h>
44 #include <sys/limits.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/sysctl.h>
48 #include <sys/systm.h>
49 
50 #include <machine/bus.h>
51 
52 #ifdef FDT
53 #include <dev/fdt/fdt_common.h>
54 #include <dev/ofw/ofw_bus.h>
55 #include <dev/ofw/ofw_bus_subr.h>
56 #endif
57 
58 #include <dev/xdma/xdma.h>
59 
60 #include <xdma_if.h>
61 
/*
 * Multiple xDMA controllers may work with single DMA device,
 * so we have global lock for physical channel management.
 */
static struct mtx xdma_mtx;

#define	XDMA_LOCK()			mtx_lock(&xdma_mtx)
#define	XDMA_UNLOCK()			mtx_unlock(&xdma_mtx)
#define	XDMA_ASSERT_LOCKED()		mtx_assert(&xdma_mtx, MA_OWNED)

/* Max cells per "reg" tuple: up to 2 address cells + 2 size cells. */
#define	FDT_REG_CELLS	4
73 
74 #ifdef FDT
/*
 * Look for an "xdma,iommu" xref property on the DMA controller's FDT
 * node and, if present, resolve it to a device and record it in the
 * channel's IOMMU state (xchan->xio).
 *
 * Returns 1 if an IOMMU device was found, 0 otherwise.  A missing or
 * unresolvable property is not treated as an error.
 */
static int
xdma_get_iommu_fdt(xdma_controller_t *xdma, xdma_channel_t *xchan)
{
	struct xdma_iommu *xio;
	phandle_t node;
	pcell_t prop;
	size_t len;

	node = ofw_bus_get_node(xdma->dma_dev);
	/* Property absent: controller has no IOMMU. */
	if (OF_getproplen(node, "xdma,iommu") <= 0)
		return (0);

	len = OF_getencprop(node, "xdma,iommu", &prop, sizeof(prop));
	if (len != sizeof(prop)) {
		device_printf(xdma->dev,
		    "%s: Can't get iommu device node\n", __func__);
		return (0);
	}

	xio = &xchan->xio;
	xio->dev = OF_device_from_xref(prop);
	if (xio->dev == NULL) {
		device_printf(xdma->dev,
		    "%s: Can't get iommu device\n", __func__);
		return (0);
	}

	/* Found */
	return (1);
}
105 #endif
106 
/*
 * Allocate virtual xDMA channel.
 *
 * Requests a real channel from the hardware driver, initializes the
 * channel's locks and queues, and links it into the controller's
 * channel list.  Returns NULL if the hardware driver cannot provide
 * a channel.
 */
xdma_channel_t *
xdma_channel_alloc(xdma_controller_t *xdma, uint32_t caps)
{
	xdma_channel_t *xchan;
	int ret;

	xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
	xchan->xdma = xdma;

#ifdef FDT
	/* Check if this DMA controller supports IOMMU. */
	if (xdma_get_iommu_fdt(xdma, xchan))
		caps |= XCHAN_CAP_IOMMU | XCHAN_CAP_NOSEG;
#endif

	xchan->caps = caps;

	XDMA_LOCK();

	/* Request a real channel from hardware driver. */
	ret = XDMA_CHANNEL_ALLOC(xdma->dma_dev, xchan);
	if (ret != 0) {
		device_printf(xdma->dev,
		    "%s: Can't request hardware channel.\n", __func__);
		XDMA_UNLOCK();
		free(xchan, M_XDMA);

		return (NULL);
	}

	TAILQ_INIT(&xchan->ie_handlers);

	/* Per-channel locks for the handler list and the request queues. */
	mtx_init(&xchan->mtx_lock, "xDMA chan", NULL, MTX_DEF);
	mtx_init(&xchan->mtx_qin_lock, "xDMA qin", NULL, MTX_DEF);
	mtx_init(&xchan->mtx_qout_lock, "xDMA qout", NULL, MTX_DEF);
	mtx_init(&xchan->mtx_bank_lock, "xDMA bank", NULL, MTX_DEF);
	mtx_init(&xchan->mtx_proc_lock, "xDMA proc", NULL, MTX_DEF);

	TAILQ_INIT(&xchan->bank);
	TAILQ_INIT(&xchan->queue_in);
	TAILQ_INIT(&xchan->queue_out);
	TAILQ_INIT(&xchan->processing);

	/* IOMMU capability may come from FDT (above) or from the caller. */
	if (xchan->caps & XCHAN_CAP_IOMMU)
		xdma_iommu_init(&xchan->xio);

	TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);

	XDMA_UNLOCK();

	return (xchan);
}
162 
/*
 * Release a virtual xDMA channel obtained from xdma_channel_alloc():
 * frees the real hardware channel, the SG state (if any), the IOMMU
 * state (if any), all registered interrupt handlers, the per-channel
 * locks, and finally the channel itself.
 *
 * Returns 0 on success, -1 if the hardware driver refused to free the
 * real channel (the virtual channel is then left intact).
 */
int
xdma_channel_free(xdma_channel_t *xchan)
{
	xdma_controller_t *xdma;
	int err;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	XDMA_LOCK();

	/* Free the real DMA channel. */
	err = XDMA_CHANNEL_FREE(xdma->dma_dev, xchan);
	if (err != 0) {
		device_printf(xdma->dev,
		    "%s: Can't free real hw channel.\n", __func__);
		XDMA_UNLOCK();
		return (-1);
	}

	if (xchan->flags & XCHAN_TYPE_SG)
		xdma_channel_free_sg(xchan);

	if (xchan->caps & XCHAN_CAP_IOMMU)
		xdma_iommu_release(&xchan->xio);

	xdma_teardown_all_intr(xchan);

	/* Locks must be destroyed before the channel memory is freed. */
	mtx_destroy(&xchan->mtx_lock);
	mtx_destroy(&xchan->mtx_qin_lock);
	mtx_destroy(&xchan->mtx_qout_lock);
	mtx_destroy(&xchan->mtx_bank_lock);
	mtx_destroy(&xchan->mtx_proc_lock);

	TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);

	free(xchan, M_XDMA);

	XDMA_UNLOCK();

	return (0);
}
205 
206 int
207 xdma_setup_intr(xdma_channel_t *xchan, int flags,
208     int (*cb)(void *, xdma_transfer_status_t *),
209     void *arg, void **ihandler)
210 {
211 	struct xdma_intr_handler *ih;
212 	xdma_controller_t *xdma;
213 
214 	xdma = xchan->xdma;
215 	KASSERT(xdma != NULL, ("xdma is NULL"));
216 
217 	/* Sanity check. */
218 	if (cb == NULL) {
219 		device_printf(xdma->dev,
220 		    "%s: Can't setup interrupt handler.\n",
221 		    __func__);
222 
223 		return (-1);
224 	}
225 
226 	ih = malloc(sizeof(struct xdma_intr_handler),
227 	    M_XDMA, M_WAITOK | M_ZERO);
228 	ih->flags = flags;
229 	ih->cb = cb;
230 	ih->cb_user = arg;
231 
232 	XCHAN_LOCK(xchan);
233 	TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);
234 	XCHAN_UNLOCK(xchan);
235 
236 	if (ihandler != NULL)
237 		*ihandler = ih;
238 
239 	return (0);
240 }
241 
/*
 * Unregister a single interrupt handler previously installed with
 * xdma_setup_intr() and free it.  Returns 0 on success, -1 if 'ih'
 * is NULL.
 *
 * NOTE(review): the handler list is modified here without taking
 * XCHAN_LOCK(), unlike xdma_setup_intr() — presumably callers are
 * expected to serialize teardown against the channel themselves;
 * confirm before relying on concurrent use.
 */
int
xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih)
{
	xdma_controller_t *xdma;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	/* Sanity check. */
	if (ih == NULL) {
		device_printf(xdma->dev,
		    "%s: Can't teardown interrupt.\n", __func__);
		return (-1);
	}

	TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
	free(ih, M_XDMA);

	return (0);
}
262 
263 int
264 xdma_teardown_all_intr(xdma_channel_t *xchan)
265 {
266 	struct xdma_intr_handler *ih_tmp;
267 	struct xdma_intr_handler *ih;
268 	xdma_controller_t *xdma;
269 
270 	xdma = xchan->xdma;
271 	KASSERT(xdma != NULL, ("xdma is NULL"));
272 
273 	TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
274 		TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
275 		free(ih, M_XDMA);
276 	}
277 
278 	return (0);
279 }
280 
281 int
282 xdma_request(xdma_channel_t *xchan, struct xdma_request *req)
283 {
284 	xdma_controller_t *xdma;
285 	int ret;
286 
287 	xdma = xchan->xdma;
288 
289 	KASSERT(xdma != NULL, ("xdma is NULL"));
290 
291 	XCHAN_LOCK(xchan);
292 	ret = XDMA_CHANNEL_REQUEST(xdma->dma_dev, xchan, req);
293 	if (ret != 0) {
294 		device_printf(xdma->dev,
295 		    "%s: Can't request a transfer.\n", __func__);
296 		XCHAN_UNLOCK(xchan);
297 
298 		return (-1);
299 	}
300 	XCHAN_UNLOCK(xchan);
301 
302 	return (0);
303 }
304 
305 int
306 xdma_control(xdma_channel_t *xchan, enum xdma_command cmd)
307 {
308 	xdma_controller_t *xdma;
309 	int ret;
310 
311 	xdma = xchan->xdma;
312 	KASSERT(xdma != NULL, ("xdma is NULL"));
313 
314 	ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, cmd);
315 	if (ret != 0) {
316 		device_printf(xdma->dev,
317 		    "%s: Can't process command.\n", __func__);
318 		return (-1);
319 	}
320 
321 	return (0);
322 }
323 
/*
 * Deliver a transfer-status notification to every interrupt handler
 * registered on the channel, then (for scatter-gather channels)
 * resubmit any queued requests.
 *
 * Handlers registered with XDMA_INTR_NET are invoked inside the
 * network epoch; the epoch is entered and exited around each such
 * callback individually.
 */
void
xdma_callback(xdma_channel_t *xchan, xdma_transfer_status_t *status)
{
	struct xdma_intr_handler *ih_tmp;
	struct xdma_intr_handler *ih;
	xdma_controller_t *xdma;
	struct epoch_tracker et;

	xdma = xchan->xdma;
	KASSERT(xdma != NULL, ("xdma is NULL"));

	/* SAFE variant: a callback may tear down its own handler. */
	TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
		if (ih->cb != NULL) {
			if (ih->flags & XDMA_INTR_NET)
				NET_EPOCH_ENTER(et);
			ih->cb(ih->cb_user, status);
			if (ih->flags & XDMA_INTR_NET)
				NET_EPOCH_EXIT(et);
		}
	}

	if (xchan->flags & XCHAN_TYPE_SG)
		xdma_queue_submit(xchan);
}
348 
349 #ifdef FDT
350 /*
351  * Notify the DMA driver we have machine-dependent data in FDT.
352  */
353 static int
354 xdma_ofw_md_data(xdma_controller_t *xdma, pcell_t *cells, int ncells)
355 {
356 	uint32_t ret;
357 
358 	ret = XDMA_OFW_MD_DATA(xdma->dma_dev,
359 	    cells, ncells, (void **)&xdma->data);
360 
361 	return (ret);
362 }
363 
/*
 * Parse the "reg" property of an FDT memory node and add each
 * (start, size) range it describes to the given vmem arena.
 *
 * Returns 0 on success, or an error: ERANGE if addresses use more
 * than two cells or the property exceeds the local buffer, ENXIO if
 * the property cannot be read, or the error from
 * fdt_addrsize_cells()/fdt_data_to_res().
 */
int
xdma_handle_mem_node(vmem_t *vmem, phandle_t memory)
{
	pcell_t reg[FDT_REG_CELLS * FDT_MEM_REGIONS];
	pcell_t *regp;
	int addr_cells, size_cells;
	int i, reg_len, ret, tuple_size, tuples;
	u_long mem_start, mem_size;

	/* How many cells encode an address and a size on this bus? */
	if ((ret = fdt_addrsize_cells(OF_parent(memory), &addr_cells,
	    &size_cells)) != 0)
		return (ret);

	if (addr_cells > 2)
		return (ERANGE);

	tuple_size = sizeof(pcell_t) * (addr_cells + size_cells);
	reg_len = OF_getproplen(memory, "reg");
	if (reg_len <= 0 || reg_len > sizeof(reg))
		return (ERANGE);

	if (OF_getprop(memory, "reg", reg, reg_len) <= 0)
		return (ENXIO);

	/* Walk each (address, size) tuple and feed it to the arena. */
	tuples = reg_len / tuple_size;
	regp = (pcell_t *)&reg;
	for (i = 0; i < tuples; i++) {
		ret = fdt_data_to_res(regp, addr_cells, size_cells,
		    &mem_start, &mem_size);
		if (ret != 0)
			return (ret);

		vmem_add(vmem, mem_start, mem_size, 0);
		regp += addr_cells + size_cells;
	}

	return (0);
}
402 
/*
 * Build a vmem arena covering the device's "memory-region" FDT
 * reservation.  Returns NULL if the device is not OFW-based, has no
 * "memory-region" property, or the region cannot be parsed.  The
 * caller releases the arena with xdma_put_memory().
 */
vmem_t *
xdma_get_memory(device_t dev)
{
	phandle_t mem_node, node;
	pcell_t mem_handle;
	vmem_t *vmem;

	node = ofw_bus_get_node(dev);
	if (node <= 0) {
		device_printf(dev,
		    "%s called on not ofw based device.\n", __func__);
		return (NULL);
	}

	/* The property is optional: absence is not an error. */
	if (!OF_hasprop(node, "memory-region"))
		return (NULL);

	if (OF_getencprop(node, "memory-region", (void *)&mem_handle,
	    sizeof(mem_handle)) <= 0)
		return (NULL);

	vmem = vmem_create("xDMA vmem", 0, 0, PAGE_SIZE,
	    PAGE_SIZE, M_BESTFIT | M_WAITOK);
	if (vmem == NULL)
		return (NULL);

	/* Resolve the xref to the memory node and populate the arena. */
	mem_node = OF_node_from_xref(mem_handle);
	if (xdma_handle_mem_node(vmem, mem_node) != 0) {
		vmem_destroy(vmem);
		return (NULL);
	}

	return (vmem);
}
437 
/*
 * Release a vmem arena obtained from xdma_get_memory().
 */
void
xdma_put_memory(vmem_t *vmem)
{

	vmem_destroy(vmem);
}
444 
/*
 * Attach a vmem arena (e.g. from xdma_get_memory()) to the channel.
 */
void
xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem)
{

	xchan->vmem = vmem;
}
451 
452 /*
453  * Allocate xdma controller.
454  */
455 xdma_controller_t *
456 xdma_ofw_get(device_t dev, const char *prop)
457 {
458 	phandle_t node, parent;
459 	xdma_controller_t *xdma;
460 	device_t dma_dev;
461 	pcell_t *cells;
462 	int ncells;
463 	int error;
464 	int ndmas;
465 	int idx;
466 
467 	node = ofw_bus_get_node(dev);
468 	if (node <= 0)
469 		device_printf(dev,
470 		    "%s called on not ofw based device.\n", __func__);
471 
472 	error = ofw_bus_parse_xref_list_get_length(node,
473 	    "dmas", "#dma-cells", &ndmas);
474 	if (error) {
475 		device_printf(dev,
476 		    "%s can't get dmas list.\n", __func__);
477 		return (NULL);
478 	}
479 
480 	if (ndmas == 0) {
481 		device_printf(dev,
482 		    "%s dmas list is empty.\n", __func__);
483 		return (NULL);
484 	}
485 
486 	error = ofw_bus_find_string_index(node, "dma-names", prop, &idx);
487 	if (error != 0) {
488 		device_printf(dev,
489 		    "%s can't find string index.\n", __func__);
490 		return (NULL);
491 	}
492 
493 	error = ofw_bus_parse_xref_list_alloc(node, "dmas", "#dma-cells",
494 	    idx, &parent, &ncells, &cells);
495 	if (error != 0) {
496 		device_printf(dev,
497 		    "%s can't get dma device xref.\n", __func__);
498 		return (NULL);
499 	}
500 
501 	dma_dev = OF_device_from_xref(parent);
502 	if (dma_dev == NULL) {
503 		device_printf(dev,
504 		    "%s can't get dma device.\n", __func__);
505 		return (NULL);
506 	}
507 
508 	xdma = malloc(sizeof(struct xdma_controller),
509 	    M_XDMA, M_WAITOK | M_ZERO);
510 	xdma->dev = dev;
511 	xdma->dma_dev = dma_dev;
512 
513 	TAILQ_INIT(&xdma->channels);
514 
515 	xdma_ofw_md_data(xdma, cells, ncells);
516 	free(cells, M_OFWPROP);
517 
518 	return (xdma);
519 }
520 #endif
521 
522 /*
523  * Allocate xdma controller.
524  */
525 xdma_controller_t *
526 xdma_get(device_t dev, device_t dma_dev)
527 {
528 	xdma_controller_t *xdma;
529 
530 	xdma = malloc(sizeof(struct xdma_controller),
531 	    M_XDMA, M_WAITOK | M_ZERO);
532 	xdma->dev = dev;
533 	xdma->dma_dev = dma_dev;
534 
535 	TAILQ_INIT(&xdma->channels);
536 
537 	return (xdma);
538 }
539 
540 /*
541  * Free xDMA controller object.
542  */
543 int
544 xdma_put(xdma_controller_t *xdma)
545 {
546 
547 	XDMA_LOCK();
548 
549 	/* Ensure no channels allocated. */
550 	if (!TAILQ_EMPTY(&xdma->channels)) {
551 		device_printf(xdma->dev, "%s: Can't free xDMA\n", __func__);
552 		return (-1);
553 	}
554 
555 	free(xdma->data, M_DEVBUF);
556 	free(xdma, M_XDMA);
557 
558 	XDMA_UNLOCK();
559 
560 	return (0);
561 }
562 
static void
xdma_init(void)
{

	/* Global lock serializing physical channel management. */
	mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
}

/* Initialize the lock first in SI_SUB_DRIVERS, before DMA drivers run. */
SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);
571